Diffstat (limited to 'drivers/net/ethernet/broadcom/bnx2x')
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/Makefile             |    3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h              |  564
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c          | 1904
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h          |  340
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c          |  133
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h          |   10
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h         | 3281
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c      | 1035
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h      |   98
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h  |    6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h          |  324
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h         |   44
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h     |   12
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c         |  799
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h         |   35
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c         | 3754
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h      |    2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h          |   84
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c           | 1181
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h           |  163
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c        | 3001
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h        |  584
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c        |  295
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h        |   27
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c         | 2034
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h         |  434
26 files changed, 15234 insertions, 4913 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2x/Makefile b/drivers/net/ethernet/broadcom/bnx2x/Makefile
index 48fbdd48f88..116762daae0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/Makefile
+++ b/drivers/net/ethernet/broadcom/bnx2x/Makefile
@@ -4,4 +4,5 @@
obj-$(CONFIG_BNX2X) += bnx2x.o
-bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o
+bnx2x-y := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o
+bnx2x-$(CONFIG_BNX2X_SRIOV) += bnx2x_vfpf.o bnx2x_sriov.o
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e8d4db10c8f..8206a293e6b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1,21 +1,24 @@
/* bnx2x.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
*/
#ifndef BNX2X_H
#define BNX2X_H
+
+#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>
+#include <linux/pci_regs.h>
/* compilation time flags */
@@ -23,20 +26,18 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.78.00-0"
-#define DRV_MODULE_RELDATE "2012/09/27"
+#define DRV_MODULE_VERSION "1.78.19-0"
+#define DRV_MODULE_RELDATE "2014/02/10"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
#define BCM_DCBNL
#endif
-
#include "bnx2x_hsi.h"
#include "../cnic_if.h"
-
#define BNX2X_MIN_MSIX_VEC_CNT(bp) ((bp)->min_msix_vec_cnt)
#include <linux/mdio.h>
@@ -48,6 +49,13 @@
#include "bnx2x_sp.h"
#include "bnx2x_dcb.h"
#include "bnx2x_stats.h"
+#include "bnx2x_vfpf.h"
+
+enum bnx2x_int_mode {
+ BNX2X_INT_MODE_MSIX,
+ BNX2X_INT_MODE_INTX,
+ BNX2X_INT_MODE_MSI
+};
/* error/debug prints */
@@ -67,13 +75,22 @@
#define BNX2X_MSG_DCB 0x8000000
/* regular debug print */
+#define DP_INNER(fmt, ...) \
+ pr_notice("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ bp->dev ? (bp->dev->name) : "?", \
+ ##__VA_ARGS__);
+
#define DP(__mask, fmt, ...) \
do { \
if (unlikely(bp->msg_enable & (__mask))) \
- pr_notice("[%s:%d(%s)]" fmt, \
- __func__, __LINE__, \
- bp->dev ? (bp->dev->name) : "?", \
- ##__VA_ARGS__); \
+ DP_INNER(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define DP_AND(__mask, fmt, ...) \
+do { \
+ if (unlikely((bp->msg_enable & (__mask)) == __mask)) \
+ DP_INNER(fmt, ##__VA_ARGS__); \
} while (0)
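The two macros differ in how the mask is tested: DP() fires when any bit of __mask is enabled in bp->msg_enable, while DP_AND() requires all of them at once. A minimal usage sketch (the function is hypothetical and the flag pairing purely illustrative):

static void example_log(struct bnx2x *bp)
{
	/* fires if either flag is enabled in bp->msg_enable */
	DP(BNX2X_MSG_DCB | NETIF_MSG_LINK, "dcb/link event\n");

	/* fires only when both flags are enabled simultaneously */
	DP_AND(BNX2X_MSG_DCB | NETIF_MSG_LINK, "verbose dcb/link detail\n");
}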
#define DP_CONT(__mask, fmt, ...) \
@@ -104,7 +121,6 @@ do { \
#define BNX2X_ERROR(fmt, ...) \
pr_err("[%s:%d]" fmt, __func__, __LINE__, ##__VA_ARGS__)
-
/* before we have a dev->name use dev_info() */
#define BNX2X_DEV_INFO(fmt, ...) \
do { \
@@ -112,32 +128,31 @@ do { \
dev_info(&bp->pdev->dev, fmt, ##__VA_ARGS__); \
} while (0)
+/* Error handling */
+void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int);
#ifdef BNX2X_STOP_ON_ERROR
-void bnx2x_int_disable(struct bnx2x *bp);
#define bnx2x_panic() \
do { \
bp->panic = 1; \
BNX2X_ERR("driver assert\n"); \
- bnx2x_int_disable(bp); \
- bnx2x_panic_dump(bp); \
+ bnx2x_panic_dump(bp, true); \
} while (0)
#else
#define bnx2x_panic() \
do { \
bp->panic = 1; \
BNX2X_ERR("driver assert\n"); \
- bnx2x_panic_dump(bp); \
+ bnx2x_panic_dump(bp, false); \
} while (0)
#endif
#define bnx2x_mc_addr(ha) ((ha)->addr)
#define bnx2x_uc_addr(ha) ((ha)->addr)
-#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff)
-#define U64_HI(x) (u32)(((u64)(x)) >> 32)
+#define U64_LO(x) ((u32)(((u64)(x)) & 0xffffffff))
+#define U64_HI(x) ((u32)(((u64)(x)) >> 32))
#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
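The added outer parentheses make U64_LO()/U64_HI() safe to embed in larger expressions without operator-precedence surprises. A sketch of the pattern they typically serve (the register offset is a placeholder, not taken from this patch; REG_WR is the write counterpart of REG_RD defined below):

static void example_write_dma_addr(struct bnx2x *bp, u32 reg, dma_addr_t map)
{
	REG_WR(bp, reg, U64_LO(map));		/* low 32 bits of the address */
	REG_WR(bp, reg + 4, U64_HI(map));	/* high 32 bits */
}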
-
#define REG_ADDR(bp, offset) ((bp->regview) + (offset))
#define REG_RD(bp, offset) readl(REG_ADDR(bp, offset))
@@ -240,8 +255,37 @@ enum {
BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
};
-#define BNX2X_CNIC_START_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) *\
+/* use a value high enough to be above all the PFs, with a least significant
+ * nibble of 8, so that when cnic needs to come up with a CID for UIO to use to
+ * calculate the doorbell address according to the old doorbell configuration
+ * scheme (db_msg_sz 1 << 7 * cid + 0x40 DPM offset) it can come up with a
+ * valid number. We must avoid cid 8 for iscsi, since by this method the
+ * designated UIO cid would come out 0, which has special handling that doesn't
+ * suit us. Therefore we round up to the closest cid whose least significant
+ * nibble is 8, and if that is 8 itself we move forward to 0x18.
+ */
+
+#define BNX2X_1st_NON_L2_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * \
(bp)->max_cos)
+/* amount of cids traversed by UIO's DPM addition to doorbell */
+#define UIO_DPM 8
+/* roundup to DPM offset */
+#define UIO_ROUNDUP(bp) (roundup(BNX2X_1st_NON_L2_ETH_CID(bp), \
+ UIO_DPM))
+/* offset to nearest value which has lsb nibble matching DPM */
+#define UIO_CID_OFFSET(bp) ((UIO_ROUNDUP(bp) + UIO_DPM) % \
+ (UIO_DPM * 2))
+/* add offset to rounded-up cid to get a value which could be used with UIO */
+#define UIO_DPM_ALIGN(bp) (UIO_ROUNDUP(bp) + UIO_CID_OFFSET(bp))
+/* but wait - avoid UIO special case for cid 0 */
+#define UIO_DPM_CID0_OFFSET(bp) ((UIO_DPM * 2) * \
+ (UIO_DPM_ALIGN(bp) == UIO_DPM))
+/* Properly DPM-aligned CID adjusted for the cid 0 special case */
+#define BNX2X_CNIC_START_ETH_CID(bp) (UIO_DPM_ALIGN(bp) + \
+ (UIO_DPM_CID0_OFFSET(bp)))
+/* how many cids were wasted - need this value for cid allocation */
+#define UIO_CID_PAD(bp) (BNX2X_CNIC_START_ETH_CID(bp) - \
+ BNX2X_1st_NON_L2_ETH_CID(bp))
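Tracing the macros with hypothetical queue counts makes the rounding scheme concrete (the values are invented for illustration):

/* Example: BNX2X_1st_NON_L2_ETH_CID == 4
 *   UIO_ROUNDUP          = roundup(4, 8)  = 8
 *   UIO_CID_OFFSET       = (8 + 8) % 16   = 0
 *   UIO_DPM_ALIGN        = 8 + 0          = 8    (the cid-0 special case)
 *   UIO_DPM_CID0_OFFSET  = 16 * (8 == 8)  = 16
 *   BNX2X_CNIC_START_ETH_CID = 8 + 16     = 24   (0x18, lsb nibble 8)
 *   UIO_CID_PAD          = 24 - 4         = 20
 * Example: BNX2X_1st_NON_L2_ETH_CID == 28
 *   UIO_ROUNDUP = 32, UIO_CID_OFFSET = 8, start cid = 40 (0x28), pad = 12
 */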
/* iSCSI L2 */
#define BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp))
/* FCoE L2 */
@@ -302,6 +346,7 @@ struct sw_tx_bd {
u8 flags;
/* Set on the first BD descriptor when there is a split BD */
#define BNX2X_TSO_SPLIT_BD (1<<0)
+#define BNX2X_HAS_SECOND_PBD (1<<1)
};
struct sw_rx_page {
@@ -334,6 +379,9 @@ union db_prod {
#define SGE_PAGE_SIZE PAGE_SIZE
#define SGE_PAGE_SHIFT PAGE_SHIFT
#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
+#define SGE_PAGES (SGE_PAGE_SIZE * PAGES_PER_SGE)
+#define TPA_AGG_SIZE min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * \
+ SGE_PAGES), 0xffff)
/* SGE ring related macros */
#define NUM_RX_SGE_PAGES 2
@@ -353,7 +401,7 @@ union db_prod {
/*
* Number of required SGEs is the sum of two:
* 1. Number of possible opened aggregations (next packet for
- * these aggregations will probably consume SGE immidiatelly)
+ * these aggregations will probably consume SGE immediately)
* 2. Rest of BRB blocks divided by 2 (block will consume new SGE only
* after placement on BD for new TPA aggregation)
*
@@ -374,7 +422,6 @@ union db_prod {
#define BIT_VEC64_ELEM_SHIFT 6
#define BIT_VEC64_ELEM_MASK ((u64)BIT_VEC64_ELEM_SZ - 1)
-
#define __BIT_VEC64_SET_BIT(el, bit) \
do { \
el = ((el) | ((u64)0x1 << (bit))); \
@@ -385,7 +432,6 @@ union db_prod {
el = ((el) & (~((u64)0x1 << (bit)))); \
} while (0)
-
#define BIT_VEC64_SET_BIT(vec64, idx) \
__BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
(idx) & BIT_VEC64_ELEM_MASK)
@@ -406,8 +452,6 @@ union db_prod {
/*******************************************************/
-
-
/* Number of u64 elements in SGE mask array */
#define RX_SGE_MASK_LEN (NUM_RX_SGE / BIT_VEC64_ELEM_SZ)
#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1)
@@ -438,7 +482,7 @@ struct bnx2x_agg_info {
u16 vlan_tag;
u16 len_on_bd;
u32 rxhash;
- bool l4_rxhash;
+ enum pkt_hash_types rxhash_type;
u16 gro_size;
u16 full_page;
};
@@ -479,13 +523,29 @@ enum bnx2x_tpa_mode_t {
struct bnx2x_fastpath {
struct bnx2x *bp; /* parent */
-#define BNX2X_NAPI_WEIGHT 128
struct napi_struct napi;
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ unsigned int state;
+#define BNX2X_FP_STATE_IDLE 0
+#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
+#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */
+#define BNX2X_FP_STATE_DISABLED (1 << 2)
+#define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */
+#define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */
+#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
+#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
+#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
+#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
+ /* protect state */
+ spinlock_t lock;
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
union host_hc_status_block status_blk;
- /* chip independed shortcuts into sb structure */
+ /* chip independent shortcuts into sb structure */
__le16 *sb_index_values;
__le16 *sb_running_index;
- /* chip independed shortcut into rx_prods_offset memory */
+ /* chip independent shortcut into rx_prods_offset memory */
u32 ustorm_rx_prods_offset;
u32 rx_buf_size;
@@ -553,6 +613,138 @@ struct bnx2x_fastpath {
#define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index]))
#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
+{
+ spin_lock_init(&fp->lock);
+ fp->state = BNX2X_FP_STATE_IDLE;
+}
+
+/* called from the device poll routine to get ownership of a FP */
+static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
+{
+ bool rc = true;
+
+ spin_lock_bh(&fp->lock);
+ if (fp->state & BNX2X_FP_LOCKED) {
+ WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
+ fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
+ rc = false;
+ } else {
+ /* we don't care if someone yielded */
+ fp->state = BNX2X_FP_STATE_NAPI;
+ }
+ spin_unlock_bh(&fp->lock);
+ return rc;
+}
+
+/* returns true if someone tried to get the FP while napi had it */
+static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+{
+ bool rc = false;
+
+ spin_lock_bh(&fp->lock);
+ WARN_ON(fp->state &
+ (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
+
+ if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
+ rc = true;
+
+ /* state ==> idle, unless currently disabled */
+ fp->state &= BNX2X_FP_STATE_DISABLED;
+ spin_unlock_bh(&fp->lock);
+ return rc;
+}
+
+/* called from bnx2x_low_latency_poll() */
+static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
+{
+ bool rc = true;
+
+ spin_lock_bh(&fp->lock);
+ if ((fp->state & BNX2X_FP_LOCKED)) {
+ fp->state |= BNX2X_FP_STATE_POLL_YIELD;
+ rc = false;
+ } else {
+ /* preserve yield marks */
+ fp->state |= BNX2X_FP_STATE_POLL;
+ }
+ spin_unlock_bh(&fp->lock);
+ return rc;
+}
+
+/* returns true if someone tried to get the FP while it was locked */
+static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+{
+ bool rc = false;
+
+ spin_lock_bh(&fp->lock);
+ WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
+
+ if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
+ rc = true;
+
+ /* state ==> idle, unless currently disabled */
+ fp->state &= BNX2X_FP_STATE_DISABLED;
+ spin_unlock_bh(&fp->lock);
+ return rc;
+}
+
+/* true if a socket is polling, even if it did not get the lock */
+static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
+{
+ WARN_ON(!(fp->state & BNX2X_FP_OWNED));
+ return fp->state & BNX2X_FP_USER_PEND;
+}
+
+/* false if fp is currently owned */
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+ int rc = true;
+
+ spin_lock_bh(&fp->lock);
+ if (fp->state & BNX2X_FP_OWNED)
+ rc = false;
+ fp->state |= BNX2X_FP_STATE_DISABLED;
+ spin_unlock_bh(&fp->lock);
+
+ return rc;
+}
+#else
+static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
+{
+}
+
+static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
+{
+ return true;
+}
+
+static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+{
+ return false;
+}
+
+static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
+{
+ return false;
+}
+
+static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+{
+ return false;
+}
+
+static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
+{
+ return false;
+}
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+ return true;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
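A condensed sketch of how the NAPI side is expected to pair these helpers (hypothetical function; the driver's real bnx2x_poll() carries more logic):

static int example_poll_body(struct bnx2x_fastpath *fp, int budget)
{
	int work_done = 0;

	if (!bnx2x_fp_lock_napi(fp))
		return budget;	/* busy-poll user owns the FP; retry later */

	/* ... consume RX/TX completions here, bumping work_done ... */

	if (bnx2x_fp_unlock_napi(fp))
		return budget;	/* a poller yielded meanwhile; stay scheduled */

	return work_done;
}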
+
/* Use 2500 as a mini-jumbo MTU for FCoE */
#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
@@ -568,12 +760,10 @@ struct bnx2x_fastpath {
txdata_ptr[FIRST_TX_COS_INDEX] \
->var)
-
#define IS_ETH_FP(fp) ((fp)->index < BNX2X_NUM_ETH_QUEUES((fp)->bp))
#define IS_FCOE_FP(fp) ((fp)->index == FCOE_IDX((fp)->bp))
#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp))
-
/* MC hsi */
#define MAX_FETCH_BD 13 /* HW max BDs per packet */
#define RX_COPY_THRESH 92
@@ -600,9 +790,10 @@ struct bnx2x_fastpath {
 * START_BD - describes the packet
 * START_BD (split) - includes unpaged data segment for GSO
* PARSING_BD - for TSO and CSUM data
- * Frag BDs - decribes pages for frags
+ * PARSING_BD2 - for encapsulation data
+ * Frag BDs - describes pages for frags
*/
-#define BDS_PER_TX_PKT 3
+#define BDS_PER_TX_PKT 4
#define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT)
/* max BDs per tx packet including next pages */
#define MAX_DESC_PER_TX_PKT (MAX_BDS_PER_TX_PKT + \
@@ -680,12 +871,10 @@ struct bnx2x_fastpath {
FW_DROP_LEVEL(bp))
#define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM)
-
/* This is needed for determining of last_max */
#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b))
#define SUB_S32(a, b) (s32)((s32)(a) - (s32)(b))
-
#define BNX2X_SWCID_SHIFT 17
#define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1)
@@ -699,36 +888,39 @@ struct bnx2x_fastpath {
#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */
-#define BNX2X_DB_SHIFT 7 /* 128 bytes*/
+#define BNX2X_DB_SHIFT 3 /* 8 bytes*/
#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
#error "Min DB doorbell stride is 8"
#endif
-#define DPM_TRIGER_TYPE 0x40
#define DOORBELL(bp, cid, val) \
do { \
- writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \
- DPM_TRIGER_TYPE); \
+ writel((u32)(val), bp->doorbells + (bp->db_size * (cid))); \
} while (0)
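Worked doorbell arithmetic for, say, cid 5 (assuming bp->db_size == 1 << BNX2X_DB_SHIFT):

/* old scheme: stride 1 << 7 = 128 bytes plus the 0x40 DPM trigger offset:
 *   doorbells + 5 * 128 + 0x40 = doorbells + 0x2c0
 * new scheme: stride 1 << 3 = 8 bytes, no DPM offset:
 *   doorbells + 5 * 8 = doorbells + 0x28
 * The 16x smaller stride is what makes room for the VF CID ranges below.
 */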
-
/* TX CSUM helpers */
#define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \
skb->csum_offset)
#define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \
skb->csum_offset))
-#define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff)
+#define pbd_tcp_flags(tcp_hdr) (ntohl(tcp_flag_word(tcp_hdr))>>16 & 0xff)
-#define XMIT_PLAIN 0
-#define XMIT_CSUM_V4 0x1
-#define XMIT_CSUM_V6 0x2
-#define XMIT_CSUM_TCP 0x4
-#define XMIT_GSO_V4 0x8
-#define XMIT_GSO_V6 0x10
+#define XMIT_PLAIN 0
+#define XMIT_CSUM_V4 (1 << 0)
+#define XMIT_CSUM_V6 (1 << 1)
+#define XMIT_CSUM_TCP (1 << 2)
+#define XMIT_GSO_V4 (1 << 3)
+#define XMIT_GSO_V6 (1 << 4)
+#define XMIT_CSUM_ENC_V4 (1 << 5)
+#define XMIT_CSUM_ENC_V6 (1 << 6)
+#define XMIT_GSO_ENC_V4 (1 << 7)
+#define XMIT_GSO_ENC_V6 (1 << 8)
-#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6)
-#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6)
+#define XMIT_CSUM_ENC (XMIT_CSUM_ENC_V4 | XMIT_CSUM_ENC_V6)
+#define XMIT_GSO_ENC (XMIT_GSO_ENC_V4 | XMIT_GSO_ENC_V6)
+#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6 | XMIT_CSUM_ENC)
+#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6 | XMIT_GSO_ENC)
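A rough sketch of how a per-packet flags word is assembled from these bits (simplified and hypothetical; the driver's own helper, bnx2x_xmit_type(), also covers the encapsulated cases):

static u32 example_xmit_type(struct sk_buff *skb)
{
	u32 rc = XMIT_PLAIN;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		rc |= (skb->protocol == htons(ETH_P_IPV6)) ?
		      XMIT_CSUM_V6 : XMIT_CSUM_V4;

	if (skb_is_gso_v6(skb))
		rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP;
	else if (skb_is_gso(skb))
		rc |= XMIT_GSO_V4 | XMIT_CSUM_TCP;

	return rc;
}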
/* stuff added to make the code fit 80Col */
#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
@@ -747,7 +939,6 @@ struct bnx2x_fastpath {
#define BNX2X_RX_SUM_FIX(cqe) \
BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
-
#define FP_USB_FUNC_OFF \
offsetof(struct cstorm_status_block_u, func)
#define FP_CSB_FUNC_OFF \
@@ -789,48 +980,64 @@ struct bnx2x_common {
#define CHIP_NUM_57711E 0x1650
#define CHIP_NUM_57712 0x1662
#define CHIP_NUM_57712_MF 0x1663
+#define CHIP_NUM_57712_VF 0x166f
#define CHIP_NUM_57713 0x1651
#define CHIP_NUM_57713E 0x1652
#define CHIP_NUM_57800 0x168a
#define CHIP_NUM_57800_MF 0x16a5
+#define CHIP_NUM_57800_VF 0x16a9
#define CHIP_NUM_57810 0x168e
#define CHIP_NUM_57810_MF 0x16ae
+#define CHIP_NUM_57810_VF 0x16af
#define CHIP_NUM_57811 0x163d
#define CHIP_NUM_57811_MF 0x163e
-#define CHIP_NUM_57840_OBSOLETE 0x168d
+#define CHIP_NUM_57811_VF 0x163f
+#define CHIP_NUM_57840_OBSOLETE 0x168d
#define CHIP_NUM_57840_MF_OBSOLETE 0x16ab
#define CHIP_NUM_57840_4_10 0x16a1
#define CHIP_NUM_57840_2_20 0x16a2
#define CHIP_NUM_57840_MF 0x16a4
+#define CHIP_NUM_57840_VF 0x16ad
#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711)
#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E)
#define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712)
+#define CHIP_IS_57712_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_VF)
#define CHIP_IS_57712_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_MF)
#define CHIP_IS_57800(bp) (CHIP_NUM(bp) == CHIP_NUM_57800)
#define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF)
+#define CHIP_IS_57800_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_VF)
#define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810)
#define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF)
+#define CHIP_IS_57810_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_VF)
#define CHIP_IS_57811(bp) (CHIP_NUM(bp) == CHIP_NUM_57811)
#define CHIP_IS_57811_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_MF)
+#define CHIP_IS_57811_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_VF)
#define CHIP_IS_57840(bp) \
((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) || \
(CHIP_NUM(bp) == CHIP_NUM_57840_2_20) || \
(CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE))
#define CHIP_IS_57840_MF(bp) ((CHIP_NUM(bp) == CHIP_NUM_57840_MF) || \
(CHIP_NUM(bp) == CHIP_NUM_57840_MF_OBSOLETE))
+#define CHIP_IS_57840_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_VF)
#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
CHIP_IS_57711E(bp))
+#define CHIP_IS_57811xx(bp) (CHIP_IS_57811(bp) || \
+ CHIP_IS_57811_MF(bp) || \
+ CHIP_IS_57811_VF(bp))
#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \
- CHIP_IS_57712_MF(bp))
+ CHIP_IS_57712_MF(bp) || \
+ CHIP_IS_57712_VF(bp))
#define CHIP_IS_E3(bp) (CHIP_IS_57800(bp) || \
CHIP_IS_57800_MF(bp) || \
+ CHIP_IS_57800_VF(bp) || \
CHIP_IS_57810(bp) || \
CHIP_IS_57810_MF(bp) || \
- CHIP_IS_57811(bp) || \
- CHIP_IS_57811_MF(bp) || \
+ CHIP_IS_57810_VF(bp) || \
+ CHIP_IS_57811xx(bp) || \
CHIP_IS_57840(bp) || \
- CHIP_IS_57840_MF(bp))
+ CHIP_IS_57840_MF(bp) || \
+ CHIP_IS_57840_VF(bp))
#define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
#define USES_WARPCORE(bp) (CHIP_IS_E3(bp))
#define IS_E1H_OFFSET (!CHIP_IS_E1(bp))
@@ -865,14 +1072,14 @@ struct bnx2x_common {
#define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \
(CHIP_REV(bp) == CHIP_REV_Ax))
/* This define is used in two main places:
- * 1. In the early stages of nic_load, to know if to configrue Parser / Searcher
+ * 1. In the early stages of nic_load, to know if to configure Parser / Searcher
* to nic-only mode or to offload mode. Offload mode is configured if either the
* chip is E1x (where MIC_MODE register is not applicable), or if cnic already
* registered for this port (which means that the user wants storage services).
* 2. During cnic-related load, to know if offload mode is already configured in
- * the HW or needs to be configrued.
+ * the HW or needs to be configured.
* Since the transition from nic-mode to offload-mode in HW causes traffic
- * coruption, nic-mode is configured only in ports on which storage services
+ * corruption, nic-mode is configured only in ports on which storage services
* where never requested.
*/
#define CONFIGURE_NIC_MODE(bp) (!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp))
@@ -949,13 +1156,28 @@ struct bnx2x_port {
(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
/* slow path */
+#define BNX2X_MAX_NUM_OF_VFS 64
+#define BNX2X_VF_CID_WND 4 /* log num of queues per VF. HW config. */
+#define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND)
-/* slow path work-queue */
-extern struct workqueue_struct *bnx2x_wq;
+/* We need to reserve doorbell addresses for all VF and queue combinations */
+#define BNX2X_VF_CIDS (BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF)
-#define BNX2X_MAX_NUM_OF_VFS 64
+/* The doorbell is configured to have the same number of CIDs for PFs and for
+ * VFs. For this reason the PF CID zone is as large as the VF zone.
+ */
+#define BNX2X_FIRST_VF_CID BNX2X_VF_CIDS
+#define BNX2X_MAX_NUM_VF_QUEUES 64
#define BNX2X_VF_ID_INVALID 0xFF
+/* the number of VF CIDS multiplied by the amount of bytes reserved for each
+ * cid must not exceed the size of the VF doorbell
+ */
+#define BNX2X_VF_BAR_SIZE 512
+#if (BNX2X_VF_BAR_SIZE < BNX2X_CIDS_PER_VF * (1 << BNX2X_DB_SHIFT))
+#error "VF doorbell bar size is 512"
+#endif
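Plugging in the constants defined above shows the margin the #error guards:

/* BNX2X_CIDS_PER_VF * (1 << BNX2X_DB_SHIFT) = 16 * 8 = 128 bytes per VF,
 * well within the 512-byte BNX2X_VF_BAR_SIZE; the build would only break
 * if the per-VF CID window or the doorbell stride were ever enlarged.
 */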
+
/*
* The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
* control by the number of fast-path status blocks supported by the
@@ -968,14 +1190,14 @@ extern struct workqueue_struct *bnx2x_wq;
* If the maximum number of FP-SB available is X then:
* a. If CNIC is supported it consumes 1 FP-SB thus the max number of
* regular L2 queues is Y=X-1
- * b. in MF mode the actual number of L2 queues is Y= (X-1/MF_factor)
+ * b. In MF mode the actual number of L2 queues is Y= (X-1/MF_factor)
* c. If the FCoE L2 queue is supported the actual number of L2 queues
* is Y+1
* d. The number of irqs (MSIX vectors) is either Y+1 (one extra for
* slow-path interrupts) or Y+2 if CNIC is supported (one additional
* FP interrupt context for the CNIC).
* e. The number of HW context (CID count) is always X or X+1 if FCoE
- * L2 queue is supported. the cid for the FCoE L2 queue is always X.
+ * L2 queue is supported. The cid for the FCoE L2 queue is always X.
*/
/* fast-path interrupt contexts E1x */
@@ -1005,8 +1227,9 @@ union cdu_context {
/* TM (timers) host DB constants */
#define TM_ILT_PAGE_SZ_HW 0
#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
-/* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
-#define TM_CONN_NUM 1024
+#define TM_CONN_NUM (BNX2X_FIRST_VF_CID + \
+ BNX2X_VF_CIDS + \
+ CNIC_ISCSI_CID_MAX)
#define TM_ILT_SZ (8 * TM_CONN_NUM)
#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
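Rough sizing with the constants above (CNIC_ISCSI_CID_MAX comes from cnic_if.h; 256 is assumed here purely for illustration):

/* TM_CONN_NUM  = 1024 (first VF cid) + 1024 (VF cids) + 256 = 2304
 * TM_ILT_SZ    = 8 * 2304                    = 18432 bytes
 * TM_ILT_LINES = DIV_ROUND_UP(18432, 4096)   = 5 ILT lines
 */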
@@ -1028,7 +1251,6 @@ struct bnx2x_slowpath {
struct eth_classify_rules_ramrod_data e2;
} mac_rdata;
-
union {
struct tstorm_eth_mac_filter_config e1x;
struct eth_filter_rules_ramrod_data e2;
@@ -1045,6 +1267,7 @@ struct bnx2x_slowpath {
union {
struct client_init_ramrod_data init_data;
struct client_update_ramrod_data update_data;
+ struct tpa_update_ramrod_data tpa_data;
} q_rdata;
union {
@@ -1058,7 +1281,10 @@ struct bnx2x_slowpath {
* Therefore, if they would have been defined in the same union,
* data can get corrupted.
*/
- struct afex_vif_list_ramrod_data func_afex_rdata;
+ union {
+ struct afex_vif_list_ramrod_data viflist_data;
+ struct function_update_data func_update;
+ } func_afex_rdata;
/* used by dmae command executer */
struct dmae_command dmae[MAX_DMAE_C];
@@ -1079,7 +1305,6 @@ struct bnx2x_slowpath {
#define bnx2x_sp_mapping(bp, var) \
(bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var))
-
/* attn group wiring */
#define MAX_DYNAMIC_ATTN_GRPS 8
@@ -1104,6 +1329,7 @@ struct hw_context {
/* forward */
struct bnx2x_ilt;
+struct bnx2x_vfdb;
enum bnx2x_recovery_state {
BNX2X_RECOVERY_DONE,
@@ -1165,27 +1391,39 @@ struct bnx2x_fw_stats_req {
};
struct bnx2x_fw_stats_data {
- struct stats_counter storm_counters;
- struct per_port_stats port;
- struct per_pf_stats pf;
+ struct stats_counter storm_counters;
+ struct per_port_stats port;
+ struct per_pf_stats pf;
struct fcoe_statistics_params fcoe;
- struct per_queue_stats queue_stats[1];
+ struct per_queue_stats queue_stats[1];
};
/* Public slow path states */
-enum {
+enum sp_rtnl_flag {
BNX2X_SP_RTNL_SETUP_TC,
BNX2X_SP_RTNL_TX_TIMEOUT,
- BNX2X_SP_RTNL_AFEX_F_UPDATE,
BNX2X_SP_RTNL_FAN_FAILURE,
+ BNX2X_SP_RTNL_AFEX_F_UPDATE,
+ BNX2X_SP_RTNL_ENABLE_SRIOV,
+ BNX2X_SP_RTNL_VFPF_MCAST,
+ BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
+ BNX2X_SP_RTNL_RX_MODE,
+ BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ BNX2X_SP_RTNL_TX_STOP,
+ BNX2X_SP_RTNL_GET_DRV_VERSION,
};
+enum bnx2x_iov_flag {
+ BNX2X_IOV_HANDLE_VF_MSG,
+ BNX2X_IOV_HANDLE_FLR,
+};
struct bnx2x_prev_path_list {
+ struct list_head list;
u8 bus;
u8 slot;
u8 path;
- struct list_head list;
+ u8 aer;
u8 undi;
};
@@ -1231,6 +1469,25 @@ struct bnx2x {
(vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
#define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp))
+#ifdef CONFIG_BNX2X_SRIOV
+ /* protects vf2pf mailbox from simultaneous access */
+ struct mutex vf2pf_mutex;
+ /* vf pf channel mailbox contains request and response buffers */
+ struct bnx2x_vf_mbx_msg *vf2pf_mbox;
+ dma_addr_t vf2pf_mbox_mapping;
+
+ /* we set aside a copy of the acquire response */
+ struct pfvf_acquire_resp_tlv acquire_resp;
+
+ /* bulletin board for messages from pf to vf */
+ union pf_vf_bulletin *pf2vf_bulletin;
+ dma_addr_t pf2vf_bulletin_mapping;
+
+ struct pf_vf_bulletin_content old_bulletin;
+
+ u16 requested_nr_virtfn;
+#endif /* CONFIG_BNX2X_SRIOV */
+
struct net_device *dev;
struct pci_dev *pdev;
@@ -1295,8 +1552,6 @@ struct bnx2x {
__le16 *eq_cons_sb;
atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */
-
-
/* Counter for marking that there is a STAT_QUERY ramrod pending */
u16 stats_pending;
/* Counter for completed statistics ramrods */
@@ -1312,14 +1567,11 @@ struct bnx2x {
#define PCI_32BIT_FLAG (1 << 1)
#define ONE_PORT_FLAG (1 << 2)
#define NO_WOL_FLAG (1 << 3)
-#define USING_DAC_FLAG (1 << 4)
#define USING_MSIX_FLAG (1 << 5)
#define USING_MSI_FLAG (1 << 6)
#define DISABLE_MSI_FLAG (1 << 7)
#define TPA_ENABLE_FLAG (1 << 8)
#define NO_MCP_FLAG (1 << 9)
-
-#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
#define GRO_ENABLE_FLAG (1 << 10)
#define MF_FUNC_DIS (1 << 11)
#define OWN_CNIC_IRQ (1 << 12)
@@ -1327,9 +1579,25 @@ struct bnx2x {
#define NO_ISCSI_FLAG (1 << 14)
#define NO_FCOE_FLAG (1 << 15)
#define BC_SUPPORTS_PFC_STATS (1 << 17)
+#define TX_SWITCHING (1 << 18)
#define BC_SUPPORTS_FCOE_FEATURES (1 << 19)
#define USING_SINGLE_MSIX_FLAG (1 << 20)
#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21)
+#define IS_VF_FLAG (1 << 22)
+#define INTERRUPTS_ENABLED_FLAG (1 << 23)
+#define BC_SUPPORTS_RMMOD_CMD (1 << 24)
+#define HAS_PHYS_PORT_ID (1 << 25)
+#define AER_ENABLED (1 << 26)
+
+#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG)
+
+#ifdef CONFIG_BNX2X_SRIOV
+#define IS_VF(bp) ((bp)->flags & IS_VF_FLAG)
+#define IS_PF(bp) (!((bp)->flags & IS_VF_FLAG))
+#else
+#define IS_VF(bp) false
+#define IS_PF(bp) true
+#endif
#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -1345,10 +1613,12 @@ struct bnx2x {
*/
bool fcoe_init;
- int pm_cap;
int mrrs;
struct delayed_work sp_task;
+ struct delayed_work iov_task;
+
+ atomic_t interrupt_occurred;
struct delayed_work sp_rtnl_task;
struct delayed_work period_task;
@@ -1395,7 +1665,7 @@ struct bnx2x {
u16 rx_ticks_int;
u16 rx_ticks;
/* Maximal coalescing timeout in us */
-#define BNX2X_MAX_COALESCE_TOUT (0xf0*12)
+#define BNX2X_MAX_COALESCE_TOUT (0xff*BNX2X_BTR)
u32 lin_cnt;
@@ -1432,11 +1702,16 @@ struct bnx2x {
u8 igu_sb_cnt;
u8 min_msix_vec_cnt;
+ u32 igu_base_addr;
dma_addr_t def_status_blk_mapping;
struct bnx2x_slowpath *slowpath;
dma_addr_t slowpath_mapping;
+ /* Mechanism protecting the drv_info_to_mcp */
+ struct mutex drv_info_mutex;
+ bool drv_info_mng_owner;
+
/* Total number of FW statistics requests */
u8 fw_stats_num;
@@ -1463,10 +1738,10 @@ struct bnx2x {
dma_addr_t fw_stats_data_mapping;
int fw_stats_data_sz;
- /* For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB
+ /* For max 1024 cids (VF RSS), 32KB ILT page size and 1KB
* context size we need 8 ILT entries.
*/
-#define ILT_MAX_L2_LINES 8
+#define ILT_MAX_L2_LINES 32
struct hw_context context[ILT_MAX_L2_LINES];
struct bnx2x_ilt *ilt;
@@ -1482,10 +1757,11 @@ struct bnx2x {
* Maximum CID count that might be required by the bnx2x:
* Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
*/
+
#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
- + 2 * CNIC_SUPPORT(bp))
+ + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp)))
#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
- + 2 * CNIC_SUPPORT(bp))
+ + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp)))
#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
ILT_PAGE_CIDS))
@@ -1511,7 +1787,7 @@ struct bnx2x {
struct mutex cnic_mutex;
struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj;
- /* Start index of the "special" (CNIC related) L2 cleints */
+ /* Start index of the "special" (CNIC related) L2 clients */
u8 cnic_base_cl_id;
int dmae_ready;
@@ -1580,6 +1856,9 @@ struct bnx2x {
char fw_ver[32];
const struct firmware *firmware;
+ struct bnx2x_vfdb *vfdb;
+#define IS_SRIOV(bp) ((bp)->vfdb)
+
/* DCB support on/off */
u16 dcb_state;
#define BNX2X_DCB_STATE_OFF 0
@@ -1599,6 +1878,10 @@ struct bnx2x {
int dcb_version;
/* CAM credit pools */
+
+ /* used only in sriov */
+ struct bnx2x_credit_pool_obj vlans_pool;
+
struct bnx2x_credit_pool_obj macs_pool;
/* RX_MODE object */
@@ -1618,7 +1901,10 @@ struct bnx2x {
/* operation indication for the sp_rtnl task */
unsigned long sp_rtnl_state;
- /* DCBX Negotation results */
+ /* Indication of the IOV tasks */
+ unsigned long iov_task_state;
+
+ /* DCBX Negotiation results */
struct dcbx_features dcbx_local_feat;
u32 dcbx_error;
@@ -1636,6 +1922,13 @@ struct bnx2x {
/* priority to cos mapping */
u8 prio_to_cos[8];
+
+ int fp_array_size;
+ u32 dump_preset_idx;
+ bool stats_started;
+ struct semaphore stats_sema;
+
+ u8 phys_port_id[ETH_ALEN];
};
/* Tx queues may be less or equal to Rx queues */
@@ -1670,8 +1963,7 @@ extern int num_queues;
#define FUNC_FLG_TPA 0x0008
#define FUNC_FLG_SPQ 0x0010
#define FUNC_FLG_LEADING 0x0020 /* PF only */
-
-
+#define FUNC_FLG_LEADING_STATS 0x0040
struct bnx2x_func_init_params {
/* dma */
dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */
@@ -1769,9 +2061,6 @@ struct bnx2x_func_init_params {
#define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
-
-
-
/**
* bnx2x_set_mac_one - configure a single MAC address
*
@@ -1813,12 +2102,15 @@ int bnx2x_del_all_macs(struct bnx2x *bp,
/* Init Function API */
void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
+void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+ u8 vf_valid, int fw_sb_id, int igu_sb_id);
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode);
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
void bnx2x_read_mf_cfg(struct bnx2x *bp);
+int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val);
/* dmae */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
@@ -1830,6 +2122,18 @@ u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
bool with_comp, u8 comp_type);
+void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+ u8 src_type, u8 dst_type);
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+ u32 *comp);
+
+/* FLR related routines */
+u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp);
+void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count);
+int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt);
+u8 bnx2x_is_pcie_pending(struct pci_dev *dev);
+int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
+ char *msg, u32 poll_cnt);
void bnx2x_calc_fc_adv(struct bnx2x *bp);
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
@@ -1837,6 +2141,8 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
void bnx2x_update_coalesce(struct bnx2x *bp);
int bnx2x_get_cur_phy_idx(struct bnx2x *bp);
+bool bnx2x_port_after_undi(struct bnx2x *bp);
+
static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
int wait)
{
@@ -1854,12 +2160,11 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
return val;
}
-#define BNX2X_ILT_ZALLOC(x, y, size) \
- do { \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
- if (x) \
- memset(x, 0, size); \
- } while (0)
+void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
+ bool is_pf);
+
+#define BNX2X_ILT_ZALLOC(x, y, size) \
+ x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL)
#define BNX2X_ILT_FREE(x, y, size) \
do { \
@@ -1898,7 +2203,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define UNLOAD_CLOSE 1
#define UNLOAD_RECOVERY 2
-
/* DMAE command defines */
#define DMAE_TIMEOUT -1
#define DMAE_PCI_ERROR -2 /* E2 and onward */
@@ -1962,7 +2266,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
#define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit
- indicates eror */
+ * indicates error
+ */
#define MAX_DMAE_C_PER_PORT 8
#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
@@ -1979,7 +2284,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_NUM_TESTS_SF 7
#define BNX2X_NUM_TESTS_MF 3
#define BNX2X_NUM_TESTS(bp) (IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \
- BNX2X_NUM_TESTS_SF)
+ IS_VF(bp) ? 0 : BNX2X_NUM_TESTS_SF)
#define BNX2X_PHY_LOOPBACK 0
#define BNX2X_MAC_LOOPBACK 1
@@ -1990,10 +2295,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \
BNX2X_PHY_LOOPBACK_FAILED)
-
#define STROM_ASSERT_ARRAY_SIZE 50
-
/* must be used on a CID before placing it on a HW ring */
#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
(BP_VN(bp) << BNX2X_SWCID_SHIFT) | \
@@ -2002,7 +2305,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe))
#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1)
-
#define BNX2X_BTR 4
#define MAX_SPQ_PENDING 8
@@ -2024,7 +2326,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
/* Memory of fairness algorithm . 2 cycles */
#define FAIR_MEM 2
-
#define ATTN_NIG_FOR_FUNC (1L << 8)
#define ATTN_SW_TIMER_4_FUNC (1L << 9)
#define GPIO_2_FUNC (1L << 10)
@@ -2040,6 +2341,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define ATTN_HARD_WIRED_MASK 0xff00
#define ATTENTION_ID 4
+#define IS_MF_STORAGE_ONLY(bp) (IS_MF_STORAGE_SD(bp) || \
+ IS_MF_FCOE_AFEX(bp))
/* stuff added to make the code fit 80Col */
@@ -2067,6 +2370,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
(AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT)
#define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \
AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \
@@ -2128,7 +2432,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define MULTI_MASK 0x7f
-
#define DEF_USB_FUNC_OFF offsetof(struct cstorm_def_status_block_u, func)
#define DEF_CSB_FUNC_OFF offsetof(struct cstorm_def_status_block_c, func)
#define DEF_XSB_FUNC_OFF offsetof(struct xstorm_def_status_block, func)
@@ -2156,18 +2459,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
(&bp->def_status_blk->sp_sb.\
index_values[HC_SP_INDEX_ETH_DEF_CONS])
-#define SET_FLAG(value, mask, flag) \
- do {\
- (value) &= ~(mask);\
- (value) |= ((flag) << (mask##_SHIFT));\
- } while (0)
-
-#define GET_FLAG(value, mask) \
- (((value) & (mask)) >> (mask##_SHIFT))
-
-#define GET_FIELD(value, fname) \
- (((value) & (fname##_MASK)) >> (fname##_SHIFT))
-
#define CAM_IS_INVALID(x) \
(GET_FLAG(x.flags, \
MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
@@ -2178,7 +2469,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \
TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4)
-
#ifndef PXP2_REG_PXP2_INT_STS
#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0
#endif
@@ -2190,9 +2480,17 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_VPD_LEN 128
#define VENDOR_ID_LEN 4
+#define VF_ACQUIRE_THRESH 3
+#define VF_ACQUIRE_MAC_FILTERS 1
+#define VF_ACQUIRE_MC_FILTERS 10
+
+#define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
+ (!((me_reg) & ME_REG_VF_ERR)))
+int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err);
+
/* Congestion management fairness mode */
-#define CMNG_FNS_NONE 0
-#define CMNG_FNS_MINMAX 1
+#define CMNG_FNS_NONE 0
+#define CMNG_FNS_MINMAX 1
#define HC_SEG_ACCESS_DEF 0 /*Driver decision 0-3*/
#define HC_SEG_ACCESS_ATTN 4
@@ -2205,10 +2503,9 @@ static const u32 dmae_reg_go_c[] = {
DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
-void bnx2x_set_ethtool_ops(struct net_device *netdev);
+void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev);
void bnx2x_notify_link_changed(struct bnx2x *bp);
-
#define BNX2X_MF_SD_PROTOCOL(bp) \
((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK)
@@ -2229,6 +2526,18 @@ void bnx2x_notify_link_changed(struct bnx2x *bp);
(BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \
BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
+#define SET_FLAG(value, mask, flag) \
+ do {\
+ (value) &= ~(mask);\
+ (value) |= ((flag) << (mask##_SHIFT));\
+ } while (0)
+
+#define GET_FLAG(value, mask) \
+ (((value) & (mask)) >> (mask##_SHIFT))
+
+#define GET_FIELD(value, fname) \
+ (((value) & (fname##_MASK)) >> (fname##_SHIFT))
+
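These helpers lean on the HSI naming convention that a field's mask constant FOO is paired with a FOO_SHIFT. A hypothetical example (names invented for illustration):

/* #define EXAMPLE_TYPE        (0x3 << 4)
 * #define EXAMPLE_TYPE_SHIFT  4
 *
 * u16 flags = 0;
 * SET_FLAG(flags, EXAMPLE_TYPE, 2);     flags is now 0x20
 * GET_FLAG(flags, EXAMPLE_TYPE)         evaluates to 2
 */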
enum {
SWITCH_UPDATE,
AFEX_UPDATE,
@@ -2236,4 +2545,13 @@ enum {
#define NUM_MACS 8
+void bnx2x_set_local_cmng(struct bnx2x *bp);
+
+void bnx2x_update_mng_version(struct bnx2x *bp);
+
+#define MCPR_SCRATCH_BASE(bp) \
+ (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
+
+#define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX))
+
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 01588b66a38..c43e7238de2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1,12 +1,12 @@
/* bnx2x_cmn.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
@@ -21,14 +21,55 @@
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
+#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
+#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"
+static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
+static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
+static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
+static int bnx2x_poll(struct napi_struct *napi, int budget);
+static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
+{
+ int i;
+
+ /* Add NAPI objects */
+ for_each_rx_queue_cnic(bp, i) {
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
+ bnx2x_poll, NAPI_POLL_WEIGHT);
+ napi_hash_add(&bnx2x_fp(bp, i, napi));
+ }
+}
+
+static void bnx2x_add_all_napi(struct bnx2x *bp)
+{
+ int i;
+
+ /* Add NAPI objects */
+ for_each_eth_queue(bp, i) {
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
+ bnx2x_poll, NAPI_POLL_WEIGHT);
+ napi_hash_add(&bnx2x_fp(bp, i, napi));
+ }
+}
+
+static int bnx2x_calc_num_queues(struct bnx2x *bp)
+{
+ int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
+
+ /* Reduce memory usage in kdump environment by using only one queue */
+ if (reset_devices)
+ nq = 1;
+
+ nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
+ return nq;
+}
/**
* bnx2x_move_fp - move content of the fastpath structure.
@@ -53,6 +94,7 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
int old_max_eth_txqs, new_max_eth_txqs;
int old_txdata_index = 0, new_txdata_index = 0;
+ struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
/* Copy the NAPI object as it has been already initialized */
from_fp->napi = to_fp->napi;
@@ -61,6 +103,11 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
memcpy(to_fp, from_fp, sizeof(*to_fp));
to_fp->index = to;
+ /* Retain the tpa_info of the original `to' version as we don't want
+ * 2 FPs to contain the same tpa_info pointer.
+ */
+ to_fp->tpa_info = old_tpa_info;
+
/* move sp_objs contents as well, as their indices match fp ones */
memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
@@ -80,13 +127,66 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
}
- memcpy(&bp->bnx2x_txq[old_txdata_index],
- &bp->bnx2x_txq[new_txdata_index],
+ memcpy(&bp->bnx2x_txq[new_txdata_index],
+ &bp->bnx2x_txq[old_txdata_index],
sizeof(struct bnx2x_fp_txdata));
to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}
-int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
+/**
+ * bnx2x_fill_fw_str - Fill buffer with FW version string.
+ *
+ * @bp: driver handle
+ * @buf: character buffer to fill with the fw name
+ * @buf_len: length of the above buffer
+ *
+ */
+void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
+{
+ if (IS_PF(bp)) {
+ u8 phy_fw_ver[PHY_FW_VER_LEN];
+
+ phy_fw_ver[0] = '\0';
+ bnx2x_get_ext_phy_fw_version(&bp->link_params,
+ phy_fw_ver, PHY_FW_VER_LEN);
+ strlcpy(buf, bp->fw_ver, buf_len);
+ snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
+ "bc %d.%d.%d%s%s",
+ (bp->common.bc_ver & 0xff0000) >> 16,
+ (bp->common.bc_ver & 0xff00) >> 8,
+ (bp->common.bc_ver & 0xff),
+ ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
+ } else {
+ bnx2x_vf_fill_fw_str(bp, buf, buf_len);
+ }
+}
+
+/**
+ * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
+ *
+ * @bp: driver handle
+ * @delta: number of eth queues which were not allocated
+ */
+static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
+{
+ int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
+
+ /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
+ * backward along the array could cause memory to be overridden
+ */
+ for (cos = 1; cos < bp->max_cos; cos++) {
+ for (i = 0; i < old_eth_num - delta; i++) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ int new_idx = cos * (old_eth_num - delta) + i;
+
+ memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
+ sizeof(struct bnx2x_fp_txdata));
+ fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
+ }
+ }
+}
+
+int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
/* free skb in the packet ring at pos idx
* return idx of last bd freed
@@ -101,6 +201,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
struct sk_buff *skb = tx_buf->skb;
u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
int nbd;
+ u16 split_bd_len = 0;
/* prefetch skb end pointer to speedup dev_kfree_skb() */
prefetch(&skb->end);
@@ -108,11 +209,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
txdata->txq_index, idx, tx_buf, skb);
- /* unmap first bd */
tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
- dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
- BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
-
nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
@@ -130,12 +227,25 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
--nbd;
bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
- /* ...and the TSO split header bd since they have no mapping */
+ if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
+ /* Skip second parse bd... */
+ --nbd;
+ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+ }
+
+ /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
+ tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
+ split_bd_len = BD_UNMAP_LEN(tx_data_bd);
--nbd;
bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
}
+ /* unmap first bd */
+ dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+ BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
+ DMA_TO_DEVICE);
+
/* now free frags */
while (nbd > 0) {
@@ -185,7 +295,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
txdata->txq_index, hw_cons, sw_cons, pkt_cons);
bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
- &pkts_compl, &bytes_compl);
+ &pkts_compl, &bytes_compl);
sw_cons++;
}
@@ -207,7 +317,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
smp_mb();
if (unlikely(netif_tx_queue_stopped(txq))) {
- /* Taking tx_lock() is needed to prevent reenabling the queue
+ /* Taking tx_lock() is needed to prevent re-enabling the queue
* while it's empty. This could have happen if rx_action() gets
* suspended in bnx2x_tx_int() after the condition before
* netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
@@ -291,24 +401,26 @@ static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
fp->last_max_sge, fp->rx_sge_prod);
}
-/* Set Toeplitz hash value in the skb using the value from the
+/* Get Toeplitz hash value in the skb using the value from the
* CQE (calculated by HW).
*/
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
const struct eth_fast_path_rx_cqe *cqe,
- bool *l4_rxhash)
+ enum pkt_hash_types *rxhash_type)
{
- /* Set Toeplitz hash from CQE */
+ /* Get Toeplitz hash from CQE */
if ((bp->dev->features & NETIF_F_RXHASH) &&
(cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
enum eth_rss_hash_type htype;
htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
- *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
- (htype == TCP_IPV6_HASH_TYPE);
+ *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
+ (htype == TCP_IPV6_HASH_TYPE)) ?
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
+
return le32_to_cpu(cqe->rss_hash_result);
}
- *l4_rxhash = false;
+ *rxhash_type = PKT_HASH_TYPE_NONE;
return 0;
}
@@ -362,11 +474,10 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
tpa_info->tpa_state = BNX2X_TPA_START;
tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
tpa_info->placement_offset = cqe->placement_offset;
- tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
+ tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
if (fp->mode == TPA_MODE_GRO) {
u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
- tpa_info->full_page =
- SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
+ tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
tpa_info->gro_size = gro_size;
}
@@ -387,31 +498,35 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
*/
#define TPA_TSTAMP_OPT_LEN 12
/**
- * bnx2x_set_lro_mss - calculate the approximate value of the MSS
+ * bnx2x_set_gro_params - compute GRO values
*
- * @bp: driver handle
+ * @skb: packet skb
* @parsing_flags: parsing flags from the START CQE
* @len_on_bd: total length of the first packet for the
* aggregation.
+ * @pkt_len: length of all segments
*
* Approximate value of the MSS for this aggregation calculated using
* the first packet of it.
+ * Compute number of aggregated segments, and gso_type.
*/
-static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
- u16 len_on_bd)
+static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
+ u16 len_on_bd, unsigned int pkt_len,
+ u16 num_of_coalesced_segs)
{
- /*
- * TPA arrgregation won't have either IP options or TCP options
+ /* TPA aggregation won't have either IP options or TCP options
* other than timestamp or IPv6 extension headers.
*/
u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
- PRS_FLAG_OVERETH_IPV6)
+ PRS_FLAG_OVERETH_IPV6) {
hdrs_len += sizeof(struct ipv6hdr);
- else /* IPv4 */
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ } else {
hdrs_len += sizeof(struct iphdr);
-
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ }
	/* Check if there was a TCP timestamp; if there was, it will
	 * always be 12 bytes long: nop nop kind length echo val.
@@ -421,13 +536,18 @@ static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
hdrs_len += TPA_TSTAMP_OPT_LEN;
- return len_on_bd - hdrs_len;
+ skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
+
+ /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
+ * to skb_shinfo(skb)->gso_segs
+ */
+ NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}
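Worked example with illustrative numbers: for an IPv4 aggregation whose first packet carries a TCP timestamp and has len_on_bd == 1514:

/* hdrs_len = ETH_HLEN (14) + tcphdr (20) + iphdr (20) + 12 = 66
 * gso_size = 1514 - 66 = 1448
 * (without the timestamp option the MSS would come out as 1460)
 */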
-static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
- struct bnx2x_fastpath *fp, u16 index)
+static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ u16 index, gfp_t gfp_mask)
{
- struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
+ struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
dma_addr_t mapping;
@@ -438,7 +558,7 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
}
mapping = dma_map_page(&bp->pdev->dev, page, 0,
- SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+ SGE_PAGES, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
__free_pages(page, PAGES_PER_SGE_SHIFT);
BNX2X_ERR("Can't map sge\n");
@@ -475,22 +595,13 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
/* This is needed in order to enable forwarding support */
- if (frag_size) {
- skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
- tpa_info->parsing_flags, len_on_bd);
-
- /* set for GRO */
- if (fp->mode == TPA_MODE_GRO)
- skb_shinfo(skb)->gso_type =
- (GET_FLAG(tpa_info->parsing_flags,
- PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
- PRS_FLAG_OVERETH_IPV6) ?
- SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
- }
-
+ if (frag_size)
+ bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
+ le16_to_cpu(cqe->pkt_len),
+ le16_to_cpu(cqe->num_of_coalesced_segs));
#ifdef BNX2X_STOP_ON_ERROR
- if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
+ if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
pages, cqe_idx);
BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
@@ -508,24 +619,23 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
if (fp->mode == TPA_MODE_GRO)
frag_len = min_t(u32, frag_size, (u32)full_page);
else /* LRO */
- frag_len = min_t(u32, frag_size,
- (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
+ frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
rx_pg = &fp->rx_page_ring[sge_idx];
old_rx_pg = *rx_pg;
/* If we fail to allocate a substitute page, we simply stop
where we are and drop the whole packet */
- err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
+ err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
if (unlikely(err)) {
bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
return err;
}
- /* Unmap the page as we r going to pass it to the stack */
+ /* Unmap the page as we're going to pass it to the stack */
dma_unmap_page(&bp->pdev->dev,
dma_unmap_addr(&old_rx_pg, mapping),
- SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+ SGE_PAGES, DMA_FROM_DEVICE);
/* Add one frag and update the appropriate fields in the skb */
if (fp->mode == TPA_MODE_LRO)
skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
@@ -543,7 +653,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
skb->data_len += frag_len;
- skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
+ skb->truesize += SGE_PAGES;
skb->len += frag_len;
frag_size -= frag_len;
@@ -560,14 +670,74 @@ static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
kfree(data);
}
-static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
+static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
- if (fp->rx_frag_size)
+ if (fp->rx_frag_size) {
+ /* GFP_KERNEL allocations are used only during initialization */
+ if (unlikely(gfp_mask & __GFP_WAIT))
+ return (void *)__get_free_page(gfp_mask);
+
return netdev_alloc_frag(fp->rx_frag_size);
+ }
- return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
+ return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}
+#ifdef CONFIG_INET
+static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ struct tcphdr *th;
+
+ skb_set_transport_header(skb, sizeof(struct iphdr));
+ th = tcp_hdr(skb);
+
+ th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
+ iph->saddr, iph->daddr, 0);
+}
+
+static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
+{
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ struct tcphdr *th;
+
+ skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+ th = tcp_hdr(skb);
+
+ th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
+ &iph->saddr, &iph->daddr, 0);
+}
+
+static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
+ void (*gro_func)(struct bnx2x*, struct sk_buff*))
+{
+ skb_set_network_header(skb, 0);
+ gro_func(bp, skb);
+ tcp_gro_complete(skb);
+}
+#endif
+
+static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+ if (skb_shinfo(skb)->gso_size) {
+ switch (be16_to_cpu(skb->protocol)) {
+ case ETH_P_IP:
+ bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
+ break;
+ case ETH_P_IPV6:
+ bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
+ break;
+ default:
+ BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
+ be16_to_cpu(skb->protocol));
+ }
+ }
+#endif
+ skb_record_rx_queue(skb, fp->rx_queue);
+ napi_gro_receive(&fp->napi, skb);
+}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
struct bnx2x_agg_info *tpa_info,
@@ -591,7 +761,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
goto drop;
/* Try to allocate the new data */
- new_data = bnx2x_frag_alloc(fp);
+ new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
/* Unmap skb in the pool anyway, as we are going to change
pool entry status to BNX2X_TPA_STOP even if new skb allocation
fails. */
@@ -612,8 +782,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
skb_reserve(skb, pad + NET_SKB_PAD);
skb_put(skb, len);
- skb->rxhash = tpa_info->rxhash;
- skb->l4_rxhash = tpa_info->l4_rxhash;
+ skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
skb->protocol = eth_type_trans(skb, bp->dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -621,21 +790,21 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
skb, cqe, cqe_idx)) {
if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
- __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
- napi_gro_receive(&fp->napi, skb);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
+ bnx2x_gro_receive(bp, fp, skb);
} else {
DP(NETIF_MSG_RX_STATUS,
"Failed to allocate new pages - dropping packet!\n");
dev_kfree_skb_any(skb);
}
-
/* put new data in bin */
rx_buf->data = new_data;
return;
}
- bnx2x_frag_free(fp, new_data);
+ if (new_data)
+ bnx2x_frag_free(fp, new_data);
drop:
/* drop the packet and keep the buffer in the bin */
DP(NETIF_MSG_RX_STATUS,
@@ -643,15 +812,15 @@ drop:
bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}
-static int bnx2x_alloc_rx_data(struct bnx2x *bp,
- struct bnx2x_fastpath *fp, u16 index)
+static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ u16 index, gfp_t gfp_mask)
{
u8 *data;
struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
dma_addr_t mapping;
- data = bnx2x_frag_alloc(fp);
+ data = bnx2x_frag_alloc(fp, gfp_mask);
if (unlikely(data == NULL))
return -ENOMEM;
@@ -697,23 +866,21 @@ void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
-int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
+static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
struct bnx2x *bp = fp->bp;
u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
- u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
+ u16 sw_comp_cons, sw_comp_prod;
int rx_pkt = 0;
+ union eth_rx_cqe *cqe;
+ struct eth_fast_path_rx_cqe *cqe_fp;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
return 0;
#endif
-
- /* CQ "next element" is of the size of the regular element,
- that's why it's ok here */
- hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
- if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
- hw_comp_cons++;
+ if (budget <= 0)
+ return rx_pkt;
bd_cons = fp->rx_bd_cons;
bd_prod = fp->rx_bd_prod;
@@ -721,37 +888,43 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
sw_comp_cons = fp->rx_comp_cons;
sw_comp_prod = fp->rx_comp_prod;
- /* Memory barrier necessary as speculative reads of the rx
- * buffer can be ahead of the index in the status block
- */
- rmb();
+ comp_ring_cons = RCQ_BD(sw_comp_cons);
+ cqe = &fp->rx_comp_ring[comp_ring_cons];
+ cqe_fp = &cqe->fast_path_cqe;
DP(NETIF_MSG_RX_STATUS,
- "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
- fp->index, hw_comp_cons, sw_comp_cons);
+ "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
- while (sw_comp_cons != hw_comp_cons) {
+ while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
struct sw_rx_bd *rx_buf = NULL;
struct sk_buff *skb;
- union eth_rx_cqe *cqe;
- struct eth_fast_path_rx_cqe *cqe_fp;
u8 cqe_fp_flags;
enum eth_rx_cqe_type cqe_fp_type;
u16 len, pad, queue;
u8 *data;
- bool l4_rxhash;
+ u32 rxhash;
+ enum pkt_hash_types rxhash_type;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
return 0;
#endif
- comp_ring_cons = RCQ_BD(sw_comp_cons);
bd_prod = RX_BD(bd_prod);
bd_cons = RX_BD(bd_cons);
- cqe = &fp->rx_comp_ring[comp_ring_cons];
- cqe_fp = &cqe->fast_path_cqe;
+ /* A rmb() is required to ensure that the CQE is not read
+ * before it is written by the adapter DMA. PCI ordering
+ * rules will make sure the other fields are written before
+ * the marker at the end of struct eth_fast_path_rx_cqe
+ * but without rmb() a weakly ordered processor can process
+ * stale data. Without the barrier the TPA state machine might
+ * enter an inconsistent state and the kernel stack might be
+ * provided with an incorrect packet description - these lead
+ * to various kernel crashes.
+ */
+ rmb();
+
cqe_fp_flags = cqe_fp->type_error_flags;
cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
@@ -795,7 +968,6 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
cqe_fp);
goto next_rx;
-
}
queue = cqe->end_agg_cqe.queue_index;
tpa_info = &fp->tpa_info[queue];
@@ -856,7 +1028,8 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
memcpy(skb->data, data + pad, len);
bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
} else {
- if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
+ if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
+ GFP_ATOMIC) == 0)) {
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size,
@@ -883,8 +1056,8 @@ reuse_rx:
skb->protocol = eth_type_trans(skb, bp->dev);
/* Set Toeplitz hash for a none-LRO skb */
- skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
- skb->l4_rxhash = l4_rxhash;
+ rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
+ skb_set_hash(skb, rxhash, rxhash_type);
skb_checksum_none_assert(skb);
@@ -896,11 +1069,15 @@ reuse_rx:
if (le16_to_cpu(cqe_fp->pars_flags.flags) &
PARSING_FLAGS_VLAN)
- __vlan_hwaccel_put_tag(skb,
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(cqe_fp->vlan_tag));
- napi_gro_receive(&fp->napi, skb);
+ skb_mark_napi_id(skb, &fp->napi);
+ if (bnx2x_fp_ll_polling(fp))
+ netif_receive_skb(skb);
+ else
+ napi_gro_receive(&fp->napi, skb);
next_rx:
rx_buf->data = NULL;
@@ -912,8 +1089,15 @@ next_cqe:
sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
+ /* mark CQE as free */
+ BNX2X_SEED_CQE(cqe_fp);
+
if (rx_pkt == budget)
break;
+
+ comp_ring_cons = RCQ_BD(sw_comp_cons);
+ cqe = &fp->rx_comp_ring[comp_ring_cons];
+ cqe_fp = &cqe->fast_path_cqe;
} /* while */
fp->rx_bd_cons = bd_cons;
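
The BNX2X_IS_CQE_COMPLETED()/BNX2X_SEED_CQE() pair plus the rmb() implement a completion-marker handshake: the device DMA-writes the CQE payload, then the marker; the CPU tests the marker, issues a read barrier, and only then trusts the payload, re-seeding the marker once the entry is consumed. The same ordering contract between two CPUs, sketched with C11 acquire/release (the real code needs rmb() because the producer is device DMA, which C11 atomics only approximate):

#include <stdatomic.h>
#include <stdbool.h>

struct cqe_sketch {
	int payload;		/* written first, like the CQE body */
	atomic_bool done;	/* marker at the end of the entry */
};

static void producer(struct cqe_sketch *c, int data)
{
	c->payload = data;
	/* release: payload is visible before the marker reads true */
	atomic_store_explicit(&c->done, true, memory_order_release);
}

static bool consumer(struct cqe_sketch *c, int *out)
{
	/* acquire pairs with the release above - plays the rmb() role */
	if (!atomic_load_explicit(&c->done, memory_order_acquire))
		return false;
	*out = c->payload;	/* cannot observe stale payload now */
	atomic_store_explicit(&c->done, false, memory_order_relaxed);
	return true;
}
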
@@ -940,6 +1124,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
DP(NETIF_MSG_INTR,
"got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
fp->index, fp->fw_sb_id, fp->igu_sb_id);
+
bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
#ifdef BNX2X_STOP_ON_ERROR
@@ -948,8 +1133,6 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
#endif
/* Handle Rx and Tx according to MSI-X vector */
- prefetch(fp->rx_cons_sb);
-
for_each_cos_in_tx_queue(fp, cos)
prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
@@ -1013,7 +1196,7 @@ static void bnx2x_fill_report_data(struct bnx2x *bp,
memset(data, 0, sizeof(*data));
- /* Fill the report data: efective line speed */
+ /* Fill the report data: effective line speed */
data->line_speed = line_speed;
/* Link is down */
@@ -1056,7 +1239,7 @@ void bnx2x_link_report(struct bnx2x *bp)
*
* @bp: driver handle
*
- * None atomic inmlementation.
+ * Non-atomic implementation.
* Should be called under the phy_lock.
*/
void __bnx2x_link_report(struct bnx2x *bp)
@@ -1064,7 +1247,7 @@ void __bnx2x_link_report(struct bnx2x *bp)
struct bnx2x_link_report_data cur_data;
/* reread mf_cfg */
- if (!CHIP_IS_E1(bp))
+ if (IS_PF(bp) && !CHIP_IS_E1(bp))
bnx2x_read_mf_cfg(bp);
/* Read the current link report info */
@@ -1199,14 +1382,15 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
"mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
if (!fp->disable_tpa) {
- /* Fill the per-aggregtion pool */
+ /* Fill the per-aggregation pool */
for (i = 0; i < MAX_AGG_QS(bp); i++) {
struct bnx2x_agg_info *tpa_info =
&fp->tpa_info[i];
struct sw_rx_bd *first_buf =
&tpa_info->first_buf;
- first_buf->data = bnx2x_frag_alloc(fp);
+ first_buf->data =
+ bnx2x_frag_alloc(fp, GFP_KERNEL);
if (!first_buf->data) {
BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
j);
@@ -1228,7 +1412,8 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
for (i = 0, ring_prod = 0;
i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
- if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
+ if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
+ GFP_KERNEL) < 0) {
BNX2X_ERR("was only able to allocate %d rx sges\n",
i);
BNX2X_ERR("disabling TPA for queue[%d]\n",
@@ -1365,7 +1550,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
}
}
-void bnx2x_free_skbs_cnic(struct bnx2x *bp)
+static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
{
bnx2x_free_tx_skbs_cnic(bp);
bnx2x_free_rx_skbs_cnic(bp);
@@ -1406,10 +1591,14 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
if (nvecs == offset)
return;
- free_irq(bp->msix_table[offset].vector, bp->dev);
- DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
- bp->msix_table[offset].vector);
- offset++;
+
+ /* VFs don't have a default SB */
+ if (IS_PF(bp)) {
+ free_irq(bp->msix_table[offset].vector, bp->dev);
+ DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
+ bp->msix_table[offset].vector);
+ offset++;
+ }
if (CNIC_SUPPORT(bp)) {
if (nvecs == offset)
@@ -1430,21 +1619,30 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
void bnx2x_free_irq(struct bnx2x *bp)
{
if (bp->flags & USING_MSIX_FLAG &&
- !(bp->flags & USING_SINGLE_MSIX_FLAG))
- bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
- CNIC_SUPPORT(bp) + 1);
- else
+ !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
+ int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
+
+ /* vfs don't have a default status block */
+ if (IS_PF(bp))
+ nvecs++;
+
+ bnx2x_free_msix_irqs(bp, nvecs);
+ } else {
free_irq(bp->dev->irq, bp->dev);
+ }
}
int bnx2x_enable_msix(struct bnx2x *bp)
{
- int msix_vec = 0, i, rc, req_cnt;
+ int msix_vec = 0, i, rc;
- bp->msix_table[msix_vec].entry = msix_vec;
- BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
- bp->msix_table[0].entry);
- msix_vec++;
+ /* VFs don't have a default status block */
+ if (IS_PF(bp)) {
+ bp->msix_table[msix_vec].entry = msix_vec;
+ BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
+ bp->msix_table[0].entry);
+ msix_vec++;
+ }
/* Cnic requires an msix vector for itself */
if (CNIC_SUPPORT(bp)) {
@@ -1462,38 +1660,19 @@ int bnx2x_enable_msix(struct bnx2x *bp)
msix_vec++;
}
- req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp) + 1;
-
- rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
+ DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
+ msix_vec);
+ rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
+ BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
/*
* reconfigure number of tx/rx queues according to available
* MSI-X vectors
*/
- if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
- /* how less vectors we will have? */
- int diff = req_cnt - rc;
-
- BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
-
- rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
-
- if (rc) {
- BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
- goto no_msix;
- }
- /*
- * decrease number of queues by number of unallocated entries
- */
- bp->num_ethernet_queues -= diff;
- bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
-
- BNX2X_DEV_INFO("New queue configuration set: %d\n",
- bp->num_queues);
- } else if (rc > 0) {
+ if (rc == -ENOSPC) {
/* Get by with single vector */
- rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
- if (rc) {
+ rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
+ if (rc < 0) {
BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
rc);
goto no_msix;
@@ -1506,8 +1685,22 @@ int bnx2x_enable_msix(struct bnx2x *bp)
bp->num_ethernet_queues = 1;
bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
} else if (rc < 0) {
- BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
+ BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
goto no_msix;
+ } else if (rc < msix_vec) {
+ /* how many fewer vectors did we get? */
+ int diff = msix_vec - rc;
+
+ BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
+
+ /*
+ * decrease number of queues by number of unallocated entries
+ */
+ bp->num_ethernet_queues -= diff;
+ bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
+
+ BNX2X_DEV_INFO("New queue configuration set: %d\n",
+ bp->num_queues);
}
bp->flags |= USING_MSIX_FLAG;
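
pci_enable_msix_range() collapses the old try/retry dance into one call: it returns a vector count somewhere in [minvec, maxvec] on success, -ENOSPC if even minvec cannot be met, or another negative errno. The resulting ladder above, condensed into a sketch (enable_range is a stand-in for the PCI call):

#include <errno.h>

/* 'enable_range' mimics pci_enable_msix_range(): it returns a count
 * in [minvec, maxvec] on success or a negative errno on failure. */
static int negotiate_msix(int (*enable_range)(int minvec, int maxvec),
			  int min_vec, int want_vec, int *eth_queues)
{
	int rc = enable_range(min_vec, want_vec);

	if (rc == -ENOSPC) {
		rc = enable_range(1, 1);	/* get by with one vector */
		if (rc < 0)
			return rc;		/* no MSI-X at all */
		*eth_queues = 1;
	} else if (rc < 0) {
		return rc;			/* MSI-X not attainable */
	} else if (rc < want_vec) {
		/* fewer vectors than asked: shrink the RSS queue count */
		*eth_queues -= want_vec - rc;
	}
	return 0;
}
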
@@ -1526,12 +1719,15 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
int i, rc, offset = 0;
- rc = request_irq(bp->msix_table[offset++].vector,
- bnx2x_msix_sp_int, 0,
- bp->dev->name, bp->dev);
- if (rc) {
- BNX2X_ERR("request sp irq failed\n");
- return -EBUSY;
+ /* no default status block for vf */
+ if (IS_PF(bp)) {
+ rc = request_irq(bp->msix_table[offset++].vector,
+ bnx2x_msix_sp_int, 0,
+ bp->dev->name, bp->dev);
+ if (rc) {
+ BNX2X_ERR("request sp irq failed\n");
+ return -EBUSY;
+ }
}
if (CNIC_SUPPORT(bp))
@@ -1555,12 +1751,20 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
}
i = BNX2X_NUM_ETH_QUEUES(bp);
- offset = 1 + CNIC_SUPPORT(bp);
- netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
- bp->msix_table[0].vector,
- 0, bp->msix_table[offset].vector,
- i - 1, bp->msix_table[offset + i - 1].vector);
-
+ if (IS_PF(bp)) {
+ offset = 1 + CNIC_SUPPORT(bp);
+ netdev_info(bp->dev,
+ "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
+ bp->msix_table[0].vector,
+ 0, bp->msix_table[offset].vector,
+ i - 1, bp->msix_table[offset + i - 1].vector);
+ } else {
+ offset = CNIC_SUPPORT(bp);
+ netdev_info(bp->dev,
+ "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
+ 0, bp->msix_table[offset].vector,
+ i - 1, bp->msix_table[offset + i - 1].vector);
+ }
return 0;
}
@@ -1605,7 +1809,6 @@ static int bnx2x_setup_irqs(struct bnx2x *bp)
if (rc)
return rc;
} else {
- bnx2x_ack_int(bp);
rc = bnx2x_req_irq(bp);
if (rc) {
BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
@@ -1630,32 +1833,42 @@ static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
{
int i;
- for_each_rx_queue_cnic(bp, i)
+ for_each_rx_queue_cnic(bp, i) {
+ bnx2x_fp_init_lock(&bp->fp[i]);
napi_enable(&bnx2x_fp(bp, i, napi));
+ }
}
static void bnx2x_napi_enable(struct bnx2x *bp)
{
int i;
- for_each_eth_queue(bp, i)
+ for_each_eth_queue(bp, i) {
+ bnx2x_fp_init_lock(&bp->fp[i]);
napi_enable(&bnx2x_fp(bp, i, napi));
+ }
}
static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
{
int i;
- for_each_rx_queue_cnic(bp, i)
+ for_each_rx_queue_cnic(bp, i) {
napi_disable(&bnx2x_fp(bp, i, napi));
+ while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+ usleep_range(1000, 2000);
+ }
}
static void bnx2x_napi_disable(struct bnx2x *bp)
{
int i;
- for_each_eth_queue(bp, i)
+ for_each_eth_queue(bp, i) {
napi_disable(&bnx2x_fp(bp, i, napi));
+ while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+ usleep_range(1000, 2000);
+ }
}
void bnx2x_netif_start(struct bnx2x *bp)
@@ -1678,7 +1891,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
bnx2x_napi_disable_cnic(bp);
}
-u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -1700,10 +1914,9 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
}
/* select a non-FCoE queue */
- return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
+ return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
}
-
void bnx2x_set_num_queues(struct bnx2x *bp)
{
/* RSS queues */
@@ -1734,7 +1947,7 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
*
* If the actual number of Tx queues (for each CoS) is less than 16 then there
 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
- * 16..31,...) with indicies that are not coupled with any real Tx queue.
+ * 16..31,...) with indices that are not coupled with any real Tx queue.
*
* The proper configuration of skb->queue_mapping is handled by
* bnx2x_select_queue() and __skb_tx_hash().
@@ -1796,7 +2009,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
ETH_OVREHEAD +
mtu +
BNX2X_FW_RX_ALIGN_END;
- /* Note : rx_buf_size doesnt take into account NET_SKB_PAD */
+ /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
else
@@ -1804,12 +2017,12 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
}
}
-static int bnx2x_init_rss_pf(struct bnx2x *bp)
+static int bnx2x_init_rss(struct bnx2x *bp)
{
int i;
u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
- /* Prepare the initial contents fo the indirection table if RSS is
+ /* Prepare the initial contents for the indirection table if RSS is
* enabled
*/
for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
@@ -1828,8 +2041,8 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp)
return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
}
-int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
- bool config_hash)
+int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
+ bool config_hash, bool enable)
{
struct bnx2x_config_rss_params params = {NULL};
@@ -1844,17 +2057,21 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
- __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
-
- /* RSS configuration */
- __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
- __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
- __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
- __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
- if (rss_obj->udp_rss_v4)
- __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
- if (rss_obj->udp_rss_v6)
- __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
+ if (enable) {
+ __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
+
+ /* RSS configuration */
+ __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
+ if (rss_obj->udp_rss_v4)
+ __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
+ if (rss_obj->udp_rss_v6)
+ __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
+ } else {
+ __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
+ }
/* Hash bits */
params.rss_result_mask = MULTI_MASK;
@@ -1863,11 +2080,14 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
if (config_hash) {
/* RSS keys */
- prandom_bytes(params.rss_key, sizeof(params.rss_key));
+ prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
}
- return bnx2x_config_rss(bp, &params);
+ if (IS_PF(bp))
+ return bnx2x_config_rss(bp, &params);
+ else
+ return bnx2x_vfpf_config_rss(bp, &params);
}
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
@@ -1887,9 +2107,9 @@ static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
/*
 * Cleans the objects that have internal lists without sending
- * ramrods. Should be run when interrutps are disabled.
+ * ramrods. Should be run when interrupts are disabled.
*/
-static void bnx2x_squeeze_objects(struct bnx2x *bp)
+void bnx2x_squeeze_objects(struct bnx2x *bp)
{
int rc;
unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
@@ -1922,7 +2142,11 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
rparam.mcast_obj = &bp->mcast_obj;
__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
- /* Add a DEL command... */
+ /* Add a DEL command... - Since we're doing a driver cleanup only,
+ * we take a lock surrounding both the initial send and the CONTs,
+ * as we don't want a true completion to disrupt us in the middle.
+ */
+ netif_addr_lock_bh(bp->dev);
rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
if (rc < 0)
BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
@@ -1934,11 +2158,13 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
if (rc < 0) {
BNX2X_ERR("Failed to clean multi-cast object: %d\n",
rc);
+ netif_addr_unlock_bh(bp->dev);
return;
}
rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
}
+ netif_addr_unlock_bh(bp->dev);
}
#ifndef BNX2X_STOP_ON_ERROR
@@ -1968,27 +2194,224 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
} while (0)
#endif /*BNX2X_STOP_ON_ERROR*/
-bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
+static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
+{
+ BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+}
+
+static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
+{
+ int num_groups, vf_headroom = 0;
+ int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
+
+ /* number of queues for statistics is number of eth queues + FCoE */
+ u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
+
+ /* Total number of FW statistics requests =
+ * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
+ * and fcoe l2 queue) stats + num of queues (which includes another 1
+ * for fcoe l2 queue if applicable)
+ */
+ bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
+
+ /* vf stats appear in the request list, but their data is allocated by
+ * the VFs themselves. We don't include them in the bp->fw_stats_num as
+ * it is used to determine where to place the vf stats queries in the
+ * request struct
+ */
+ if (IS_SRIOV(bp))
+ vf_headroom = bnx2x_vf_headroom(bp);
+
+ /* Request is built from stats_query_header and an array of
+ * stats_query_cmd_group each of which contains
+ * STATS_QUERY_CMD_COUNT rules. The real number or requests is
+ * configured in the stats_query_header.
+ */
+ num_groups =
+ (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
+ (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
+ 1 : 0));
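
The num_groups expression is a hand-rolled ceiling division - one stats_query_cmd_group per STATS_QUERY_CMD_COUNT rules, plus one more group for any remainder - equivalent to the kernel's DIV_ROUND_UP(). A quick check of the identity (16 is only a placeholder for STATS_QUERY_CMD_COUNT, whose value is defined elsewhere):

#include <assert.h>

#define CEIL_DIV(n, per)	(((n) + (per) - 1) / (per))

int main(void)
{
	const int per = 16;	/* placeholder for STATS_QUERY_CMD_COUNT */

	for (int n = 0; n < 1000; n++)
		assert(n / per + (n % per ? 1 : 0) == CEIL_DIV(n, per));
	return 0;
}
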
+
+ DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
+ bp->fw_stats_num, vf_headroom, num_groups);
+ bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
+ num_groups * sizeof(struct stats_query_cmd_group);
+
+ /* Data for statistics requests + stats_counter
+ * stats_counter holds per-STORM counters that are incremented
+ * when STORM has finished with the current request.
+ * memory for FCoE offloaded statistics is counted anyway,
+ * even if it will not be sent.
+ * VF stats are not accounted for here as the data of VF stats is stored
+ * in memory allocated by the VF, not here.
+ */
+ bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
+ sizeof(struct per_pf_stats) +
+ sizeof(struct fcoe_statistics_params) +
+ sizeof(struct per_queue_stats) * num_queue_stats +
+ sizeof(struct stats_counter);
+
+ bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+ if (!bp->fw_stats)
+ goto alloc_mem_err;
+
+ /* Set shortcuts */
+ bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
+ bp->fw_stats_req_mapping = bp->fw_stats_mapping;
+ bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
+ ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
+ bp->fw_stats_data_mapping = bp->fw_stats_mapping +
+ bp->fw_stats_req_sz;
+
+ DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
+ U64_HI(bp->fw_stats_req_mapping),
+ U64_LO(bp->fw_stats_req_mapping));
+ DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
+ U64_HI(bp->fw_stats_data_mapping),
+ U64_LO(bp->fw_stats_data_mapping));
+ return 0;
+
+alloc_mem_err:
+ bnx2x_free_fw_stats_mem(bp);
+ BNX2X_ERR("Can't allocate FW stats memory\n");
+ return -ENOMEM;
+}
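
bnx2x_alloc_fw_stats_mem() makes a single coherent allocation covering request + data and then derives "shortcut" CPU pointers and bus addresses by offsetting into it, so one alloc/free pair manages both regions. The carving, sketched in userspace with malloc() standing in for BNX2X_PCI_ALLOC() and the pointer value doubling as a fake bus address:

#include <stdint.h>
#include <stdlib.h>

struct fw_stats_sketch {
	void	*base;		/* one block: request, then data */
	uint64_t base_bus;	/* stand-in for the DMA mapping */
	void	*req, *data;	/* shortcuts into the block */
	uint64_t req_bus, data_bus;
};

static int carve(struct fw_stats_sketch *m, size_t req_sz, size_t data_sz)
{
	m->base = malloc(req_sz + data_sz);
	if (!m->base)
		return -1;
	m->base_bus = (uintptr_t)m->base;	/* fake bus address */

	m->req      = m->base;			/* request at offset 0 */
	m->req_bus  = m->base_bus;
	m->data     = (uint8_t *)m->base + req_sz;	/* data follows */
	m->data_bus = m->base_bus + req_sz;
	return 0;
}
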
+
+/* send load request to mcp and analyze response */
+static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
{
- /* build FW version dword */
- u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
- (BCM_5710_FW_MINOR_VERSION << 8) +
- (BCM_5710_FW_REVISION_VERSION << 16) +
- (BCM_5710_FW_ENGINEERING_VERSION << 24);
+ u32 param;
- /* read loaded FW from chip */
- u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
+ /* init fw_seq */
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
- DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
+ /* Get current FW pulse sequence */
+ bp->fw_drv_pulse_wr_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
+ DRV_PULSE_SEQ_MASK);
+ BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
- if (loaded_fw != my_fw) {
- if (is_err)
- BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
- loaded_fw, my_fw);
- return false;
+ param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
+
+ if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
+ param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
+
+ /* load request */
+ (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
+
+ /* if mcp fails to respond we must abort */
+ if (!(*load_code)) {
+ BNX2X_ERR("MCP response failure, aborting\n");
+ return -EBUSY;
}
- return true;
+ /* If mcp refused (e.g. other port is in diagnostic mode) we
+ * must abort
+ */
+ if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
+ BNX2X_ERR("MCP refused load request, aborting\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/* check whether another PF has already loaded FW to chip. In
+ * virtualized environments a pf from another VM may have already
+ * initialized the device including loading FW
+ */
+int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
+{
+ /* is another pf loaded on this engine? */
+ if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
+ load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
+ /* build my FW version dword */
+ u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
+ (BCM_5710_FW_MINOR_VERSION << 8) +
+ (BCM_5710_FW_REVISION_VERSION << 16) +
+ (BCM_5710_FW_ENGINEERING_VERSION << 24);
+
+ /* read loaded FW from chip */
+ u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
+
+ DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
+ loaded_fw, my_fw);
+
+ /* abort nic load if version mismatch */
+ if (my_fw != loaded_fw) {
+ if (print_err)
+ BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
+ loaded_fw, my_fw);
+ else
+ BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
+ loaded_fw, my_fw);
+ return -EBUSY;
+ }
+ }
+ return 0;
+}
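
my_fw packs the four one-byte version components into a u32 - major in bits 0-7 up through engineering in bits 24-31 - so the comparison against the dword left in XSEM_REG_PRAM by whoever loaded the firmware is a single equality test. Pack/unpack sketch (the driver adds the shifted bytes, which for byte-sized fields is the same as OR-ing them):

#include <stdint.h>

static uint32_t fw_ver_pack(uint8_t major, uint8_t minor,
			    uint8_t rev, uint8_t eng)
{
	return major | (uint32_t)minor << 8 |
	       (uint32_t)rev << 16 | (uint32_t)eng << 24;
}

static void fw_ver_unpack(uint32_t fw, uint8_t out[4])
{
	out[0] = fw & 0xff;		/* major */
	out[1] = (fw >> 8) & 0xff;	/* minor */
	out[2] = (fw >> 16) & 0xff;	/* revision */
	out[3] = fw >> 24;		/* engineering */
}
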
+
+/* returns the "mcp load_code" according to global load_count array */
+static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
+{
+ int path = BP_PATH(bp);
+
+ DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
+ path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+ bnx2x_load_count[path][2]);
+ bnx2x_load_count[path][0]++;
+ bnx2x_load_count[path][1 + port]++;
+ DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
+ path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+ bnx2x_load_count[path][2]);
+ if (bnx2x_load_count[path][0] == 1)
+ return FW_MSG_CODE_DRV_LOAD_COMMON;
+ else if (bnx2x_load_count[path][1 + port] == 1)
+ return FW_MSG_CODE_DRV_LOAD_PORT;
+ else
+ return FW_MSG_CODE_DRV_LOAD_FUNCTION;
+}
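
Without an MCP to arbitrate, the driver keeps its own counters: bnx2x_load_count[path][0] counts every function on the path and [1 + port] counts functions per port, so the first loader on the path performs the COMMON (chip-wide) init, the first on a port performs PORT init, and everyone else only initializes FUNCTION state. The decision, isolated:

enum load_role { ROLE_COMMON, ROLE_PORT, ROLE_FUNCTION };

/* counts[0] = whole path; counts[1], counts[2] = per port (0/1) */
static enum load_role no_mcp_role(int counts[3], int port)
{
	counts[0]++;
	counts[1 + port]++;

	if (counts[0] == 1)		/* first function on this path */
		return ROLE_COMMON;
	if (counts[1 + port] == 1)	/* first function on this port */
		return ROLE_PORT;
	return ROLE_FUNCTION;
}
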
+
+/* mark PMF if applicable */
+static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
+{
+ if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
+ (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
+ (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
+ bp->port.pmf = 1;
+ /* We need the barrier to ensure the ordering between the
+ * writing to bp->port.pmf here and reading it from the
+ * bnx2x_periodic_task().
+ */
+ smp_mb();
+ } else {
+ bp->port.pmf = 0;
+ }
+
+ DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
+}
+
+static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
+{
+ if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
+ (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
+ (bp->common.shmem2_base)) {
+ if (SHMEM2_HAS(bp, dcc_support))
+ SHMEM2_WR(bp, dcc_support,
+ (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
+ SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
+ if (SHMEM2_HAS(bp, afex_driver_support))
+ SHMEM2_WR(bp, afex_driver_support,
+ SHMEM_AFEX_SUPPORTED_VERSION_ONE);
+ }
+
+ /* Set AFEX default VLAN tag to an invalid value */
+ bp->afex_def_vlan_tag = -1;
}
/**
@@ -2003,49 +2426,15 @@ bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
static void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
struct bnx2x_fastpath *fp = &bp->fp[index];
- struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
-
int cos;
struct napi_struct orig_napi = fp->napi;
struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
- /* bzero bnx2x_fastpath contents */
- if (bp->stats_init) {
- memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
- memset(fp, 0, sizeof(*fp));
- } else {
- /* Keep Queue statistics */
- struct bnx2x_eth_q_stats *tmp_eth_q_stats;
- struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
-
- tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
- GFP_KERNEL);
- if (tmp_eth_q_stats)
- memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
- sizeof(struct bnx2x_eth_q_stats));
-
- tmp_eth_q_stats_old =
- kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
- GFP_KERNEL);
- if (tmp_eth_q_stats_old)
- memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
- sizeof(struct bnx2x_eth_q_stats_old));
-
- memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
- memset(fp, 0, sizeof(*fp));
-
- if (tmp_eth_q_stats) {
- memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
- sizeof(struct bnx2x_eth_q_stats));
- kfree(tmp_eth_q_stats);
- }
-
- if (tmp_eth_q_stats_old) {
- memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
- sizeof(struct bnx2x_eth_q_stats_old));
- kfree(tmp_eth_q_stats_old);
- }
- }
+ /* bzero bnx2x_fastpath contents */
+ if (fp->tpa_info)
+ memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
+ sizeof(struct bnx2x_agg_info));
+ memset(fp, 0, sizeof(*fp));
/* Restore the NAPI object as it has been already initialized */
fp->napi = orig_napi;
@@ -2066,8 +2455,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
BNX2X_NUM_ETH_QUEUES(bp) + index];
- /*
- * set the tpa flag for each queue. The tpa flag determines the queue
+ /* set the tpa flag for each queue. The tpa flag determines the queue
* minimal size so it must be set prior to queue memory allocation
*/
fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
@@ -2091,10 +2479,12 @@ int bnx2x_load_cnic(struct bnx2x *bp)
mutex_init(&bp->cnic_mutex);
- rc = bnx2x_alloc_mem_cnic(bp);
- if (rc) {
- BNX2X_ERR("Unable to allocate bp memory for cnic\n");
- LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+ if (IS_PF(bp)) {
+ rc = bnx2x_alloc_mem_cnic(bp);
+ if (rc) {
+ BNX2X_ERR("Unable to allocate bp memory for cnic\n");
+ LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+ }
}
rc = bnx2x_alloc_fp_mem_cnic(bp);
@@ -2121,21 +2511,22 @@ int bnx2x_load_cnic(struct bnx2x *bp)
bnx2x_nic_init_cnic(bp);
- /* Enable Timer scan */
- REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
+ if (IS_PF(bp)) {
+ /* Enable Timer scan */
+ REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
- for_each_cnic_queue(bp, i) {
- rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
- if (rc) {
- BNX2X_ERR("Queue setup failed\n");
- LOAD_ERROR_EXIT(bp, load_error_cnic2);
+ /* setup cnic queues */
+ for_each_cnic_queue(bp, i) {
+ rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
+ if (rc) {
+ BNX2X_ERR("Queue setup failed\n");
+ LOAD_ERROR_EXIT(bp, load_error_cnic2);
+ }
}
}
/* Initialize Rx filter. */
- netif_addr_lock_bh(bp->dev);
- bnx2x_set_rx_mode(bp->dev);
- netif_addr_unlock_bh(bp->dev);
+ bnx2x_set_rx_mode_inner(bp);
/* re-read iscsi info */
bnx2x_get_iscsi_info(bp);
@@ -2145,7 +2536,6 @@ int bnx2x_load_cnic(struct bnx2x *bp)
if (bp->state == BNX2X_STATE_OPEN)
bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
-
DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
return 0;
@@ -2158,8 +2548,7 @@ load_error_cnic2:
load_error_cnic1:
bnx2x_napi_disable_cnic(bp);
/* Update the number of queues without the cnic queues */
- rc = bnx2x_set_real_num_queues(bp, 0);
- if (rc)
+ if (bnx2x_set_real_num_queues(bp, 0))
BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
load_error_cnic0:
BNX2X_ERR("CNIC-related load failed\n");
@@ -2169,13 +2558,11 @@ load_error_cnic0:
#endif /* ! BNX2X_STOP_ON_ERROR */
}
-
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
int port = BP_PORT(bp);
- u32 load_code;
- int i, rc;
+ int i, rc = 0, load_code = 0;
DP(NETIF_MSG_IFUP, "Starting NIC load\n");
DP(NETIF_MSG_IFUP,
@@ -2190,15 +2577,14 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
- /* Set the initial link reported state to link down */
- bnx2x_acquire_phy_lock(bp);
+ /* zero the structure w/o any lock, before SP handler is initialized */
memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
&bp->last_reported_link.link_report_flags);
- bnx2x_release_phy_lock(bp);
- /* must be called before memory allocation and HW init */
- bnx2x_ilt_set_info(bp);
+ if (IS_PF(bp))
+ /* must be called before memory allocation and HW init */
+ bnx2x_ilt_set_info(bp);
/*
* Zero fastpath structures preserving invariants like napi, which are
@@ -2217,8 +2603,33 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Set the receive queues buffer size */
bnx2x_set_rx_buf_size(bp);
- if (bnx2x_alloc_mem(bp))
- return -ENOMEM;
+ if (IS_PF(bp)) {
+ rc = bnx2x_alloc_mem(bp);
+ if (rc) {
+ BNX2X_ERR("Unable to allocate bp memory\n");
+ return rc;
+ }
+ }
+
+ /* needs to be done after alloc mem, since it's self-adjusting to the
+ * amount of memory available for RSS queues
+ */
+ rc = bnx2x_alloc_fp_mem(bp);
+ if (rc) {
+ BNX2X_ERR("Unable to allocate memory for fps\n");
+ LOAD_ERROR_EXIT(bp, load_error0);
+ }
+
+ /* Allocate memory for FW statistics */
+ if (bnx2x_alloc_fw_stats_mem(bp))
+ LOAD_ERROR_EXIT(bp, load_error0);
+
+ /* request pf to initialize status blocks */
+ if (IS_VF(bp)) {
+ rc = bnx2x_vfpf_init(bp);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error0);
+ }
/* As long as bnx2x_alloc_mem() may possibly update
* bp->num_queues, bnx2x_set_real_num_queues() should always
@@ -2231,8 +2642,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}
/* configure multi cos mappings in kernel.
- * this configuration may be overriden by a multi class queue discipline
- * or by a dcbx negotiation result.
+ * this configuration may be overridden by a multi class queue
+ * discipline or by a dcbx negotiation result.
*/
bnx2x_setup_tc(bp->dev, bp->max_cos);
@@ -2241,155 +2652,109 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
DP(NETIF_MSG_IFUP, "napi added\n");
bnx2x_napi_enable(bp);
- /* set pf load just before approaching the MCP */
- bnx2x_set_pf_load(bp);
-
- /* Send LOAD_REQUEST command to MCP
- * Returns the type of LOAD command:
- * if it is the first port to be initialized
- * common blocks should be initialized, otherwise - not
- */
- if (!BP_NOMCP(bp)) {
- /* init fw_seq */
- bp->fw_seq =
- (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK);
- BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
-
- /* Get current FW pulse sequence */
- bp->fw_drv_pulse_wr_seq =
- (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
- DRV_PULSE_SEQ_MASK);
- BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
-
- load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
- DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
- if (!load_code) {
- BNX2X_ERR("MCP response failure, aborting\n");
- rc = -EBUSY;
- LOAD_ERROR_EXIT(bp, load_error1);
- }
- if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
- BNX2X_ERR("Driver load refused\n");
- rc = -EBUSY; /* other port in diagnostic mode */
- LOAD_ERROR_EXIT(bp, load_error1);
- }
- if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
- load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
- /* abort nic load if version mismatch */
- if (!bnx2x_test_firmware_version(bp, true)) {
- rc = -EBUSY;
+ if (IS_PF(bp)) {
+ /* set pf load just before approaching the MCP */
+ bnx2x_set_pf_load(bp);
+
+ /* if mcp exists send load request and analyze response */
+ if (!BP_NOMCP(bp)) {
+ /* attempt to load pf */
+ rc = bnx2x_nic_load_request(bp, &load_code);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error1);
+
+ /* what did mcp say? */
+ rc = bnx2x_compare_fw_ver(bp, load_code, true);
+ if (rc) {
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
LOAD_ERROR_EXIT(bp, load_error2);
}
+ } else {
+ load_code = bnx2x_nic_load_no_mcp(bp, port);
}
- } else {
- int path = BP_PATH(bp);
-
- DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- load_count[path][0]++;
- load_count[path][1 + port]++;
- DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- if (load_count[path][0] == 1)
- load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
- else if (load_count[path][1 + port] == 1)
- load_code = FW_MSG_CODE_DRV_LOAD_PORT;
- else
- load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
- }
-
- if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
- (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
- (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
- bp->port.pmf = 1;
- /*
- * We need the barrier to ensure the ordering between the
- * writing to bp->port.pmf here and reading it from the
- * bnx2x_periodic_task().
- */
- smp_mb();
- } else
- bp->port.pmf = 0;
+ /* mark pmf if applicable */
+ bnx2x_nic_load_pmf(bp, load_code);
- DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
+ /* Init Function state controlling object */
+ bnx2x__init_func_obj(bp);
- /* Init Function state controlling object */
- bnx2x__init_func_obj(bp);
-
- /* Initialize HW */
- rc = bnx2x_init_hw(bp, load_code);
- if (rc) {
- BNX2X_ERR("HW init failed, aborting\n");
- bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
- LOAD_ERROR_EXIT(bp, load_error2);
+ /* Initialize HW */
+ rc = bnx2x_init_hw(bp, load_code);
+ if (rc) {
+ BNX2X_ERR("HW init failed, aborting\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+ LOAD_ERROR_EXIT(bp, load_error2);
+ }
}
+ bnx2x_pre_irq_nic_init(bp);
+
/* Connect to IRQs */
rc = bnx2x_setup_irqs(bp);
if (rc) {
- BNX2X_ERR("IRQs setup failed\n");
- bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+ BNX2X_ERR("setup irqs failed\n");
+ if (IS_PF(bp))
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
LOAD_ERROR_EXIT(bp, load_error2);
}
- /* Setup NIC internals and enable interrupts */
- bnx2x_nic_init(bp, load_code);
-
/* Init per-function objects */
- bnx2x_init_bp_objs(bp);
-
- if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
- (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
- (bp->common.shmem2_base)) {
- if (SHMEM2_HAS(bp, dcc_support))
- SHMEM2_WR(bp, dcc_support,
- (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
- SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
- if (SHMEM2_HAS(bp, afex_driver_support))
- SHMEM2_WR(bp, afex_driver_support,
- SHMEM_AFEX_SUPPORTED_VERSION_ONE);
- }
-
- /* Set AFEX default VLAN tag to an invalid value */
- bp->afex_def_vlan_tag = -1;
-
- bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
- rc = bnx2x_func_start(bp);
- if (rc) {
- BNX2X_ERR("Function start failed!\n");
- bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
- LOAD_ERROR_EXIT(bp, load_error3);
- }
+ if (IS_PF(bp)) {
+ /* Setup NIC internals and enable interrupts */
+ bnx2x_post_irq_nic_init(bp, load_code);
+
+ bnx2x_init_bp_objs(bp);
+ bnx2x_iov_nic_init(bp);
+
+ /* Set AFEX default VLAN tag to an invalid value */
+ bp->afex_def_vlan_tag = -1;
+ bnx2x_nic_load_afex_dcc(bp, load_code);
+ bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
+ rc = bnx2x_func_start(bp);
+ if (rc) {
+ BNX2X_ERR("Function start failed!\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
- /* Send LOAD_DONE command to MCP */
- if (!BP_NOMCP(bp)) {
- load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
- if (!load_code) {
- BNX2X_ERR("MCP response failure, aborting\n");
- rc = -EBUSY;
LOAD_ERROR_EXIT(bp, load_error3);
}
+
+ /* Send LOAD_DONE command to MCP */
+ if (!BP_NOMCP(bp)) {
+ load_code = bnx2x_fw_command(bp,
+ DRV_MSG_CODE_LOAD_DONE, 0);
+ if (!load_code) {
+ BNX2X_ERR("MCP response failure, aborting\n");
+ rc = -EBUSY;
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
+ }
+
+ /* initialize FW coalescing state machines in RAM */
+ bnx2x_update_coalesce(bp);
}
+ /* setup the leading queue */
rc = bnx2x_setup_leading(bp);
if (rc) {
BNX2X_ERR("Setup leading failed!\n");
LOAD_ERROR_EXIT(bp, load_error3);
}
+ /* set up the rest of the queues */
for_each_nondefault_eth_queue(bp, i) {
- rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
+ if (IS_PF(bp))
+ rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
+ else /* VF */
+ rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
if (rc) {
- BNX2X_ERR("Queue setup failed\n");
+ BNX2X_ERR("Queue %d setup failed\n", i);
LOAD_ERROR_EXIT(bp, load_error3);
}
}
- rc = bnx2x_init_rss_pf(bp);
+ /* setup rss */
+ rc = bnx2x_init_rss(bp);
if (rc) {
BNX2X_ERR("PF RSS init failed\n");
LOAD_ERROR_EXIT(bp, load_error3);
@@ -2399,38 +2764,43 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bp->state = BNX2X_STATE_OPEN;
/* Configure a ucast MAC */
- rc = bnx2x_set_eth_mac(bp, true);
+ if (IS_PF(bp))
+ rc = bnx2x_set_eth_mac(bp, true);
+ else /* vf */
+ rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
+ true);
if (rc) {
BNX2X_ERR("Setting Ethernet MAC failed\n");
LOAD_ERROR_EXIT(bp, load_error3);
}
- if (bp->pending_max) {
+ if (IS_PF(bp) && bp->pending_max) {
bnx2x_update_max_mf_config(bp, bp->pending_max);
bp->pending_max = 0;
}
- if (bp->port.pmf)
- bnx2x_initial_phy_init(bp, load_mode);
+ if (bp->port.pmf) {
+ rc = bnx2x_initial_phy_init(bp, load_mode);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
/* Start fast path */
/* Initialize Rx filter. */
- netif_addr_lock_bh(bp->dev);
- bnx2x_set_rx_mode(bp->dev);
- netif_addr_unlock_bh(bp->dev);
+ bnx2x_set_rx_mode_inner(bp);
/* Start the Tx */
switch (load_mode) {
case LOAD_NORMAL:
- /* Tx queue should be only reenabled */
+ /* Tx queue should be only re-enabled */
netif_tx_wake_all_queues(bp->dev);
break;
case LOAD_OPEN:
netif_tx_start_all_queues(bp->dev);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
break;
case LOAD_DIAG:
@@ -2453,8 +2823,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (CNIC_ENABLED(bp))
bnx2x_load_cnic(bp);
- /* mark driver is loaded in shmem2 */
- if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ if (IS_PF(bp))
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
+
+ if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ /* mark driver is loaded in shmem2 */
u32 val;
val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
@@ -2463,7 +2836,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}
/* Wait for all pending SP commands to complete */
- if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
+ if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
BNX2X_ERR("Timeout waiting for SP elements to complete\n");
bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
return -EBUSY;
@@ -2479,10 +2852,12 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
#ifndef BNX2X_STOP_ON_ERROR
load_error3:
- bnx2x_int_disable_sync(bp, 1);
+ if (IS_PF(bp)) {
+ bnx2x_int_disable_sync(bp, 1);
- /* Clean queueable objects */
- bnx2x_squeeze_objects(bp);
+ /* Clean queueable objects */
+ bnx2x_squeeze_objects(bp);
+ }
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
@@ -2492,7 +2867,7 @@ load_error3:
/* Release IRQs */
bnx2x_free_irq(bp);
load_error2:
- if (!BP_NOMCP(bp)) {
+ if (IS_PF(bp) && !BP_NOMCP(bp)) {
bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
}
@@ -2500,15 +2875,36 @@ load_error2:
bp->port.pmf = 0;
load_error1:
bnx2x_napi_disable(bp);
+ bnx2x_del_all_napi(bp);
+
/* clear pf_load status, as it was already set */
- bnx2x_clear_pf_load(bp);
+ if (IS_PF(bp))
+ bnx2x_clear_pf_load(bp);
load_error0:
+ bnx2x_free_fw_stats_mem(bp);
+ bnx2x_free_fp_mem(bp);
bnx2x_free_mem(bp);
return rc;
#endif /* ! BNX2X_STOP_ON_ERROR */
}
+int bnx2x_drain_tx_queues(struct bnx2x *bp)
+{
+ u8 rc = 0, cos, i;
+
+ /* Wait until tx fastpath tasks complete */
+ for_each_tx_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ for_each_cos_in_tx_queue(fp, cos)
+ rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
{
@@ -2518,15 +2914,16 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
/* mark driver is unloaded in shmem2 */
- if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
u32 val;
val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
}
- if ((bp->state == BNX2X_STATE_CLOSED) ||
- (bp->state == BNX2X_STATE_ERROR)) {
+ if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
+ (bp->state == BNX2X_STATE_CLOSED ||
+ bp->state == BNX2X_STATE_ERROR)) {
/* We can get here if the driver has been unloaded
* during parity error recovery and is either waiting for a
* leader to complete or for other functions to unload and
@@ -2544,14 +2941,25 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
return -EINVAL;
}
- /*
- * It's important to set the bp->state to the value different from
+ /* Nothing to do during unload if the previous bnx2x_nic_load()
+ * did not complete successfully - all resources are released.
+ *
+ * We can get here only after an unsuccessful ndo_* callback, during
+ * which the dev->IFF_UP flag is still on.
+ */
+ if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
+ return 0;
+
+ /* It's important to set the bp->state to the value different from
* BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
* may restart the Tx from the NAPI context (see bnx2x_tx_int()).
*/
bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
smp_mb();
+ /* indicate to VFs that the PF is going down */
+ bnx2x_iov_channel_down(bp);
+
if (CNIC_LOADED(bp))
bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
@@ -2563,25 +2971,32 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
del_timer_sync(&bp->timer);
- /* Set ALWAYS_ALIVE bit in shmem */
- bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
-
- bnx2x_drv_pulse(bp);
+ if (IS_PF(bp)) {
+ /* Set ALWAYS_ALIVE bit in shmem */
+ bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
+ bnx2x_drv_pulse(bp);
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_save_statistics(bp);
+ }
- bnx2x_stats_handle(bp, STATS_EVENT_STOP);
- bnx2x_save_statistics(bp);
+ /* wait till consumers catch up with producers in all queues */
+ bnx2x_drain_tx_queues(bp);
- /* Cleanup the chip if needed */
- if (unload_mode != UNLOAD_RECOVERY)
+ /* if VF, indicate to the PF that this function is going down (the PF
+ * will delete sp elements and clear initializations)
+ */
+ if (IS_VF(bp))
+ bnx2x_vfpf_close_vf(bp);
+ else if (unload_mode != UNLOAD_RECOVERY)
+ /* if this is a normal/close unload need to clean up chip*/
bnx2x_chip_cleanup(bp, unload_mode, keep_link);
else {
/* Send the UNLOAD_REQUEST to the MCP */
bnx2x_send_unload_req(bp, unload_mode);
- /*
- * Prevent transactions to host from the functions on the
+ /* Prevent transactions to host from the functions on the
* engine that doesn't reset global blocks in case of global
- * attention once gloabl blocks are reset and gates are opened
+ * attention once global blocks are reset and gates are opened
* (the engine which leader will perform the recovery
* last).
*/
@@ -2602,16 +3017,21 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
}
/*
- * At this stage no more interrupts will arrive so we may safly clean
+ * At this stage no more interrupts will arrive so we may safely clean
* the queueable objects here in case they failed to get cleaned so far.
*/
- bnx2x_squeeze_objects(bp);
+ if (IS_PF(bp))
+ bnx2x_squeeze_objects(bp);
/* There should be no more pending SP commands at this stage */
bp->sp_state = 0;
bp->port.pmf = 0;
+ /* clear pending work in rtnl task */
+ bp->sp_rtnl_state = 0;
+ smp_mb();
+
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
if (CNIC_LOADED(bp))
@@ -2619,19 +3039,27 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
for_each_rx_queue(bp, i)
bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
- if (CNIC_LOADED(bp)) {
+ bnx2x_free_fp_mem(bp);
+ if (CNIC_LOADED(bp))
bnx2x_free_fp_mem_cnic(bp);
- bnx2x_free_mem_cnic(bp);
+
+ if (IS_PF(bp)) {
+ if (CNIC_LOADED(bp))
+ bnx2x_free_mem_cnic(bp);
}
bnx2x_free_mem(bp);
bp->state = BNX2X_STATE_CLOSED;
bp->cnic_loaded = false;
+ /* Clear driver version indication in shmem */
+ if (IS_PF(bp))
+ bnx2x_update_mng_version(bp);
+
/* Check if there are pending parity attentions. If there are - set
* RECOVERY_IN_PROGRESS.
*/
- if (bnx2x_chk_parity_attn(bp, &global, false)) {
+ if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
bnx2x_set_reset_in_progress(bp);
/* Set RESET_IS_GLOBAL if needed */
@@ -2639,11 +3067,12 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
bnx2x_set_reset_global(bp);
}
-
/* The last driver must disable a "close the gate" if there is no
* parity attention or "process kill" pending.
*/
- if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
+ if (IS_PF(bp) &&
+ !bnx2x_clear_pf_load(bp) &&
+ bnx2x_reset_is_done(bp, BP_PATH(bp)))
bnx2x_disable_close_the_gate(bp);
DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
@@ -2656,16 +3085,16 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
u16 pmcsr;
/* If there is no power capability, silently succeed */
- if (!bp->pm_cap) {
+ if (!bp->pdev->pm_cap) {
BNX2X_DEV_INFO("No power capability. Breaking.\n");
return 0;
}
- pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
+ pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
switch (state) {
case PCI_D0:
- pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+ pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
PCI_PM_CTRL_PME_STATUS));
@@ -2689,7 +3118,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
if (bp->wol)
pmcsr |= PCI_PM_CTRL_PME_ENABLE;
- pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+ pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
pmcsr);
/* No more memory access after this point until
@@ -2707,7 +3136,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
/*
* net_device service functions
*/
-int bnx2x_poll(struct napi_struct *napi, int budget)
+static int bnx2x_poll(struct napi_struct *napi, int budget)
{
int work_done = 0;
u8 cos;
@@ -2722,22 +3151,26 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
return 0;
}
#endif
+ if (!bnx2x_fp_lock_napi(fp))
+ return work_done;
for_each_cos_in_tx_queue(fp, cos)
if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
-
if (bnx2x_has_rx_work(fp)) {
work_done += bnx2x_rx_int(fp, budget - work_done);
/* must not complete if we consumed full budget */
- if (work_done >= budget)
+ if (work_done >= budget) {
+ bnx2x_fp_unlock_napi(fp);
break;
+ }
}
/* Fall out from the NAPI loop if needed */
- if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+ if (!bnx2x_fp_unlock_napi(fp) &&
+ !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
/* No need to update SB for FCoE L2 ring as long as
* it's connected to the default SB and the SB
@@ -2779,15 +3212,41 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
return work_done;
}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+/* must be called with local_bh_disable()d */
+int bnx2x_low_latency_recv(struct napi_struct *napi)
+{
+ struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
+ napi);
+ struct bnx2x *bp = fp->bp;
+ int found = 0;
+
+ if ((bp->state == BNX2X_STATE_CLOSED) ||
+ (bp->state == BNX2X_STATE_ERROR) ||
+ (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
+ return LL_FLUSH_FAILED;
+
+ if (!bnx2x_fp_lock_poll(fp))
+ return LL_FLUSH_BUSY;
+
+ if (bnx2x_has_rx_work(fp))
+ found = bnx2x_rx_int(fp, 4);
+
+ bnx2x_fp_unlock_poll(fp);
+
+ return found;
+}
+#endif
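
The bnx2x_fp_lock_poll()/bnx2x_fp_lock_napi() helpers (defined in the header) arbitrate ownership of a ring between the NAPI softirq and a syscall-context busy poller. A simplified sketch of that arbitration, assuming a bare three-state owner word - the driver's real helpers also record contention ("yield") so the loser can reschedule:

#include <stdatomic.h>
#include <stdbool.h>

enum fp_owner { FP_IDLE, FP_NAPI, FP_POLL };

static bool fp_lock_poll(atomic_int *owner)
{
	int expect = FP_IDLE;	/* only grab a completely idle ring */
	return atomic_compare_exchange_strong(owner, &expect, FP_POLL);
}

static bool fp_lock_napi(atomic_int *owner)
{
	int expect = FP_IDLE;	/* NAPI backs off while a poller owns it */
	return atomic_compare_exchange_strong(owner, &expect, FP_NAPI);
}

static void fp_unlock(atomic_int *owner)
{
	atomic_store(owner, FP_IDLE);
}
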
+
/* we split the first BD into headers and data BDs
* to ease the pain of our fellow microcode engineers
* we use one mapping for both BDs
*/
-static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
- struct bnx2x_fp_txdata *txdata,
- struct sw_tx_bd *tx_buf,
- struct eth_tx_start_bd **tx_bd, u16 hlen,
- u16 bd_prod, int nbd)
+static u16 bnx2x_tx_split(struct bnx2x *bp,
+ struct bnx2x_fp_txdata *txdata,
+ struct sw_tx_bd *tx_buf,
+ struct eth_tx_start_bd **tx_bd, u16 hlen,
+ u16 bd_prod)
{
struct eth_tx_start_bd *h_tx_bd = *tx_bd;
struct eth_tx_bd *d_tx_bd;
@@ -2795,11 +3254,10 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
int old_len = le16_to_cpu(h_tx_bd->nbytes);
/* first fix first BD */
- h_tx_bd->nbd = cpu_to_le16(nbd);
h_tx_bd->nbytes = cpu_to_le16(hlen);
- DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
- h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
+ DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
+ h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
/* now get a new data BD
* (after the pbd) and fill it */
@@ -2826,43 +3284,66 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
return bd_prod;
}
-static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
+#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
+#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
+static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
+ __sum16 tsum = (__force __sum16) csum;
+
if (fix > 0)
- csum = (u16) ~csum_fold(csum_sub(csum,
- csum_partial(t_header - fix, fix, 0)));
+ tsum = ~csum_fold(csum_sub((__force __wsum) csum,
+ csum_partial(t_header - fix, fix, 0)));
else if (fix < 0)
- csum = (u16) ~csum_fold(csum_add(csum,
- csum_partial(t_header, -fix, 0)));
+ tsum = ~csum_fold(csum_add((__force __wsum) csum,
+ csum_partial(t_header, -fix, 0)));
- return swab16(csum);
+ return bswab16(tsum);
}
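
bnx2x_csum_fix() leans on the linearity of the Internet checksum: a sum that started `fix` bytes too early (fix > 0) or too late (fix < 0) is corrected by subtracting or adding the partial sum of just those bytes, then folding; the final bswab16 puts the result in the byte order the chip expects. The subtraction identity, verified in userspace (one's-complement subtract = add the 16-bit complement, then fold):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Folded one's-complement sum (a csum_partial + csum_fold analogue). */
static uint32_t ocsum(const uint8_t *p, size_t len)
{
	uint32_t sum = 0;

	for (size_t i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)p[i] << 8 | p[i + 1];
	if (len & 1)
		sum += (uint32_t)p[len - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

int main(void)
{
	uint8_t buf[64];
	memset(buf, 0x11, sizeof(buf));	/* deterministic, non-zero sums */

	uint32_t whole  = ocsum(buf, 64);	/* summed 8 bytes too early */
	uint32_t prefix = ocsum(buf, 8);	/* the 8 extra bytes */
	uint32_t tail   = ocsum(buf + 8, 56);	/* what we actually want */

	uint32_t fixed = whole + (~prefix & 0xffff);	/* csum_sub analogue */
	while (fixed >> 16)
		fixed = (fixed & 0xffff) + (fixed >> 16);

	return fixed == tail ? 0 : 1;	/* exits 0: the identity holds */
}
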
-static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
+static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
u32 rc;
+ __u8 prot = 0;
+ __be16 protocol;
if (skb->ip_summed != CHECKSUM_PARTIAL)
- rc = XMIT_PLAIN;
+ return XMIT_PLAIN;
- else {
- if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
- rc = XMIT_CSUM_V6;
- if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
- rc |= XMIT_CSUM_TCP;
+ protocol = vlan_get_protocol(skb);
+ if (protocol == htons(ETH_P_IPV6)) {
+ rc = XMIT_CSUM_V6;
+ prot = ipv6_hdr(skb)->nexthdr;
+ } else {
+ rc = XMIT_CSUM_V4;
+ prot = ip_hdr(skb)->protocol;
+ }
+ if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
+ if (inner_ip_hdr(skb)->version == 6) {
+ rc |= XMIT_CSUM_ENC_V6;
+ if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ rc |= XMIT_CSUM_TCP;
} else {
- rc = XMIT_CSUM_V4;
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ rc |= XMIT_CSUM_ENC_V4;
+ if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
rc |= XMIT_CSUM_TCP;
}
}
-
- if (skb_is_gso_v6(skb))
- rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
- else if (skb_is_gso(skb))
- rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
+ if (prot == IPPROTO_TCP)
+ rc |= XMIT_CSUM_TCP;
+
+ if (skb_is_gso(skb)) {
+ if (skb_is_gso_v6(skb)) {
+ rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
+ if (rc & XMIT_CSUM_ENC)
+ rc |= XMIT_GSO_ENC_V6;
+ } else {
+ rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
+ if (rc & XMIT_CSUM_ENC)
+ rc |= XMIT_GSO_ENC_V4;
+ }
+ }
return rc;
}
@@ -2947,14 +3428,23 @@ exit_lbl:
}
#endif
-static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
- u32 xmit_type)
+static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
+ u32 xmit_type)
{
+ struct ipv6hdr *ipv6;
+
*parsing_data |= (skb_shinfo(skb)->gso_size <<
ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
ETH_TX_PARSE_BD_E2_LSO_MSS;
- if ((xmit_type & XMIT_GSO_V6) &&
- (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+
+ if (xmit_type & XMIT_GSO_ENC_V6)
+ ipv6 = inner_ipv6_hdr(skb);
+ else if (xmit_type & XMIT_GSO_V6)
+ ipv6 = ipv6_hdr(skb);
+ else
+ ipv6 = NULL;
+
+ if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
@@ -2965,28 +3455,67 @@ static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
* @pbd: parse BD
* @xmit_type: xmit flags
*/
-static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
- struct eth_tx_parse_bd_e1x *pbd,
- u32 xmit_type)
+static void bnx2x_set_pbd_gso(struct sk_buff *skb,
+ struct eth_tx_parse_bd_e1x *pbd,
+ struct eth_tx_start_bd *tx_start_bd,
+ u32 xmit_type)
{
pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
- pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
- pbd->tcp_flags = pbd_tcp_flags(skb);
+ pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
+ pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
if (xmit_type & XMIT_GSO_V4) {
- pbd->ip_id = swab16(ip_hdr(skb)->id);
+ pbd->ip_id = bswab16(ip_hdr(skb)->id);
pbd->tcp_pseudo_csum =
- swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
- ip_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0));
+ bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
- } else
+ /* GSO on 57710/57711 needs FW to calculate IP checksum */
+ tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
+ } else {
pbd->tcp_pseudo_csum =
- swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0));
+ bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
+ }
- pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
+ pbd->global_data |=
+ cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
+}
+
+/**
+ * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
+ *
+ * @bp: driver handle
+ * @skb: packet skb
+ * @parsing_data: data to be updated
+ * @xmit_type: xmit flags
+ *
+ * 57712/578xx related, when skb has encapsulation
+ */
+static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
+ u32 *parsing_data, u32 xmit_type)
+{
+ *parsing_data |=
+ ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
+
+ if (xmit_type & XMIT_CSUM_TCP) {
+ *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
+
+ return skb_inner_transport_header(skb) +
+ inner_tcp_hdrlen(skb) - skb->data;
+ }
+
+ /* We support checksum offload for TCP and UDP only.
+ * No need to pass the UDP header length - it's a constant.
+ */
+ return skb_inner_transport_header(skb) +
+ sizeof(struct udphdr) - skb->data;
}
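/* Editor's note -- a worked example for the return value above: it is
 * the byte count from skb->data to the end of the inner L4 header,
 * i.e. what the FW must treat as packet headers. For Ethernet(14) +
 * outer IPv4(20) + bare GRE(4) + inner IPv4(20) + TCP(20), assuming no
 * options anywhere:
 *
 *	skb_inner_transport_header(skb) - skb->data = 58
 *	58 + inner_tcp_hdrlen(skb)                  = 78 bytes
 */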
/**
@@ -2997,15 +3526,15 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
* @parsing_data: data to be updated
* @xmit_type: xmit flags
*
- * 57712 related
+ * 57712/578xx related
*/
-static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
- u32 *parsing_data, u32 xmit_type)
+static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
+ u32 *parsing_data, u32 xmit_type)
{
*parsing_data |=
- ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
- ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
- ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
+ ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
if (xmit_type & XMIT_CSUM_TCP) {
*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
@@ -3013,25 +3542,22 @@ static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
- } else
- /* We support checksum offload for TCP and UDP only.
- * No need to pass the UDP header length - it's a constant.
- */
- return skb_transport_header(skb) +
- sizeof(struct udphdr) - skb->data;
+ }
+ /* We support checksum offload for TCP and UDP only.
+ * No need to pass the UDP header length - it's a constant.
+ */
+ return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
}
-static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
- struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
+/* set FW indication according to inner or outer protocols if tunneled */
+static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+ struct eth_tx_start_bd *tx_start_bd,
+ u32 xmit_type)
{
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
- if (xmit_type & XMIT_CSUM_V4)
- tx_start_bd->bd_flags.as_bitfield |=
- ETH_TX_BD_FLAGS_IP_CSUM;
- else
- tx_start_bd->bd_flags.as_bitfield |=
- ETH_TX_BD_FLAGS_IPV6;
+ if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
+ tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
if (!(xmit_type & XMIT_CSUM_TCP))
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
@@ -3045,16 +3571,17 @@ static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
* @pbd: parse BD to be updated
* @xmit_type: xmit flags
*/
-static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
- struct eth_tx_parse_bd_e1x *pbd,
- u32 xmit_type)
+static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+ struct eth_tx_parse_bd_e1x *pbd,
+ u32 xmit_type)
{
u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
/* for now NS flag is not used in Linux */
pbd->global_data =
- (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
- ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
+ cpu_to_le16(hlen |
+ ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
+ ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
pbd->ip_hlen_w = (skb_transport_header(skb) -
skb_network_header(skb)) >> 1;
@@ -3071,7 +3598,7 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
hlen = hlen*2;
if (xmit_type & XMIT_CSUM_TCP) {
- pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
+ pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
} else {
s8 fix = SKB_CS_OFF(skb); /* signed! */
@@ -3092,6 +3619,77 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
return hlen;
}
+static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
+ struct eth_tx_parse_bd_e2 *pbd_e2,
+ struct eth_tx_parse_2nd_bd *pbd2,
+ u16 *global_data,
+ u32 xmit_type)
+{
+ u16 hlen_w = 0;
+ u8 outerip_off, outerip_len = 0;
+
+ /* from outer IP to transport */
+ hlen_w = (skb_inner_transport_header(skb) -
+ skb_network_header(skb)) >> 1;
+
+ /* transport len */
+ hlen_w += inner_tcp_hdrlen(skb) >> 1;
+
+ pbd2->fw_ip_hdr_to_payload_w = hlen_w;
+
+ /* outer IP header info */
+ if (xmit_type & XMIT_CSUM_V4) {
+ struct iphdr *iph = ip_hdr(skb);
+ u32 csum = (__force u32)(~iph->check) -
+ (__force u32)iph->tot_len -
+ (__force u32)iph->frag_off;
+
+ pbd2->fw_ip_csum_wo_len_flags_frag =
+ bswab16(csum_fold((__force __wsum)csum));
+ } else {
+ pbd2->fw_ip_hdr_to_payload_w =
+ hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
+ }
+
+ pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
+
+ pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
+
+ if (xmit_type & XMIT_GSO_V4) {
+ pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
+
+ pbd_e2->data.tunnel_data.pseudo_csum =
+ bswab16(~csum_tcpudp_magic(
+ inner_ip_hdr(skb)->saddr,
+ inner_ip_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
+
+ outerip_len = ip_hdr(skb)->ihl << 1;
+ } else {
+ pbd_e2->data.tunnel_data.pseudo_csum =
+ bswab16(~csum_ipv6_magic(
+ &inner_ipv6_hdr(skb)->saddr,
+ &inner_ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
+ }
+
+ outerip_off = (skb_network_header(skb) - skb->data) >> 1;
+
+ *global_data |=
+ outerip_off |
+ (!!(xmit_type & XMIT_CSUM_V6) <<
+ ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
+ (outerip_len <<
+ ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
+ ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
+ ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
+
+ if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
+ SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
+ pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
+ }
+}
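/* Editor's note on the outer-IPv4 arithmetic above (a model, not new
 * code): iph->check is ~fold(sum of all header words), so taking
 * ~check and subtracting tot_len and frag_off in one's-complement
 * arithmetic leaves a folded sum of the header minus those two fields:
 *
 *	fw_ip_csum_wo_len_flags_frag = fold(~check - tot_len - frag_off)
 *
 * The FW can then patch per-segment length/fragment values into this
 * partial checksum instead of re-summing the whole outer header for
 * every generated segment.
 */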
+
/* called with netif_tx_lock
* bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
* netif_wake_queue()
@@ -3107,6 +3705,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
+ struct eth_tx_parse_2nd_bd *pbd2 = NULL;
u32 pbd_e2_parsing_data = 0;
u16 pkt_prod, bd_prod;
int nbd, txq_index;
@@ -3134,7 +3733,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
txq_index, fp_index, txdata_index); */
- /* enable this debug print to view the tranmission details
+ /* enable this debug print to view the transmission details
DP(NETIF_MSG_TX_QUEUED,
"transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
txdata->cid, fp_index, txdata_index, txdata, fp); */
@@ -3151,17 +3750,18 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
- bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
- netif_tx_stop_queue(txq);
+ bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
+ netif_tx_stop_queue(txq);
BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
DP(NETIF_MSG_TX_QUEUED,
- "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
+ "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
- ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
+ ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
+ skb->len);
eth = (struct ethhdr *)skb->data;
@@ -3173,7 +3773,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
mac_type = MULTICAST_ADDRESS;
}
-#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
+#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
/* First, check if we need to linearize the skb (due to FW
restrictions). No need to check fragmentation if page size > 8K
(there will be no violation to FW restrictions) */
@@ -3221,12 +3821,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
first_bd = tx_start_bd;
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
- SET_FLAG(tx_start_bd->general_data,
- ETH_TX_START_BD_PARSE_NBDS,
- 0);
- /* header nbd */
- SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
+ /* header nbd: indirectly zero other flags! */
+ tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
/* remember the first BD of the packet */
tx_buf->first_bd = txdata->tx_bd_prod;
@@ -3242,8 +3839,19 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
cpu_to_le16(vlan_tx_tag_get(skb));
tx_start_bd->bd_flags.as_bitfield |=
(X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
- } else
- tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+ } else {
+ /* when transmitting in a vf, start bd must hold the ethertype
+ * for fw to enforce it
+ */
+ if (IS_VF(bp))
+ tx_start_bd->vlan_or_ethertype =
+ cpu_to_le16(ntohs(eth->h_proto));
+ else
+ /* used by FW for packet accounting */
+ tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+ }
+
+ nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
/* turn on parsing and get a BD */
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
@@ -3254,23 +3862,68 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!CHIP_IS_E1x(bp)) {
pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
- /* Set PBD in checksum offload case */
- if (xmit_type & XMIT_CSUM)
+
+ if (xmit_type & XMIT_CSUM_ENC) {
+ u16 global_data = 0;
+
+ /* Set PBD in enc checksum offload case */
+ hlen = bnx2x_set_pbd_csum_enc(bp, skb,
+ &pbd_e2_parsing_data,
+ xmit_type);
+
+ /* turn on 2nd parsing and get a BD */
+ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+ pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
+
+ memset(pbd2, 0, sizeof(*pbd2));
+
+ pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
+ (skb_inner_network_header(skb) -
+ skb->data) >> 1;
+
+ if (xmit_type & XMIT_GSO_ENC)
+ bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
+ &global_data,
+ xmit_type);
+
+ pbd2->global_data = cpu_to_le16(global_data);
+
+ /* add addition parse BD indication to start BD */
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_PARSE_NBDS, 1);
+ /* set encapsulation flag in start BD */
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_TUNNEL_EXIST, 1);
+
+ tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
+
+ nbd++;
+ } else if (xmit_type & XMIT_CSUM) {
+ /* Set PBD in checksum offload case w/o encapsulation */
hlen = bnx2x_set_pbd_csum_e2(bp, skb,
&pbd_e2_parsing_data,
xmit_type);
- if (IS_MF_SI(bp)) {
- /*
- * fill in the MAC addresses in the PBD - for local
- * switching
- */
- bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
- &pbd_e2->src_mac_addr_mid,
- &pbd_e2->src_mac_addr_lo,
+ }
+
+ /* Add the macs to the parsing BD if this is a vf or if
+ * Tx Switching is enabled.
+ */
+ if (IS_VF(bp)) {
+ /* override GRE parameters in BD */
+ bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
+ &pbd_e2->data.mac_addr.src_mid,
+ &pbd_e2->data.mac_addr.src_lo,
eth->h_source);
- bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
- &pbd_e2->dst_mac_addr_mid,
- &pbd_e2->dst_mac_addr_lo,
+
+ bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
+ &pbd_e2->data.mac_addr.dst_mid,
+ &pbd_e2->data.mac_addr.dst_lo,
+ eth->h_dest);
+ } else if (bp->flags & TX_SWITCHING) {
+ bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
+ &pbd_e2->data.mac_addr.dst_mid,
+ &pbd_e2->data.mac_addr.dst_lo,
eth->h_dest);
}
@@ -3292,14 +3945,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Setup the data pointer of the first BD of the packet */
tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
- nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
pkt_size = tx_start_bd->nbytes;
DP(NETIF_MSG_TX_QUEUED,
- "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
+ "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
- le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
+ le16_to_cpu(tx_start_bd->nbytes),
tx_start_bd->bd_flags.as_bitfield,
le16_to_cpu(tx_start_bd->vlan_or_ethertype));
@@ -3312,15 +3964,17 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
- if (unlikely(skb_headlen(skb) > hlen))
+ if (unlikely(skb_headlen(skb) > hlen)) {
+ nbd++;
bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
&tx_start_bd, hlen,
- bd_prod, ++nbd);
+ bd_prod);
+ }
if (!CHIP_IS_E1x(bp))
bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
xmit_type);
else
- bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
+ bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
}
/* Set the PBD's parsing_data field if not zero
@@ -3405,9 +4059,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (pbd_e2)
DP(NETIF_MSG_TX_QUEUED,
"PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
- pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
- pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
- pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
+ pbd_e2,
+ pbd_e2->data.mac_addr.dst_hi,
+ pbd_e2->data.mac_addr.dst_mid,
+ pbd_e2->data.mac_addr.dst_lo,
+ pbd_e2->data.mac_addr.src_hi,
+ pbd_e2->data.mac_addr.src_mid,
+ pbd_e2->data.mac_addr.src_lo,
pbd_e2->parsing_data);
DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
@@ -3467,7 +4125,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
/* setup tc must be called under rtnl lock */
ASSERT_RTNL();
- /* no traffic classes requested. aborting */
+ /* no traffic classes requested. Aborting */
if (!num_tc) {
netdev_reset_tc(dev);
return 0;
@@ -3475,7 +4133,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
/* requested to support too many traffic classes */
if (num_tc > bp->max_cos) {
- BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
+ BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
num_tc, bp->max_cos);
return -EINVAL;
}
@@ -3494,8 +4152,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
prio, bp->prio_to_cos[prio]);
}
-
- /* Use this configuration to diffrentiate tc0 from other COSes
+ /* Use this configuration to differentiate tc0 from other COSes
This can be used for ets or pfc, and save the effort of setting
up a multi-class queue disc or negotiating DCBX with a switch
netdev_set_prio_tc_map(dev, 0, 0);
@@ -3542,7 +4199,6 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
return rc;
}
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
if (netif_running(dev))
@@ -3617,7 +4273,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
/* end of fastpath */
}
-void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
+static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
{
int i;
for_each_cnic_queue(bp, i)
@@ -3662,7 +4318,7 @@ static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
* fp->eth_q_stats.rx_skb_alloc_failed = 0
*/
for (i = 0; i < rx_ring_size; i++) {
- if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
+ if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
failure_cnt++;
continue;
}
@@ -3738,19 +4394,24 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
} else /* if rx_ring_size specified - use it */
rx_ring_size = bp->rx_ring_size;
+ DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
+
/* Common */
sb = &bnx2x_fp(bp, index, status_blk);
if (!IS_FCOE_IDX(index)) {
/* status blocks */
- if (!CHIP_IS_E1x(bp))
- BNX2X_PCI_ALLOC(sb->e2_sb,
- &bnx2x_fp(bp, index, status_blk_mapping),
- sizeof(struct host_hc_status_block_e2));
- else
- BNX2X_PCI_ALLOC(sb->e1x_sb,
- &bnx2x_fp(bp, index, status_blk_mapping),
- sizeof(struct host_hc_status_block_e1x));
+ if (!CHIP_IS_E1x(bp)) {
+ sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e2));
+ if (!sb->e2_sb)
+ goto alloc_mem_err;
+ } else {
+ sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e1x));
+ if (!sb->e1x_sb)
+ goto alloc_mem_err;
+ }
}
/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
@@ -3769,34 +4430,49 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
"allocating tx memory of fp %d cos %d\n",
index, cos);
- BNX2X_ALLOC(txdata->tx_buf_ring,
- sizeof(struct sw_tx_bd) * NUM_TX_BD);
- BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
- &txdata->tx_desc_mapping,
- sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+ txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
+ sizeof(struct sw_tx_bd),
+ GFP_KERNEL);
+ if (!txdata->tx_buf_ring)
+ goto alloc_mem_err;
+ txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
+ sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+ if (!txdata->tx_desc_ring)
+ goto alloc_mem_err;
}
}
/* Rx */
if (!skip_rx_queue(bp, index)) {
/* fastpath rx rings: rx_buf rx_desc rx_comp */
- BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
- sizeof(struct sw_rx_bd) * NUM_RX_BD);
- BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
- &bnx2x_fp(bp, index, rx_desc_mapping),
- sizeof(struct eth_rx_bd) * NUM_RX_BD);
+ bnx2x_fp(bp, index, rx_buf_ring) =
+ kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
+ if (!bnx2x_fp(bp, index, rx_buf_ring))
+ goto alloc_mem_err;
+ bnx2x_fp(bp, index, rx_desc_ring) =
+ BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
+ sizeof(struct eth_rx_bd) * NUM_RX_BD);
+ if (!bnx2x_fp(bp, index, rx_desc_ring))
+ goto alloc_mem_err;
- BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
- &bnx2x_fp(bp, index, rx_comp_mapping),
- sizeof(struct eth_fast_path_rx_cqe) *
- NUM_RCQ_BD);
+ /* Seed all CQEs by 1s */
+ bnx2x_fp(bp, index, rx_comp_ring) =
+ BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
+ sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
+ if (!bnx2x_fp(bp, index, rx_comp_ring))
+ goto alloc_mem_err;
/* SGE ring */
- BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
- sizeof(struct sw_rx_page) * NUM_RX_SGE);
- BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
- &bnx2x_fp(bp, index, rx_sge_mapping),
- BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+ bnx2x_fp(bp, index, rx_page_ring) =
+ kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
+ GFP_KERNEL);
+ if (!bnx2x_fp(bp, index, rx_page_ring))
+ goto alloc_mem_err;
+ bnx2x_fp(bp, index, rx_sge_ring) =
+ BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
+ BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+ if (!bnx2x_fp(bp, index, rx_sge_ring))
+ goto alloc_mem_err;
/* RX BD ring */
bnx2x_set_next_page_rx_bd(fp);
@@ -3828,7 +4504,7 @@ alloc_mem_err:
return 0;
}
-int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
+static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
{
if (!NO_FCOE(bp))
/* FCoE */
@@ -3841,7 +4517,7 @@ int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
return 0;
}
-int bnx2x_alloc_fp_mem(struct bnx2x *bp)
+static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
int i;
@@ -3863,6 +4539,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
WARN_ON(delta < 0);
+ bnx2x_shrink_eth_fp(bp, delta);
if (CNIC_SUPPORT(bp))
/* move non eth FPs next to last eth FP
* must be done in that order
@@ -3883,7 +4560,10 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
- kfree(bp->fp->tpa_info);
+ int i;
+
+ for (i = 0; i < bp->fp_array_size; i++)
+ kfree(bp->fp[i].tpa_info);
kfree(bp->fp);
kfree(bp->sp_objs);
kfree(bp->fp_stats);
@@ -3903,18 +4583,22 @@ int bnx2x_alloc_mem_bp(struct bnx2x *bp)
/*
* The biggest MSI-X table we might need is as a maximum number of fast
- * path IGU SBs plus default SB (for PF).
+ * path IGU SBs plus default SB (for PF only).
*/
- msix_table_size = bp->igu_sb_cnt + 1;
+ msix_table_size = bp->igu_sb_cnt;
+ if (IS_PF(bp))
+ msix_table_size++;
+ BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
/* fp array: RSS plus CNIC related L2 queues */
fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
- BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
+ bp->fp_array_size = fp_array_size;
+ BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
- fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
+ fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
if (!fp)
goto alloc_err;
- for (i = 0; i < fp_array_size; i++) {
+ for (i = 0; i < bp->fp_array_size; i++) {
fp[i].tpa_info =
kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
sizeof(struct bnx2x_agg_info), GFP_KERNEL);
@@ -3925,13 +4609,13 @@ int bnx2x_alloc_mem_bp(struct bnx2x *bp)
bp->fp = fp;
/* allocate sp objs */
- bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
+ bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
GFP_KERNEL);
if (!bp->sp_objs)
goto alloc_err;
/* allocate fp_stats */
- bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
+ bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
GFP_KERNEL);
if (!bp->fp_stats)
goto alloc_err;
@@ -3962,7 +4646,6 @@ int bnx2x_alloc_mem_bp(struct bnx2x *bp)
alloc_err:
bnx2x_free_mem_bp(bp);
return -ENOMEM;
-
}
int bnx2x_reload_if_running(struct net_device *dev)
@@ -4004,13 +4687,12 @@ int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
}
return sel_phy_idx;
-
}
int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
/*
- * The selected actived PHY is always after swapping (in case PHY
+ * The selected activated PHY is always after swapping (in case PHY
* swapping is enabled). So when swapping is enabled, we need to reverse
* the configuration
*/
@@ -4092,6 +4774,7 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
struct bnx2x *bp = netdev_priv(dev);
u32 flags = bp->flags;
+ u32 changes;
bool bnx2x_reload = false;
if (features & NETIF_F_LRO)
@@ -4116,10 +4799,16 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
}
}
- if (flags ^ bp->flags) {
- bp->flags = flags;
+ changes = flags ^ bp->flags;
+
+ /* if GRO is changed while LRO is enabled, don't force a reload */
+ if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
+ changes &= ~GRO_ENABLE_FLAG;
+
+ if (changes)
bnx2x_reload = true;
- }
+
+ bp->flags = flags;
if (bnx2x_reload) {
if (bp->recovery_state == BNX2X_RECOVERY_DONE)
@@ -4139,12 +4828,8 @@ void bnx2x_tx_timeout(struct net_device *dev)
bnx2x_panic();
#endif
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
-
/* This allows the netif to be shutdown gracefully before resetting */
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
}
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -4214,10 +4899,14 @@ int bnx2x_resume(struct pci_dev *pdev)
return rc;
}
-
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
u32 cid)
{
+ if (!cxt) {
+ BNX2X_ERR("bad context pointer %p\n", cxt);
+ return;
+ }
+
/* ustorm cxt validation */
cxt->ustorm_ag_context.cdu_usage =
CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
@@ -4232,7 +4921,6 @@ static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
u8 fw_sb_id, u8 sb_index,
u8 ticks)
{
-
u32 addr = BAR_CSTRORM_INTMEM +
CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
REG_WR8(bp, addr, ticks);
@@ -4248,11 +4936,11 @@ static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
u32 addr = BAR_CSTRORM_INTMEM +
CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
- u16 flags = REG_RD16(bp, addr);
+ u8 flags = REG_RD8(bp, addr);
/* clear and set */
flags &= ~HC_INDEX_DATA_HC_ENABLED;
flags |= enable_flag;
- REG_WR16(bp, addr, flags);
+ REG_WR8(bp, addr, flags);
DP(NETIF_MSG_IFUP,
"port %x fw_sb_id %d sb_index %d disable %d\n",
port, fw_sb_id, sb_index, disable);
@@ -4269,3 +4957,15 @@ void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
disable = disable ? 1 : (usec ? 0 : 1);
storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}
+
+void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
+ u32 verbose)
+{
+ smp_mb__before_atomic();
+ set_bit(flag, &bp->sp_rtnl_state);
+ smp_mb__after_atomic();
+ DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
+ flag);
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+}
+EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);
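/* Editor's note: callers now replace the open-coded pattern
 *
 *	smp_mb__before_atomic();
 *	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
 *	smp_mb__after_atomic();
 *	schedule_delayed_work(&bp->sp_rtnl_task, 0);
 *
 * with a single bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0),
 * as the bnx2x_tx_timeout() and bnx2x_dcbx_set_params() hunks show.
 */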
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 0991534f61d..571427c7226 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -1,12 +1,12 @@
/* bnx2x_cmn.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
@@ -21,15 +21,14 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-
+#include <linux/irq.h>
#include "bnx2x.h"
+#include "bnx2x_sriov.h"
/* This is used as a replacement for an MCP if it's not present */
-extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
-
-extern int num_queues;
-extern int int_mode;
+extern int bnx2x_load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
+extern int bnx2x_num_queues;
/************************ Macros ********************************/
#define BNX2X_PCI_FREE(x, y, size) \
@@ -49,20 +48,26 @@ extern int int_mode;
} \
} while (0)
-#define BNX2X_PCI_ALLOC(x, y, size) \
- do { \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
- if (x == NULL) \
- goto alloc_mem_err; \
- memset((void *)x, 0, size); \
- } while (0)
-
-#define BNX2X_ALLOC(x, size) \
- do { \
- x = kzalloc(size, GFP_KERNEL); \
- if (x == NULL) \
- goto alloc_mem_err; \
- } while (0)
+#define BNX2X_PCI_ALLOC(y, size) \
+({ \
+ void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+ if (x) \
+ DP(NETIF_MSG_HW, \
+ "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
+ (unsigned long long)(*y), x); \
+ x; \
+})
+#define BNX2X_PCI_FALLOC(y, size) \
+({ \
+ void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+ if (x) { \
+ memset(x, 0xff, size); \
+ DP(NETIF_MSG_HW, \
+ "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n", \
+ (unsigned long long)(*y), x); \
+ } \
+ x; \
+})
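/* Editor's sketch of the new calling convention: the macros are now
 * expressions returning the virtual address (NULL on failure), so
 * error handling moves to each call site instead of a hidden goto:
 *
 *	txdata->tx_desc_ring =
 *		BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
 *				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
 *	if (!txdata->tx_desc_ring)
 *		goto alloc_mem_err;
 *
 * BNX2X_PCI_FALLOC is identical but fills the region with 0xff, which
 * the RX path uses to seed CQE completion markers (see below).
 */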
/*********************** Interfaces ****************************
* Functions that need to be implemented by each driver version
@@ -94,9 +99,10 @@ void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link);
* @rss_obj: RSS object to use
* @ind_table: indirection table to configure
* @config_hash: re-configure RSS hash keys configuration
+ * @enable: enable or disable the configuration
*/
-int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
- bool config_hash);
+int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
+ bool config_hash, bool enable);
/**
* bnx2x__init_func_obj - init function object
@@ -196,6 +202,7 @@ void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
/* Disable transactions from chip to host */
void bnx2x_pf_disable(struct bnx2x *bp);
+int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val);
/**
* bnx2x__link_status_update - handles link status change.
@@ -293,16 +300,29 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
void bnx2x_nic_init_cnic(struct bnx2x *bp);
/**
- * bnx2x_nic_init - init driver internals.
+ * bnx2x_pre_irq_nic_init - init driver internals.
*
* @bp: driver handle
*
* Initializes:
- * - rings
+ * - fastpath object
+ * - fastpath rings
+ * etc.
+ */
+void bnx2x_pre_irq_nic_init(struct bnx2x *bp);
+
+/**
+ * bnx2x_post_irq_nic_init - init driver internals.
+ *
+ * @bp: driver handle
+ * @load_code: COMMON, PORT or FUNCTION
+ *
+ * Initializes:
* - status blocks
+ * - slowpath rings
* - etc.
*/
-void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
+void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code);
/**
* bnx2x_alloc_mem_cnic - allocate driver's memory for cnic.
*
@@ -391,33 +411,7 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);
* If bp->state is OPEN, should be called with
* netif_addr_lock_bh()
*/
-void bnx2x_set_rx_mode(struct net_device *dev);
-
-/**
- * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
- *
- * @bp: driver handle
- *
- * If bp->state is OPEN, should be called with
- * netif_addr_lock_bh().
- */
-void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
-
-/**
- * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
- *
- * @bp: driver handle
- * @cl_id: client id
- * @rx_mode_flags: rx mode configuration
- * @rx_accept_flags: rx accept configuration
- * @tx_accept_flags: tx accept configuration (tx switch)
- * @ramrod_flags: ramrod configuration
- */
-void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
- unsigned long rx_mode_flags,
- unsigned long rx_accept_flags,
- unsigned long tx_accept_flags,
- unsigned long ramrod_flags);
+void bnx2x_set_rx_mode_inner(struct bnx2x *bp);
/* Parity errors related */
void bnx2x_set_pf_load(struct bnx2x *bp);
@@ -477,13 +471,8 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
*/
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
/* Error handling */
-void bnx2x_panic_dump(struct bnx2x *bp);
-
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
-/* validate currect fw is loaded */
-bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err);
-
/* dev_close main block */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link);
@@ -496,20 +485,53 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
+int bnx2x_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivi);
+int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
+int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
+
/* select_queue callback */
-u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback);
+
+static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp,
+ u16 bd_prod, u16 rx_comp_prod,
+ u16 rx_sge_prod)
+{
+ struct ustorm_eth_rx_producers rx_prods = {0};
+ u32 i;
+
+ /* Update producers */
+ rx_prods.bd_prod = bd_prod;
+ rx_prods.cqe_prod = rx_comp_prod;
+ rx_prods.sge_prod = rx_sge_prod;
+
+ /* Make sure that the BD and SGE data is updated before updating the
+ * producers since FW might read the BD/SGE right after the producer
+ * is updated.
+ * This is only applicable for weak-ordered memory model archs such
+ * as IA-64. The following barrier is also mandatory since the FW
+ * assumes BDs must have buffers.
+ */
+ wmb();
+
+ for (i = 0; i < sizeof(rx_prods)/4; i++)
+ REG_WR(bp, fp->ustorm_rx_prods_offset + i*4,
+ ((u32 *)&rx_prods)[i]);
+
+ mmiowb(); /* keep prod updates ordered */
+
+ DP(NETIF_MSG_RX_STATUS,
+ "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
+ fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
+}
/* reload helper */
int bnx2x_reload_if_running(struct net_device *dev);
int bnx2x_change_mac_addr(struct net_device *dev, void *p);
-/* NAPI poll Rx part */
-int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
-
-void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
- u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);
-
/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);
@@ -520,13 +542,9 @@ int bnx2x_resume(struct pci_dev *pdev);
/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);
-void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
void bnx2x_free_fp_mem(struct bnx2x *bp);
-int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
-int bnx2x_alloc_fp_mem(struct bnx2x *bp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_init_rx_rings_cnic(struct bnx2x *bp);
-void bnx2x_free_skbs_cnic(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);
@@ -550,13 +568,11 @@ int bnx2x_enable_msix(struct bnx2x *bp);
int bnx2x_enable_msi(struct bnx2x *bp);
/**
- * bnx2x_poll - NAPI callback
+ * bnx2x_low_latency_recv - LL callback
*
* @napi: napi structure
- * @budget:
- *
*/
-int bnx2x_poll(struct napi_struct *napi, int budget);
+int bnx2x_low_latency_recv(struct napi_struct *napi);
/**
* bnx2x_alloc_mem_bp - allocate memories outside main driver structure
@@ -612,38 +628,6 @@ static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}
-static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp,
- struct bnx2x_fastpath *fp, u16 bd_prod,
- u16 rx_comp_prod, u16 rx_sge_prod, u32 start)
-{
- struct ustorm_eth_rx_producers rx_prods = {0};
- u32 i;
-
- /* Update producers */
- rx_prods.bd_prod = bd_prod;
- rx_prods.cqe_prod = rx_comp_prod;
- rx_prods.sge_prod = rx_sge_prod;
-
- /*
- * Make sure that the BD and SGE data is updated before updating the
- * producers since FW might read the BD/SGE right after the producer
- * is updated.
- * This is only applicable for weak-ordered memory model archs such
- * as IA-64. The following barrier is also mandatory since FW will
- * assumes BDs must have buffers.
- */
- wmb();
-
- for (i = 0; i < sizeof(rx_prods)/4; i++)
- REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]);
-
- mmiowb(); /* keep prod updates ordered */
-
- DP(NETIF_MSG_RX_STATUS,
- "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
- fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
-}
-
static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
u8 segment, u16 index, u8 op,
u8 update, u32 igu_addr)
@@ -784,16 +768,18 @@ static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
return false;
}
+#define BNX2X_IS_CQE_COMPLETED(cqe_fp) (cqe_fp->marker == 0x0)
+#define BNX2X_SEED_CQE(cqe_fp) (cqe_fp->marker = 0xFFFFFFFF)
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
- u16 rx_cons_sb;
+ u16 cons;
+ union eth_rx_cqe *cqe;
+ struct eth_fast_path_rx_cqe *cqe_fp;
- /* Tell compiler that status block fields can change */
- barrier();
- rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
- if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
- rx_cons_sb++;
- return (fp->rx_comp_cons != rx_cons_sb);
+ cons = RCQ_BD(fp->rx_comp_cons);
+ cqe = &fp->rx_comp_ring[cons];
+ cqe_fp = &cqe->fast_path_cqe;
+ return BNX2X_IS_CQE_COMPLETED(cqe_fp);
}
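/* Editor's note -- how the marker-based completion check above works,
 * sketched as the life cycle of one ring slot:
 *
 *	BNX2X_SEED_CQE(cqe_fp);		// driver: marker = 0xFFFFFFFF
 *	...				// FW writes the CQE; marker -> 0
 *	if (BNX2X_IS_CQE_COMPLETED(cqe_fp))
 *		process_cqe();		// hypothetical consumer
 *
 * Each slot thus carries its own ownership state, so RX polling no
 * longer needs to read the status-block consumer index first.
 */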
/**
@@ -819,7 +805,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
return;
dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
- SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+ SGE_PAGES, DMA_FROM_DEVICE);
__free_pages(page, PAGES_PER_SGE_SHIFT);
sw_buf->page = NULL;
@@ -827,43 +813,27 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
sge->addr_lo = 0;
}
-static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
-{
- int i;
-
- /* Add NAPI objects */
- for_each_rx_queue_cnic(bp, i)
- netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, BNX2X_NAPI_WEIGHT);
-}
-
-static inline void bnx2x_add_all_napi(struct bnx2x *bp)
-{
- int i;
-
- /* Add NAPI objects */
- for_each_eth_queue(bp, i)
- netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, BNX2X_NAPI_WEIGHT);
-}
-
static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
{
int i;
- for_each_rx_queue_cnic(bp, i)
+ for_each_rx_queue_cnic(bp, i) {
+ napi_hash_del(&bnx2x_fp(bp, i, napi));
netif_napi_del(&bnx2x_fp(bp, i, napi));
+ }
}
static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
int i;
- for_each_eth_queue(bp, i)
+ for_each_eth_queue(bp, i) {
+ napi_hash_del(&bnx2x_fp(bp, i, napi));
netif_napi_del(&bnx2x_fp(bp, i, napi));
+ }
}
-void bnx2x_set_int_mode(struct bnx2x *bp);
+int bnx2x_set_int_mode(struct bnx2x *bp);
static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
@@ -876,14 +846,6 @@ static inline void bnx2x_disable_msi(struct bnx2x *bp)
}
}
-static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
-{
- return num_queues ?
- min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
- min_t(int, netif_get_num_default_rss_queues(),
- BNX2X_MAX_QUEUES(bp));
-}
-
static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
int i, j;
@@ -939,7 +901,7 @@ static inline int func_by_vn(struct bnx2x *bp, int vn)
static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
{
- return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash);
+ return bnx2x_rss(bp, &bp->rss_conf_obj, config_hash, true);
}
/**
@@ -970,10 +932,12 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
else /* CHIP_IS_E1X */
start_params->network_cos_mode = FW_WRR;
+ start_params->gre_tunnel_mode = L2GRE_TUNNEL;
+ start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;
+
return bnx2x_func_state_change(bp, &func_params);
}
-
/**
* bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
*
@@ -982,8 +946,8 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
* @fw_lo: pointer to lower part
* @mac: pointer to MAC address
*/
-static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo,
- u8 *mac)
+static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
+ __le16 *fw_lo, u8 *mac)
{
((u8 *)fw_hi)[0] = mac[1];
((u8 *)fw_hi)[1] = mac[0];
@@ -1108,6 +1072,9 @@ static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
bnx2x_get_path_func_num(bp));
+ bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_ABS_FUNC(bp)>>1,
+ bnx2x_get_path_func_num(bp));
+
/* RSS configuration object */
bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
@@ -1125,16 +1092,6 @@ static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
return fp->cl_id;
}
-static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
-{
- struct bnx2x *bp = fp->bp;
-
- if (!CHIP_IS_E1x(bp))
- return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
- else
- return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
-}
-
static inline void bnx2x_init_txdata(struct bnx2x *bp,
struct bnx2x_fp_txdata *txdata, u32 cid,
int txq_index, __le16 *tx_cons_sb,
@@ -1158,7 +1115,6 @@ static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
{
-
/* the 'first' id is allocated for the cnic */
return bp->base_fw_ndsb;
}
@@ -1168,48 +1124,6 @@ static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
return bp->igu_base_sb;
}
-
-static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
-{
- struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
- unsigned long q_type = 0;
-
- bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
- bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
- BNX2X_FCOE_ETH_CL_ID_IDX);
- bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
- bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
- bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
- bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
- bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
- fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
- fp);
-
- DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
-
- /* qZone id equals to FW (per path) client id */
- bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
- /* init shortcut */
- bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
- bnx2x_rx_ustorm_prods_offset(fp);
-
- /* Configure Queue State object */
- __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
- __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
-
- /* No multi-CoS for FCoE L2 client */
- BUG_ON(fp->max_cos != 1);
-
- bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
- &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
- bnx2x_sp_mapping(bp, q_rdata), q_type);
-
- DP(NETIF_MSG_IFUP,
- "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
- fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
- fp->igu_sb_id);
-}
-
static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
struct bnx2x_fp_txdata *txdata)
{
@@ -1228,7 +1142,7 @@ static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
#endif
}
cnt--;
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
}
return 0;
@@ -1263,7 +1177,7 @@ static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
}
netif_addr_unlock_bh(bp->dev);
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
}
smp_mb();
@@ -1321,8 +1235,8 @@ static inline bool bnx2x_mtu_allows_gro(int mtu)
int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);
/*
- * 1. number of frags should not grow above MAX_SKB_FRAGS
- * 2. frag must fit the page
+ * 1. Number of frags should not grow above MAX_SKB_FRAGS
+ * 2. Frag must fit the page
*/
return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
}
@@ -1393,4 +1307,20 @@ static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
return false;
}
+/**
+ * bnx2x_fill_fw_str - Fill buffer with FW version string
+ *
+ * @bp: driver handle
+ * @buf: character buffer to fill with the fw name
+ * @buf_len: length of the above buffer
+ *
+ */
+void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
+
+int bnx2x_drain_tx_queues(struct bnx2x *bp);
+void bnx2x_squeeze_objects(struct bnx2x *bp);
+
+void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag,
+ u32 verbose);
+
#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 10bc093d2ca..51a952c51cb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -1,6 +1,6 @@
/* bnx2x_dcb.c: Broadcom Everest network driver.
*
- * Copyright 2009-2012 Broadcom Corporation
+ * Copyright 2009-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -12,7 +12,7 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Dmitry Kravkov
*
*/
@@ -30,10 +30,8 @@
#include "bnx2x_dcb.h"
/* forward declarations of dcbx related functions */
-static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
-static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
u32 *set_configuration_ets_pg,
u32 *pri_pg_tbl);
@@ -253,7 +251,6 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
memset(&pg_help_data, 0, sizeof(struct pg_help_data));
-
if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR))
DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_ERROR\n");
@@ -298,7 +295,6 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
struct dcbx_pfc_feature *pfc, u32 error)
{
-
if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR))
DP(BNX2X_MSG_DCB, "DCBX_LOCAL_PFC_ERROR\n");
@@ -367,7 +363,6 @@ static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
struct lldp_remote_mib *remote_mib ;
struct lldp_local_mib *local_mib;
-
switch (read_mib_type) {
case DCBX_READ_LOCAL_MIB:
mib_size = sizeof(struct lldp_local_mib);
@@ -416,6 +411,7 @@ static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
int mfw_configured = SHMEM2_HAS(bp, drv_flags) &&
GET_FLAGS(SHMEM2_RD(bp, drv_flags),
1 << DRV_FLAGS_DCB_MFW_CONFIGURED);
+
if (bp->dcbx_port_params.pfc.enabled &&
(!(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) || mfw_configured))
/*
@@ -427,30 +423,52 @@ static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
bnx2x_pfc_clear(bp);
}
-static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
+int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
{
struct bnx2x_func_state_params func_params = {NULL};
+ int rc;
func_params.f_obj = &bp->func_obj;
func_params.cmd = BNX2X_F_CMD_TX_STOP;
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n");
- return bnx2x_func_state_change(bp, &func_params);
+
+ rc = bnx2x_func_state_change(bp, &func_params);
+ if (rc) {
+ BNX2X_ERR("Unable to hold traffic for HW configuration\n");
+ bnx2x_panic();
+ }
+
+ return rc;
}
-static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
+int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
{
struct bnx2x_func_state_params func_params = {NULL};
struct bnx2x_func_tx_start_params *tx_params =
&func_params.params.tx_start;
+ int rc;
func_params.f_obj = &bp->func_obj;
func_params.cmd = BNX2X_F_CMD_TX_START;
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
bnx2x_dcbx_fw_struct(bp, tx_params);
DP(BNX2X_MSG_DCB, "START TRAFFIC\n");
- return bnx2x_func_state_change(bp, &func_params);
+
+ rc = bnx2x_func_state_change(bp, &func_params);
+ if (rc) {
+ BNX2X_ERR("Unable to resume traffic after HW configuration\n");
+ bnx2x_panic();
+ }
+
+ return rc;
}
static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp)
@@ -558,6 +576,7 @@ static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
int mfw_configured = SHMEM2_HAS(bp, drv_flags) &&
GET_FLAGS(SHMEM2_RD(bp, drv_flags),
1 << DRV_FLAGS_DCB_MFW_CONFIGURED);
+
bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
if (!bp->dcbx_port_params.ets.enabled ||
@@ -589,7 +608,7 @@ static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp)
DCBX_READ_REMOTE_MIB);
if (rc) {
- BNX2X_ERR("Faild to read remote mib from FW\n");
+ BNX2X_ERR("Failed to read remote mib from FW\n");
return rc;
}
@@ -617,7 +636,7 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
DCBX_READ_LOCAL_MIB);
if (rc) {
- BNX2X_ERR("Faild to read local mib from FW\n");
+ BNX2X_ERR("Failed to read local mib from FW\n");
return rc;
}
@@ -627,7 +646,6 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
return 0;
}
-
#ifdef BCM_DCBNL
static inline
u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent)
@@ -689,11 +707,10 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
}
/* setup tc must be called under rtnl lock, but we can't take it here
- * as we are handling an attetntion on a work queue which must be
+ * as we are handling an attention on a work queue which must be
* flushed at some rtnl-locked contexts (e.g. if down)
*/
- if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_SETUP_TC, 0);
}
void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
@@ -709,7 +726,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
*/
bnx2x_dcbnl_update_applist(bp, true);
- /* Read rmeote mib if dcbx is in the FW */
+ /* Read remote mib if dcbx is in the FW */
if (bnx2x_dcbx_read_shmem_remote_mib(bp))
return;
#endif
@@ -740,14 +757,13 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
bnx2x_dcbx_update_tc_mapping(bp);
/*
- * allow other funtions to update their netdevices
+ * allow other functions to update their netdevices
* accordingly
*/
if (IS_MF(bp))
bnx2x_link_sync_notify(bp);
- bnx2x_dcbx_stop_hw_tx(bp);
-
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_STOP, 0);
return;
}
case BNX2X_DCBX_STATE_TX_PAUSED:
@@ -755,8 +771,9 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
bnx2x_pfc_set_pfc(bp);
bnx2x_dcbx_update_ets_params(bp);
- bnx2x_dcbx_resume_hw_tx(bp);
+ /* ets may affect cmng configuration: reinit it in hw */
+ bnx2x_set_local_cmng(bp);
return;
case BNX2X_DCBX_STATE_TX_RELEASED:
DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_RELEASED\n");
@@ -862,7 +879,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i));
}
- /*For IEEE admin_recommendation_bw_precentage
+ /*For IEEE admin_recommendation_bw_percentage
*For IEEE admin_recommendation_ets_pg */
af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;
for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
@@ -894,13 +911,11 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
}
af->app.default_pri = (u8)dp->admin_default_priority;
-
}
/* Write the data. */
bnx2x_write_data(bp, (u32 *)&admin_mib, offset,
sizeof(struct lldp_admin_mib));
-
}
void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
@@ -1074,7 +1089,7 @@ static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
bool pg_found = false;
u32 i, traf_type, add_traf_type, add_pg;
u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
- struct pg_entry_help_data *data = help_data->data; /*shotcut*/
+ struct pg_entry_help_data *data = help_data->data; /*shortcut*/
/* Set to invalid */
for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
@@ -1170,7 +1185,8 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry));
else
/* If we join a group and one is strict
- * than the bw rulls */
+ * then the bw rules
+ */
cos_data->data[entry].strict =
BNX2X_DCBX_STRICT_COS_HIGHEST;
}
@@ -1179,7 +1195,6 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
BNX2X_ERR("dcbx error: Both groups must have priorities\n");
}
-
#ifndef POWER_OF_2
#define POWER_OF_2(x) ((0 != x) && (0 == (x & (x-1))))
#endif
@@ -1282,7 +1297,7 @@ static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp,
} else {
/* If there are only pauseable priorities or
* only non-pauseable,* the lower priorities go
- * to the first queue and the higherpriorities go
+ * to the first queue and the higher priorities go
* to the second queue.
*/
cos_data->data[0].pausable =
@@ -1482,7 +1497,7 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
* queue and one priority goes to the second queue.
*
* We will join this two cases:
- * if one is BW limited it will go to the secoend queue
+ * if one is BW limited it will go to the second queue
* otherwise the last priority will get it
*/
@@ -1502,7 +1517,8 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
false == b_found_strict)
/* last entry will be handled separately
* If no priority is strict then last
- * enty goes to last queue.*/
+ * entry goes to last queue.
+ */
entry = 1;
cos_data->data[entry].pri_join_mask |=
pri_tested;
@@ -1514,7 +1530,8 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
b_found_strict = true;
cos_data->data[1].pri_join_mask |= pri_tested;
/* If we join a group and one is strict
- * than the bw rulls */
+ * then the bw rules
+ */
cos_data->data[1].strict =
BNX2X_DCBX_STRICT_COS_HIGHEST;
}
@@ -1522,7 +1539,6 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
}
}
-
static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp,
struct pg_help_data *help_data,
struct dcbx_ets_feature *ets,
@@ -1531,7 +1547,6 @@ static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp,
u32 pri_join_mask,
u8 num_of_dif_pri)
{
-
/* default E2 settings */
cos_data->num_of_cos = DCBX_COS_MAX_NUM_E2;
@@ -1627,7 +1642,6 @@ static u8 bnx2x_dcbx_cee_fill_strict_pri(struct bnx2x *bp,
u8 num_spread_of_entries,
u8 strict_app_pris)
{
-
if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry,
num_spread_of_entries,
strict_app_pris)) {
@@ -1846,7 +1860,7 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
{
- /* if we need to syncronize DCBX result from prev PMF
+ /* if we need to synchronize DCBX result from prev PMF
* read it from shmem and update bp and netdev accordingly
*/
if (SHMEM2_HAS(bp, drv_flags) &&
@@ -1874,7 +1888,6 @@ void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
* dcbx negotiation.
*/
bnx2x_dcbx_update_tc_mapping(bp);
-
}
}
@@ -1904,11 +1917,13 @@ static u8 bnx2x_dcbnl_set_state(struct net_device *netdev, u8 state)
struct bnx2x *bp = netdev_priv(netdev);
DP(BNX2X_MSG_DCB, "state = %s\n", state ? "on" : "off");
+ /* Fail to set state to "enabled" if dcbx is disabled in nvram */
if (state && ((bp->dcbx_enabled == BNX2X_DCBX_ENABLED_OFF) ||
(bp->dcbx_enabled == BNX2X_DCBX_ENABLED_INVALID))) {
DP(BNX2X_MSG_DCB, "Can not set dcbx to enabled while it is disabled in nvm\n");
return 1;
}
+
bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled);
return 0;
}
@@ -1939,14 +1954,14 @@ static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio,
return;
/**
- * bw_pct ingnored - band-width percentage devision between user
+ * bw_pct ignored - bandwidth percentage division between user
* priorities within the same group is not
* standard and hence not supported
*
- * prio_type igonred - priority levels within the same group are not
+ * prio_type ignored - priority levels within the same group are not
* standard and hence are not supported. According
* to the standard pgid 15 is dedicated to strict
- * prioirty traffic (on the port level).
+ * priority traffic (on the port level).
*
* up_map ignored
*/
@@ -1991,14 +2006,14 @@ static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio,
DP(BNX2X_MSG_DCB, "prio = %d\n", prio);
/**
- * bw_pct ingnored - band-width percentage devision between user
+ * bw_pct ignored - bandwidth percentage division between user
* priorities within the same group is not
* standard and hence not supported
*
- * prio_type igonred - priority levels within the same group are not
+ * prio_type ignored - priority levels within the same group are not
* standard and hence are not supported. According
* to the standard pgid 15 is dedicated to strict
- * prioirty traffic (on the port level).
+ * priority traffic (on the port level).
*
* up_map ignored
*/
@@ -2052,7 +2067,6 @@ static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES)
return;
-
if (setting) {
bp->dcbx_config_params.admin_pfc_bitmap |= (1 << prio);
bp->dcbx_config_params.admin_pfc_tx_enable = 1;
@@ -2136,12 +2150,12 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
break;
default:
BNX2X_ERR("Non valid capability ID\n");
- rval = -EINVAL;
+ rval = 1;
break;
}
} else {
DP(BNX2X_MSG_DCB, "DCB disabled\n");
- rval = -EINVAL;
+ rval = 1;
}
DP(BNX2X_MSG_DCB, "capid %d:%x\n", capid, *cap);
@@ -2167,12 +2181,12 @@ static int bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
break;
default:
BNX2X_ERR("Non valid TC-ID\n");
- rval = -EINVAL;
+ rval = 1;
break;
}
} else {
DP(BNX2X_MSG_DCB, "DCB disabled\n");
- rval = -EINVAL;
+ rval = 1;
}
return rval;
@@ -2185,7 +2199,7 @@ static int bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num)
return -EINVAL;
}
-static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
+static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
{
struct bnx2x *bp = netdev_priv(netdev);
DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled);
@@ -2368,31 +2382,34 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
case DCB_FEATCFG_ATTR_PG:
if (bp->dcbx_local_feat.ets.enabled)
*flags |= DCB_FEATCFG_ENABLE;
- if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR)
+ if (bp->dcbx_error & (DCBX_LOCAL_ETS_ERROR |
+ DCBX_REMOTE_MIB_ERROR))
*flags |= DCB_FEATCFG_ERROR;
break;
case DCB_FEATCFG_ATTR_PFC:
if (bp->dcbx_local_feat.pfc.enabled)
*flags |= DCB_FEATCFG_ENABLE;
if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR |
- DCBX_LOCAL_PFC_MISMATCH))
+ DCBX_LOCAL_PFC_MISMATCH |
+ DCBX_REMOTE_MIB_ERROR))
*flags |= DCB_FEATCFG_ERROR;
break;
case DCB_FEATCFG_ATTR_APP:
if (bp->dcbx_local_feat.app.enabled)
*flags |= DCB_FEATCFG_ENABLE;
if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR |
- DCBX_LOCAL_APP_MISMATCH))
+ DCBX_LOCAL_APP_MISMATCH |
+ DCBX_REMOTE_MIB_ERROR))
*flags |= DCB_FEATCFG_ERROR;
break;
default:
- BNX2X_ERR("Non valid featrue-ID\n");
- rval = -EINVAL;
+ BNX2X_ERR("Non valid feature-ID\n");
+ rval = 1;
break;
}
} else {
DP(BNX2X_MSG_DCB, "DCB disabled\n");
- rval = -EINVAL;
+ rval = 1;
}
return rval;
@@ -2427,13 +2444,13 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
flags & DCB_FEATCFG_WILLING ? 1 : 0;
break;
default:
- BNX2X_ERR("Non valid featrue-ID\n");
- rval = -EINVAL;
+ BNX2X_ERR("Non valid feature-ID\n");
+ rval = 1;
break;
}
} else {
DP(BNX2X_MSG_DCB, "dcbnl call not valid\n");
- rval = -EINVAL;
+ rval = 1;
}
return rval;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 06c7a043594..c6939ecb02c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -1,6 +1,6 @@
/* bnx2x_dcb.h: Broadcom Everest network driver.
*
- * Copyright 2009-2012 Broadcom Corporation
+ * Copyright 2009-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -12,7 +12,7 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Dmitry Kravkov
*
*/
@@ -134,8 +134,6 @@ enum {
#define PFC_BRB1_REG_HIGH_LLFC_LOW_THRESHOLD 130
#define PFC_BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD 170
-
-
struct cos_entry_help_data {
u32 pri_join_mask;
u32 cos_bw;
@@ -170,7 +168,6 @@ struct cos_help_data {
(!(IS_DCBX_PFC_PRI_ONLY_NON_PAUSE((bp), (pg_pri)) || \
IS_DCBX_PFC_PRI_ONLY_PAUSE((bp), (pg_pri))))
-
struct pg_entry_help_data {
u8 num_of_dif_pri;
u8 pg;
@@ -202,4 +199,7 @@ extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
#endif /* BCM_DCBNL */
+int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
+int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
+
#endif /* BNX2X_DCB_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
index b926f58e983..12eb4baee9f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
@@ -1,6 +1,6 @@
/* bnx2x_dump.h: Broadcom Everest network driver.
*
- * Copyright (c) 2012 Broadcom Corporation
+ * Copyright (c) 2012-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -13,129 +13,39 @@
* consent.
*/
-
-/* This struct holds a signature to ensure the dump returned from the driver
- * match the meta data file inserted to grc_dump.tcl
- * The signature is time stamp, diag version and grc_dump version
- */
-
#ifndef BNX2X_DUMP_H
#define BNX2X_DUMP_H
+/* WaitP Definitions */
+#define DRV_DUMP_XSTORM_WAITP_ADDRESS 0x2b8a80
+#define DRV_DUMP_TSTORM_WAITP_ADDRESS 0x1b8a80
+#define DRV_DUMP_USTORM_WAITP_ADDRESS 0x338a80
+#define DRV_DUMP_CSTORM_WAITP_ADDRESS 0x238a80
+/* Possible Chips */
+#define DUMP_CHIP_E1 1
+#define DUMP_CHIP_E1H 2
+#define DUMP_CHIP_E2 4
+#define DUMP_CHIP_E3A0 8
+#define DUMP_CHIP_E3B0 16
+#define DUMP_PATH_0 512
+#define DUMP_PATH_1 1024
+#define NUM_PRESETS 13
+#define NUM_CHIPS 5
-/*definitions */
-#define XSTORM_WAITP_ADDR 0x2b8a80
-#define TSTORM_WAITP_ADDR 0x1b8a80
-#define USTORM_WAITP_ADDR 0x338a80
-#define CSTORM_WAITP_ADDR 0x238a80
-#define TSTORM_CAM_MODE 0x1B1440
-
-#define MAX_TIMER_PENDING 200
-#define TIMER_SCAN_DONT_CARE 0xFF
-#define RI_E1 0x1
-#define RI_E1H 0x2
-#define RI_E2 0x4
-#define RI_E3 0x8
-#define RI_E3B0 0x10
-#define RI_ONLINE 0x100
-#define RI_OFFLINE 0x0
-#define RI_PATH0_DUMP 0x200
-#define RI_PATH1_DUMP 0x400
-
-#define RI_E1_ONLINE (RI_E1 | RI_ONLINE)
-#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE)
-#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
-#define RI_E2_ONLINE (RI_E2 | RI_ONLINE)
-#define RI_E1E2_ONLINE (RI_E1 | RI_E2 | RI_ONLINE)
-#define RI_E1HE2_ONLINE (RI_E1H | RI_E2 | RI_ONLINE)
-#define RI_E1E1HE2_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
-#define RI_E3_ONLINE (RI_E3 | RI_ONLINE)
-#define RI_E1E3_ONLINE (RI_E1 | RI_E3 | RI_ONLINE)
-#define RI_E1HE3_ONLINE (RI_E1H | RI_E3 | RI_ONLINE)
-#define RI_E1E1HE3_ONLINE (RI_E1 | RI_E1H | RI_E3 | RI_ONLINE)
-#define RI_E2E3_ONLINE (RI_E2 | RI_E3 | RI_ONLINE)
-#define RI_E1E2E3_ONLINE (RI_E1 | RI_E2 | RI_E3 | RI_ONLINE)
-#define RI_E1HE2E3_ONLINE (RI_E1H | RI_E2 | RI_E3 | RI_ONLINE)
-#define RI_E1E1HE2E3_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_ONLINE)
-#define RI_E3B0_ONLINE (RI_E3B0 | RI_ONLINE)
-#define RI_E1E3B0_ONLINE (RI_E1 | RI_E3B0 | RI_ONLINE)
-#define RI_E1HE3B0_ONLINE (RI_E1H | RI_E3B0 | RI_ONLINE)
-#define RI_E1E1HE3B0_ONLINE (RI_E1 | RI_E1H | RI_E3B0 | RI_ONLINE)
-#define RI_E2E3B0_ONLINE (RI_E2 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E2E3B0_ONLINE (RI_E1 | RI_E2 | RI_E3B0 | RI_ONLINE)
-#define RI_E1HE2E3B0_ONLINE (RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E1HE2E3B0_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE)
-#define RI_E3E3B0_ONLINE (RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E3E3B0_ONLINE (RI_E1 | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1HE3E3B0_ONLINE (RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E1HE3E3B0_ONLINE (RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E2E3E3B0_ONLINE (RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E2E3E3B0_ONLINE (RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1HE2E3E3B0_ONLINE (RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E1HE2E3E3B0_ONLINE \
- (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1_OFFLINE (RI_E1 | RI_OFFLINE)
-#define RI_E1H_OFFLINE (RI_E1H | RI_OFFLINE)
-#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H | RI_OFFLINE)
-#define RI_E2_OFFLINE (RI_E2 | RI_OFFLINE)
-#define RI_E1E2_OFFLINE (RI_E1 | RI_E2 | RI_OFFLINE)
-#define RI_E1HE2_OFFLINE (RI_E1H | RI_E2 | RI_OFFLINE)
-#define RI_E1E1HE2_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_OFFLINE)
-#define RI_E3_OFFLINE (RI_E3 | RI_OFFLINE)
-#define RI_E1E3_OFFLINE (RI_E1 | RI_E3 | RI_OFFLINE)
-#define RI_E1HE3_OFFLINE (RI_E1H | RI_E3 | RI_OFFLINE)
-#define RI_E1E1HE3_OFFLINE (RI_E1 | RI_E1H | RI_E3 | RI_OFFLINE)
-#define RI_E2E3_OFFLINE (RI_E2 | RI_E3 | RI_OFFLINE)
-#define RI_E1E2E3_OFFLINE (RI_E1 | RI_E2 | RI_E3 | RI_OFFLINE)
-#define RI_E1HE2E3_OFFLINE (RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE)
-#define RI_E1E1HE2E3_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE)
-#define RI_E3B0_OFFLINE (RI_E3B0 | RI_OFFLINE)
-#define RI_E1E3B0_OFFLINE (RI_E1 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1HE3B0_OFFLINE (RI_E1H | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E1HE3B0_OFFLINE (RI_E1 | RI_E1H | RI_E3B0 | RI_OFFLINE)
-#define RI_E2E3B0_OFFLINE (RI_E2 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E2E3B0_OFFLINE (RI_E1 | RI_E2 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1HE2E3B0_OFFLINE (RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E1HE2E3B0_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE)
-#define RI_E3E3B0_OFFLINE (RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E3E3B0_OFFLINE (RI_E1 | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1HE3E3B0_OFFLINE (RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E1HE3E3B0_OFFLINE (RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E2E3E3B0_OFFLINE (RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E2E3E3B0_OFFLINE (RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1HE2E3E3B0_OFFLINE (RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E1HE2E3E3B0_OFFLINE \
- (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_ALL_ONLINE RI_E1E1HE2E3E3B0_ONLINE
-#define RI_ALL_OFFLINE RI_E1E1HE2E3E3B0_OFFLINE
-
-#define DBG_DMP_TRACE_BUFFER_SIZE 0x800
-#define DBG_DMP_TRACE_BUFFER_OFFSET(shmem0_offset) \
- ((shmem0_offset) - DBG_DMP_TRACE_BUFFER_SIZE)
-
-struct dump_sign {
- u32 time_stamp;
- u32 diag_ver;
- u32 grc_dump_ver;
-};
-
-struct dump_hdr {
- u32 hdr_size; /* in dwords, excluding this field */
- struct dump_sign dump_sign;
- u32 xstorm_waitp;
- u32 tstorm_waitp;
- u32 ustorm_waitp;
- u32 cstorm_waitp;
- u16 info;
- u8 idle_chk;
- u8 reserved;
+struct dump_header {
+ u32 header_size; /* Size in DWORDs excluding this field */
+ u32 version;
+ u32 preset;
+ u32 dump_meta_data; /* OR of CHIP and PATH. */
};
+#define BNX2X_DUMP_VERSION 0x50acff01
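[Editor's note: for orientation, the new header can be assembled as below. This is
a hedged sketch only; make_dump_header() and the _example struct are hypothetical,
and the driver itself fills the header in its ethtool get-dump path:

	#include <stdint.h>

	struct dump_header_example {
		uint32_t header_size;	/* in dwords, excluding this field */
		uint32_t version;	/* BNX2X_DUMP_VERSION */
		uint32_t preset;	/* 1..NUM_PRESETS */
		uint32_t dump_meta_data; /* OR of DUMP_CHIP_* and DUMP_PATH_* */
	};

	static struct dump_header_example make_dump_header(uint32_t preset,
							   uint32_t chip,
							   uint32_t path)
	{
		struct dump_header_example h = {
			.header_size = sizeof(h) / 4 - 1,	/* 3 */
			.version = 0x50acff01,	/* BNX2X_DUMP_VERSION */
			.preset = preset,
			.dump_meta_data = chip | path,
		};
		return h;
	}
]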
struct reg_addr {
u32 addr;
u32 size;
- u16 info;
+ u32 chips;
+ u32 presets;
};
struct wreg_addr {
@@ -143,1005 +53,2168 @@ struct wreg_addr {
u32 size;
u32 read_regs_count;
const u32 *read_regs;
- u16 info;
+ u32 chips;
+ u32 presets;
+};
+
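[Editor's note: the chips/presets pair replaces the old u16 info word. Judging by
the table values below (chips masks such as 0x1f spanning the five DUMP_CHIP_*
bits, presets masks spanning the NUM_PRESETS presets), an entry appears to be
selected when both masks match. A sketch under that assumption;
dump_entry_matches() and the _example struct are illustrative, not driver code:

	#include <stdbool.h>
	#include <stdint.h>

	struct reg_addr_example {
		uint32_t addr;
		uint32_t size;		/* in dwords */
		uint32_t chips;		/* OR of DUMP_CHIP_* bits */
		uint32_t presets;	/* bit (p - 1) => part of preset p */
	};

	/* Assumed test: dump the entry when its chip mask contains the
	 * running chip's bit and its preset mask contains the request. */
	static bool dump_entry_matches(const struct reg_addr_example *r,
				       uint32_t chip_bit, unsigned int preset)
	{
		return (r->chips & chip_bit) &&
		       (r->presets & (1u << (preset - 1)));
	}
]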
+#define PAGE_MODE_VALUES_E2 2
+#define PAGE_READ_REGS_E2 1
+#define PAGE_WRITE_REGS_E2 1
+static const u32 page_vals_e2[] = {0, 128};
+static const u32 page_write_regs_e2[] = {328476};
+static const struct reg_addr page_read_regs_e2[] = {
+ {0x58000, 4608, DUMP_CHIP_E2, 0x30}
+};
+
+#define PAGE_MODE_VALUES_E3 2
+#define PAGE_READ_REGS_E3 1
+#define PAGE_WRITE_REGS_E3 1
+static const u32 page_vals_e3[] = {0, 128};
+static const u32 page_write_regs_e3[] = {328476};
+static const struct reg_addr page_read_regs_e3[] = {
+ {0x58000, 4608, DUMP_CHIP_E3A0 | DUMP_CHIP_E3B0, 0x30}
};
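[Editor's note: the page tables above suggest a write-then-read sequence for the
paged blocks on E2/E3: each page value is presumably written to the page
register(s), after which the paged ranges are read out. A sketch under that
assumption; reg_wr()/reg_rd() are stand-ins for the driver's REG_WR/REG_RD
accessors and the ex_* names mirror the E2 tables for illustration:

	#include <stdint.h>
	#include <stdio.h>

	static void reg_wr(uint32_t addr, uint32_t val)
	{
		printf("WR 0x%x <- %u\n", addr, val);	/* stub accessor */
	}

	static uint32_t reg_rd(uint32_t addr)
	{
		(void)addr;
		return 0;				/* stub accessor */
	}

	/* Mirrors page_vals_e2/page_write_regs_e2/page_read_regs_e2. */
	static const uint32_t ex_page_vals[] = {0, 128};
	static const uint32_t ex_page_wr_reg = 328476;
	static const struct { uint32_t addr, size; } ex_page_rd = {
		0x58000, 4608
	};

	static void dump_paged_example(void)
	{
		for (unsigned int v = 0; v < 2; v++) {	/* PAGE_MODE_VALUES_E2 */
			reg_wr(ex_page_wr_reg, ex_page_vals[v]);
			for (uint32_t i = 0; i < ex_page_rd.size; i++)
				(void)reg_rd(ex_page_rd.addr + 4 * i);
		}
	}
]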
static const struct reg_addr reg_addrs[] = {
- { 0x2000, 341, RI_ALL_ONLINE },
- { 0x2800, 103, RI_ALL_ONLINE },
- { 0x3000, 287, RI_ALL_ONLINE },
- { 0x3800, 331, RI_ALL_ONLINE },
- { 0x8800, 6, RI_ALL_ONLINE },
- { 0x8818, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x9000, 147, RI_E2E3E3B0_ONLINE },
- { 0x924c, 1, RI_E2_ONLINE },
- { 0x9250, 16, RI_E2E3E3B0_ONLINE },
- { 0x9400, 33, RI_E2E3E3B0_ONLINE },
- { 0x9484, 5, RI_E3E3B0_ONLINE },
- { 0xa000, 27, RI_ALL_ONLINE },
- { 0xa06c, 1, RI_E1E1H_ONLINE },
- { 0xa070, 71, RI_ALL_ONLINE },
- { 0xa18c, 4, RI_E1E1H_ONLINE },
- { 0xa19c, 62, RI_ALL_ONLINE },
- { 0xa294, 2, RI_E1E1H_ONLINE },
- { 0xa29c, 2, RI_ALL_ONLINE },
- { 0xa2a4, 2, RI_E1E1HE2_ONLINE },
- { 0xa2ac, 52, RI_ALL_ONLINE },
- { 0xa39c, 7, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3b8, 2, RI_E3E3B0_ONLINE },
- { 0xa3c0, 3, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3d0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3d8, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3e0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3e8, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3f0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3f8, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa400, 40, RI_ALL_ONLINE },
- { 0xa4a0, 1, RI_E1E1HE2_ONLINE },
- { 0xa4a4, 2, RI_ALL_ONLINE },
- { 0xa4ac, 2, RI_E1E1H_ONLINE },
- { 0xa4b4, 1, RI_E1E1HE2_ONLINE },
- { 0xa4b8, 2, RI_E1E1H_ONLINE },
- { 0xa4c0, 3, RI_ALL_ONLINE },
- { 0xa4cc, 5, RI_E1E1H_ONLINE },
- { 0xa4e0, 3, RI_ALL_ONLINE },
- { 0xa4fc, 2, RI_ALL_ONLINE },
- { 0xa504, 1, RI_E1E1H_ONLINE },
- { 0xa508, 3, RI_ALL_ONLINE },
- { 0xa518, 1, RI_ALL_ONLINE },
- { 0xa520, 1, RI_ALL_ONLINE },
- { 0xa528, 1, RI_ALL_ONLINE },
- { 0xa530, 1, RI_ALL_ONLINE },
- { 0xa538, 1, RI_ALL_ONLINE },
- { 0xa540, 1, RI_ALL_ONLINE },
- { 0xa548, 1, RI_E1E1H_ONLINE },
- { 0xa550, 1, RI_E1E1H_ONLINE },
- { 0xa558, 1, RI_E1E1H_ONLINE },
- { 0xa560, 1, RI_E1E1H_ONLINE },
- { 0xa568, 1, RI_E1E1H_ONLINE },
- { 0xa570, 1, RI_ALL_ONLINE },
- { 0xa580, 1, RI_ALL_ONLINE },
- { 0xa590, 1, RI_ALL_ONLINE },
- { 0xa5a0, 1, RI_E1E1HE2_ONLINE },
- { 0xa5c0, 1, RI_ALL_ONLINE },
- { 0xa5e0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa5e8, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa5f0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa5f8, 1, RI_E1HE2_ONLINE },
- { 0xa5fc, 9, RI_E1HE2E3E3B0_ONLINE },
- { 0xa620, 6, RI_E2E3E3B0_ONLINE },
- { 0xa638, 20, RI_E2_ONLINE },
- { 0xa688, 42, RI_E2E3E3B0_ONLINE },
- { 0xa730, 1, RI_E2_ONLINE },
- { 0xa734, 2, RI_E2E3E3B0_ONLINE },
- { 0xa73c, 4, RI_E2_ONLINE },
- { 0xa74c, 5, RI_E2E3E3B0_ONLINE },
- { 0xa760, 5, RI_E2_ONLINE },
- { 0xa774, 7, RI_E2E3E3B0_ONLINE },
- { 0xa790, 15, RI_E2_ONLINE },
- { 0xa7cc, 4, RI_E2E3E3B0_ONLINE },
- { 0xa7e0, 6, RI_E3E3B0_ONLINE },
- { 0xa800, 18, RI_E2_ONLINE },
- { 0xa848, 33, RI_E2E3E3B0_ONLINE },
- { 0xa8cc, 2, RI_E3E3B0_ONLINE },
- { 0xa8d4, 4, RI_E2E3E3B0_ONLINE },
- { 0xa8e4, 1, RI_E3E3B0_ONLINE },
- { 0xa8e8, 1, RI_E2E3E3B0_ONLINE },
- { 0xa8f0, 1, RI_E2E3E3B0_ONLINE },
- { 0xa8f8, 30, RI_E3E3B0_ONLINE },
- { 0xa974, 73, RI_E3E3B0_ONLINE },
- { 0xac30, 1, RI_E3E3B0_ONLINE },
- { 0xac40, 1, RI_E3E3B0_ONLINE },
- { 0xac50, 1, RI_E3E3B0_ONLINE },
- { 0xac60, 1, RI_E3B0_ONLINE },
- { 0x10000, 9, RI_ALL_ONLINE },
- { 0x10024, 1, RI_E1E1HE2_ONLINE },
- { 0x10028, 5, RI_ALL_ONLINE },
- { 0x1003c, 6, RI_E1E1HE2_ONLINE },
- { 0x10054, 20, RI_ALL_ONLINE },
- { 0x100a4, 4, RI_E1E1HE2_ONLINE },
- { 0x100b4, 11, RI_ALL_ONLINE },
- { 0x100e0, 4, RI_E1E1HE2_ONLINE },
- { 0x100f0, 8, RI_ALL_ONLINE },
- { 0x10110, 6, RI_E1E1HE2_ONLINE },
- { 0x10128, 110, RI_ALL_ONLINE },
- { 0x102e0, 4, RI_E1E1HE2_ONLINE },
- { 0x102f0, 18, RI_ALL_ONLINE },
- { 0x10338, 20, RI_E1E1HE2_ONLINE },
- { 0x10388, 10, RI_ALL_ONLINE },
- { 0x10400, 6, RI_E1E1HE2_ONLINE },
- { 0x10418, 6, RI_ALL_ONLINE },
- { 0x10430, 10, RI_E1E1HE2_ONLINE },
- { 0x10458, 22, RI_ALL_ONLINE },
- { 0x104b0, 12, RI_E1E1HE2_ONLINE },
- { 0x104e0, 1, RI_ALL_ONLINE },
- { 0x104e8, 2, RI_ALL_ONLINE },
- { 0x104f4, 2, RI_ALL_ONLINE },
- { 0x10500, 146, RI_ALL_ONLINE },
- { 0x10750, 2, RI_E1E1HE2_ONLINE },
- { 0x10760, 2, RI_E1E1HE2_ONLINE },
- { 0x10770, 2, RI_E1E1HE2_ONLINE },
- { 0x10780, 2, RI_E1E1HE2_ONLINE },
- { 0x10790, 2, RI_ALL_ONLINE },
- { 0x107a0, 2, RI_E1E1HE2_ONLINE },
- { 0x107b0, 2, RI_E1E1HE2_ONLINE },
- { 0x107c0, 2, RI_E1E1HE2_ONLINE },
- { 0x107d0, 2, RI_E1E1HE2_ONLINE },
- { 0x107e0, 2, RI_ALL_ONLINE },
- { 0x10880, 2, RI_ALL_ONLINE },
- { 0x10900, 2, RI_ALL_ONLINE },
- { 0x16000, 1, RI_E1HE2_ONLINE },
- { 0x16004, 25, RI_E1HE2E3E3B0_ONLINE },
- { 0x16070, 8, RI_E1HE2E3E3B0_ONLINE },
- { 0x16090, 4, RI_E1HE2E3_ONLINE },
- { 0x160a0, 6, RI_E1HE2E3E3B0_ONLINE },
- { 0x160c0, 7, RI_E1HE2E3E3B0_ONLINE },
- { 0x160dc, 2, RI_E1HE2_ONLINE },
- { 0x160e4, 10, RI_E1HE2E3E3B0_ONLINE },
- { 0x1610c, 2, RI_E1HE2_ONLINE },
- { 0x16114, 6, RI_E1HE2E3E3B0_ONLINE },
- { 0x16140, 48, RI_E1HE2E3E3B0_ONLINE },
- { 0x16204, 5, RI_E1HE2E3E3B0_ONLINE },
- { 0x18000, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x18008, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x18010, 35, RI_E2E3E3B0_ONLINE },
- { 0x180a4, 2, RI_E2E3E3B0_ONLINE },
- { 0x180c0, 9, RI_E2E3E3B0_ONLINE },
- { 0x180e4, 1, RI_E2E3_ONLINE },
- { 0x180e8, 2, RI_E2E3E3B0_ONLINE },
- { 0x180f0, 1, RI_E2E3_ONLINE },
- { 0x180f4, 79, RI_E2E3E3B0_ONLINE },
- { 0x18230, 1, RI_E2E3_ONLINE },
- { 0x18234, 2, RI_E2E3E3B0_ONLINE },
- { 0x1823c, 1, RI_E2E3_ONLINE },
- { 0x18240, 13, RI_E2E3E3B0_ONLINE },
- { 0x18274, 1, RI_E2_ONLINE },
- { 0x18278, 81, RI_E2E3E3B0_ONLINE },
- { 0x18440, 63, RI_E2E3E3B0_ONLINE },
- { 0x18570, 42, RI_E3E3B0_ONLINE },
- { 0x18618, 25, RI_E3B0_ONLINE },
- { 0x18680, 44, RI_E3B0_ONLINE },
- { 0x18748, 12, RI_E3B0_ONLINE },
- { 0x18788, 1, RI_E3B0_ONLINE },
- { 0x1879c, 6, RI_E3B0_ONLINE },
- { 0x187c4, 51, RI_E3B0_ONLINE },
- { 0x18a00, 48, RI_E3B0_ONLINE },
- { 0x20000, 24, RI_ALL_ONLINE },
- { 0x20060, 8, RI_ALL_ONLINE },
- { 0x20080, 94, RI_ALL_ONLINE },
- { 0x201f8, 1, RI_E1E1H_ONLINE },
- { 0x201fc, 1, RI_ALL_ONLINE },
- { 0x20200, 1, RI_E1E1H_ONLINE },
- { 0x20204, 1, RI_ALL_ONLINE },
- { 0x20208, 1, RI_E1E1H_ONLINE },
- { 0x2020c, 39, RI_ALL_ONLINE },
- { 0x202c8, 1, RI_E2E3E3B0_ONLINE },
- { 0x202d8, 4, RI_E2E3E3B0_ONLINE },
- { 0x202f0, 1, RI_E3B0_ONLINE },
- { 0x20400, 2, RI_ALL_ONLINE },
- { 0x2040c, 8, RI_ALL_ONLINE },
- { 0x2042c, 18, RI_E1HE2E3E3B0_ONLINE },
- { 0x20480, 1, RI_ALL_ONLINE },
- { 0x20500, 1, RI_ALL_ONLINE },
- { 0x20600, 1, RI_ALL_ONLINE },
- { 0x28000, 1, RI_ALL_ONLINE },
- { 0x28004, 8191, RI_ALL_OFFLINE },
- { 0x30000, 1, RI_ALL_ONLINE },
- { 0x30004, 16383, RI_ALL_OFFLINE },
- { 0x40000, 98, RI_ALL_ONLINE },
- { 0x401a8, 8, RI_E1HE2E3E3B0_ONLINE },
- { 0x401c8, 1, RI_E1H_ONLINE },
- { 0x401cc, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x401d4, 2, RI_E2E3E3B0_ONLINE },
- { 0x40200, 4, RI_ALL_ONLINE },
- { 0x40220, 6, RI_E2E3E3B0_ONLINE },
- { 0x40238, 8, RI_E2E3_ONLINE },
- { 0x40258, 4, RI_E2E3E3B0_ONLINE },
- { 0x40268, 2, RI_E3E3B0_ONLINE },
- { 0x40270, 17, RI_E3B0_ONLINE },
- { 0x40400, 43, RI_ALL_ONLINE },
- { 0x404cc, 3, RI_E1HE2E3E3B0_ONLINE },
- { 0x404e0, 1, RI_E2E3E3B0_ONLINE },
- { 0x40500, 2, RI_ALL_ONLINE },
- { 0x40510, 2, RI_ALL_ONLINE },
- { 0x40520, 2, RI_ALL_ONLINE },
- { 0x40530, 2, RI_ALL_ONLINE },
- { 0x40540, 2, RI_ALL_ONLINE },
- { 0x40550, 10, RI_E2E3E3B0_ONLINE },
- { 0x40610, 2, RI_E2E3E3B0_ONLINE },
- { 0x42000, 164, RI_ALL_ONLINE },
- { 0x422c0, 4, RI_E2E3E3B0_ONLINE },
- { 0x422d4, 5, RI_E1HE2E3E3B0_ONLINE },
- { 0x422e8, 1, RI_E2E3E3B0_ONLINE },
- { 0x42400, 49, RI_ALL_ONLINE },
- { 0x424c8, 38, RI_ALL_ONLINE },
- { 0x42568, 2, RI_ALL_ONLINE },
- { 0x42640, 5, RI_E2E3E3B0_ONLINE },
- { 0x42800, 1, RI_ALL_ONLINE },
- { 0x50000, 1, RI_ALL_ONLINE },
- { 0x50004, 19, RI_ALL_ONLINE },
- { 0x50050, 8, RI_ALL_ONLINE },
- { 0x50070, 88, RI_ALL_ONLINE },
- { 0x501f0, 4, RI_E1HE2E3E3B0_ONLINE },
- { 0x50200, 2, RI_ALL_ONLINE },
- { 0x5020c, 7, RI_ALL_ONLINE },
- { 0x50228, 6, RI_E1HE2E3E3B0_ONLINE },
- { 0x50240, 1, RI_ALL_ONLINE },
- { 0x50280, 1, RI_ALL_ONLINE },
- { 0x50300, 1, RI_E2E3E3B0_ONLINE },
- { 0x5030c, 1, RI_E2E3E3B0_ONLINE },
- { 0x50318, 1, RI_E2E3E3B0_ONLINE },
- { 0x5031c, 1, RI_E2E3E3B0_ONLINE },
- { 0x50320, 2, RI_E2E3E3B0_ONLINE },
- { 0x50330, 1, RI_E3B0_ONLINE },
- { 0x52000, 1, RI_ALL_ONLINE },
- { 0x54000, 1, RI_ALL_ONLINE },
- { 0x54004, 3327, RI_ALL_OFFLINE },
- { 0x58000, 1, RI_ALL_ONLINE },
- { 0x58004, 8191, RI_E1E1H_OFFLINE },
- { 0x60000, 26, RI_ALL_ONLINE },
- { 0x60068, 8, RI_E1E1H_ONLINE },
- { 0x60088, 12, RI_ALL_ONLINE },
- { 0x600b8, 9, RI_E1E1H_ONLINE },
- { 0x600dc, 1, RI_ALL_ONLINE },
- { 0x600e0, 5, RI_E1E1H_ONLINE },
- { 0x600f4, 1, RI_E1E1HE2_ONLINE },
- { 0x600f8, 1, RI_E1E1H_ONLINE },
- { 0x600fc, 8, RI_ALL_ONLINE },
- { 0x6013c, 24, RI_E1H_ONLINE },
- { 0x6019c, 2, RI_E2E3E3B0_ONLINE },
- { 0x601ac, 18, RI_E2E3E3B0_ONLINE },
- { 0x60200, 1, RI_ALL_ONLINE },
- { 0x60204, 2, RI_ALL_OFFLINE },
- { 0x60210, 13, RI_E2E3E3B0_ONLINE },
- { 0x60244, 16, RI_E3B0_ONLINE },
- { 0x61000, 1, RI_ALL_ONLINE },
- { 0x61004, 511, RI_ALL_OFFLINE },
- { 0x61800, 512, RI_E3E3B0_OFFLINE },
- { 0x70000, 8, RI_ALL_ONLINE },
- { 0x70020, 8184, RI_ALL_OFFLINE },
- { 0x78000, 8192, RI_E3E3B0_OFFLINE },
- { 0x85000, 3, RI_ALL_OFFLINE },
- { 0x8501c, 7, RI_ALL_OFFLINE },
- { 0x85048, 1, RI_ALL_OFFLINE },
- { 0x85200, 32, RI_ALL_OFFLINE },
- { 0xb0000, 16384, RI_E1H_OFFLINE },
- { 0xc1000, 7, RI_ALL_ONLINE },
- { 0xc103c, 2, RI_E2E3E3B0_ONLINE },
- { 0xc1800, 2, RI_ALL_ONLINE },
- { 0xc2000, 164, RI_ALL_ONLINE },
- { 0xc22c0, 5, RI_E2E3E3B0_ONLINE },
- { 0xc22d8, 4, RI_E2E3E3B0_ONLINE },
- { 0xc2400, 49, RI_ALL_ONLINE },
- { 0xc24c8, 38, RI_ALL_ONLINE },
- { 0xc2568, 2, RI_ALL_ONLINE },
- { 0xc2600, 1, RI_ALL_ONLINE },
- { 0xc4000, 165, RI_ALL_ONLINE },
- { 0xc42d8, 2, RI_E2E3E3B0_ONLINE },
- { 0xc42e0, 7, RI_E1HE2E3E3B0_ONLINE },
- { 0xc42fc, 1, RI_E2E3E3B0_ONLINE },
- { 0xc4400, 51, RI_ALL_ONLINE },
- { 0xc44d0, 38, RI_ALL_ONLINE },
- { 0xc4570, 2, RI_ALL_ONLINE },
- { 0xc4578, 5, RI_E2E3E3B0_ONLINE },
- { 0xc4600, 1, RI_ALL_ONLINE },
- { 0xd0000, 19, RI_ALL_ONLINE },
- { 0xd004c, 8, RI_ALL_ONLINE },
- { 0xd006c, 91, RI_ALL_ONLINE },
- { 0xd01fc, 1, RI_E2E3E3B0_ONLINE },
- { 0xd0200, 2, RI_ALL_ONLINE },
- { 0xd020c, 7, RI_ALL_ONLINE },
- { 0xd0228, 18, RI_E1HE2E3E3B0_ONLINE },
- { 0xd0280, 1, RI_ALL_ONLINE },
- { 0xd0300, 1, RI_ALL_ONLINE },
- { 0xd0400, 1, RI_ALL_ONLINE },
- { 0xd0818, 1, RI_E3B0_ONLINE },
- { 0xd4000, 1, RI_ALL_ONLINE },
- { 0xd4004, 2559, RI_ALL_OFFLINE },
- { 0xd8000, 1, RI_ALL_ONLINE },
- { 0xd8004, 8191, RI_ALL_OFFLINE },
- { 0xe0000, 21, RI_ALL_ONLINE },
- { 0xe0054, 8, RI_ALL_ONLINE },
- { 0xe0074, 49, RI_ALL_ONLINE },
- { 0xe0138, 1, RI_E1E1H_ONLINE },
- { 0xe013c, 35, RI_ALL_ONLINE },
- { 0xe01f4, 1, RI_E2_ONLINE },
- { 0xe01f8, 1, RI_E2E3E3B0_ONLINE },
- { 0xe0200, 2, RI_ALL_ONLINE },
- { 0xe020c, 8, RI_ALL_ONLINE },
- { 0xe022c, 18, RI_E1HE2E3E3B0_ONLINE },
- { 0xe0280, 1, RI_ALL_ONLINE },
- { 0xe0300, 1, RI_ALL_ONLINE },
- { 0xe0400, 1, RI_E3B0_ONLINE },
- { 0xe1000, 1, RI_ALL_ONLINE },
- { 0xe2000, 1, RI_ALL_ONLINE },
- { 0xe2004, 2047, RI_ALL_OFFLINE },
- { 0xf0000, 1, RI_ALL_ONLINE },
- { 0xf0004, 16383, RI_ALL_OFFLINE },
- { 0x101000, 12, RI_ALL_ONLINE },
- { 0x101050, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x101054, 3, RI_E2E3E3B0_ONLINE },
- { 0x101100, 1, RI_ALL_ONLINE },
- { 0x101800, 8, RI_ALL_ONLINE },
- { 0x102000, 18, RI_ALL_ONLINE },
- { 0x102068, 6, RI_E2E3E3B0_ONLINE },
- { 0x102080, 17, RI_ALL_ONLINE },
- { 0x1020c8, 8, RI_E1H_ONLINE },
- { 0x1020e8, 9, RI_E2E3E3B0_ONLINE },
- { 0x102400, 1, RI_ALL_ONLINE },
- { 0x103000, 26, RI_ALL_ONLINE },
- { 0x103098, 5, RI_E1HE2E3E3B0_ONLINE },
- { 0x1030ac, 2, RI_E2E3E3B0_ONLINE },
- { 0x1030b4, 1, RI_E2_ONLINE },
- { 0x1030b8, 7, RI_E2E3E3B0_ONLINE },
- { 0x1030d8, 8, RI_E2E3E3B0_ONLINE },
- { 0x103400, 1, RI_E2E3E3B0_ONLINE },
- { 0x103404, 135, RI_E2E3E3B0_OFFLINE },
- { 0x103800, 8, RI_ALL_ONLINE },
- { 0x104000, 63, RI_ALL_ONLINE },
- { 0x10411c, 16, RI_E2E3E3B0_ONLINE },
- { 0x104200, 17, RI_ALL_ONLINE },
- { 0x104400, 64, RI_ALL_ONLINE },
- { 0x104500, 192, RI_ALL_OFFLINE },
- { 0x104800, 64, RI_ALL_ONLINE },
- { 0x104900, 192, RI_ALL_OFFLINE },
- { 0x105000, 256, RI_ALL_ONLINE },
- { 0x105400, 768, RI_ALL_OFFLINE },
- { 0x107000, 7, RI_E2E3E3B0_ONLINE },
- { 0x10701c, 1, RI_E3E3B0_ONLINE },
- { 0x108000, 33, RI_E1E1H_ONLINE },
- { 0x1080ac, 5, RI_E1H_ONLINE },
- { 0x108100, 5, RI_E1E1H_ONLINE },
- { 0x108120, 5, RI_E1E1H_ONLINE },
- { 0x108200, 74, RI_E1E1H_ONLINE },
- { 0x108400, 74, RI_E1E1H_ONLINE },
- { 0x108800, 152, RI_E1E1H_ONLINE },
- { 0x110000, 111, RI_E2E3E3B0_ONLINE },
- { 0x1101dc, 1, RI_E3E3B0_ONLINE },
- { 0x110200, 4, RI_E2E3E3B0_ONLINE },
- { 0x120000, 2, RI_ALL_ONLINE },
- { 0x120008, 4, RI_ALL_ONLINE },
- { 0x120018, 3, RI_ALL_ONLINE },
- { 0x120024, 4, RI_ALL_ONLINE },
- { 0x120034, 3, RI_ALL_ONLINE },
- { 0x120040, 4, RI_ALL_ONLINE },
- { 0x120050, 3, RI_ALL_ONLINE },
- { 0x12005c, 4, RI_ALL_ONLINE },
- { 0x12006c, 3, RI_ALL_ONLINE },
- { 0x120078, 4, RI_ALL_ONLINE },
- { 0x120088, 3, RI_ALL_ONLINE },
- { 0x120094, 4, RI_ALL_ONLINE },
- { 0x1200a4, 3, RI_ALL_ONLINE },
- { 0x1200b0, 4, RI_ALL_ONLINE },
- { 0x1200c0, 3, RI_ALL_ONLINE },
- { 0x1200cc, 4, RI_ALL_ONLINE },
- { 0x1200dc, 3, RI_ALL_ONLINE },
- { 0x1200e8, 4, RI_ALL_ONLINE },
- { 0x1200f8, 3, RI_ALL_ONLINE },
- { 0x120104, 4, RI_ALL_ONLINE },
- { 0x120114, 1, RI_ALL_ONLINE },
- { 0x120118, 22, RI_ALL_ONLINE },
- { 0x120170, 2, RI_E1E1H_ONLINE },
- { 0x120178, 243, RI_ALL_ONLINE },
- { 0x120544, 4, RI_E1E1H_ONLINE },
- { 0x120554, 6, RI_ALL_ONLINE },
- { 0x12059c, 6, RI_E1HE2E3E3B0_ONLINE },
- { 0x1205b4, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1205b8, 15, RI_E1HE2E3E3B0_ONLINE },
- { 0x1205f4, 1, RI_E1HE2_ONLINE },
- { 0x1205f8, 4, RI_E2E3E3B0_ONLINE },
- { 0x120618, 1, RI_E2E3E3B0_ONLINE },
- { 0x12061c, 20, RI_E1HE2E3E3B0_ONLINE },
- { 0x12066c, 11, RI_E1HE2E3E3B0_ONLINE },
- { 0x120698, 3, RI_E2E3E3B0_ONLINE },
- { 0x1206a4, 1, RI_E2_ONLINE },
- { 0x1206a8, 1, RI_E2E3E3B0_ONLINE },
- { 0x1206b0, 75, RI_E2E3E3B0_ONLINE },
- { 0x1207dc, 1, RI_E2_ONLINE },
- { 0x1207fc, 1, RI_E2E3E3B0_ONLINE },
- { 0x12080c, 65, RI_ALL_ONLINE },
- { 0x120910, 7, RI_E2E3E3B0_ONLINE },
- { 0x120930, 9, RI_E2E3E3B0_ONLINE },
- { 0x12095c, 37, RI_E3E3B0_ONLINE },
- { 0x120a00, 2, RI_E1E1HE2_ONLINE },
- { 0x120b00, 1, RI_E3E3B0_ONLINE },
- { 0x122000, 2, RI_ALL_ONLINE },
- { 0x122008, 2046, RI_E1_OFFLINE },
- { 0x128000, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x128008, 6142, RI_E1HE2E3E3B0_OFFLINE },
- { 0x130000, 35, RI_E2E3E3B0_ONLINE },
- { 0x130100, 29, RI_E2E3E3B0_ONLINE },
- { 0x130180, 1, RI_E2E3E3B0_ONLINE },
- { 0x130200, 1, RI_E2E3E3B0_ONLINE },
- { 0x130280, 1, RI_E2E3E3B0_ONLINE },
- { 0x130300, 5, RI_E2E3E3B0_ONLINE },
- { 0x130380, 1, RI_E2E3E3B0_ONLINE },
- { 0x130400, 1, RI_E2E3E3B0_ONLINE },
- { 0x130480, 5, RI_E2E3E3B0_ONLINE },
- { 0x130800, 72, RI_E2E3E3B0_ONLINE },
- { 0x131000, 136, RI_E2E3E3B0_ONLINE },
- { 0x132000, 148, RI_E2E3E3B0_ONLINE },
- { 0x134000, 544, RI_E2E3E3B0_ONLINE },
- { 0x140000, 1, RI_ALL_ONLINE },
- { 0x140004, 9, RI_E1E1HE2E3_ONLINE },
- { 0x140028, 8, RI_ALL_ONLINE },
- { 0x140048, 10, RI_E1E1HE2E3_ONLINE },
- { 0x140070, 1, RI_ALL_ONLINE },
- { 0x140074, 10, RI_E1E1HE2E3_ONLINE },
- { 0x14009c, 1, RI_ALL_ONLINE },
- { 0x1400a0, 5, RI_E1E1HE2E3_ONLINE },
- { 0x1400b4, 7, RI_ALL_ONLINE },
- { 0x1400d0, 10, RI_E1E1HE2E3_ONLINE },
- { 0x1400f8, 2, RI_ALL_ONLINE },
- { 0x140100, 5, RI_E1E1H_ONLINE },
- { 0x140114, 5, RI_E1E1HE2E3_ONLINE },
- { 0x140128, 7, RI_ALL_ONLINE },
- { 0x140144, 9, RI_E1E1HE2E3_ONLINE },
- { 0x140168, 8, RI_ALL_ONLINE },
- { 0x140188, 3, RI_E1E1HE2E3_ONLINE },
- { 0x140194, 13, RI_ALL_ONLINE },
- { 0x140200, 6, RI_E1E1HE2E3_ONLINE },
- { 0x140260, 4, RI_E2E3_ONLINE },
- { 0x140280, 4, RI_E2E3_ONLINE },
- { 0x1402e0, 2, RI_E2E3_ONLINE },
- { 0x1402e8, 2, RI_E2E3E3B0_ONLINE },
- { 0x1402f0, 9, RI_E2E3_ONLINE },
- { 0x140314, 44, RI_E3B0_ONLINE },
- { 0x144000, 4, RI_E1E1H_ONLINE },
- { 0x148000, 4, RI_E1E1H_ONLINE },
- { 0x14c000, 4, RI_E1E1H_ONLINE },
- { 0x150000, 4, RI_E1E1H_ONLINE },
- { 0x154000, 4, RI_E1E1H_ONLINE },
- { 0x158000, 4, RI_E1E1H_ONLINE },
- { 0x15c000, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x15c008, 5, RI_E1H_ONLINE },
- { 0x15c020, 8, RI_E2E3E3B0_ONLINE },
- { 0x15c040, 1, RI_E2E3_ONLINE },
- { 0x15c044, 2, RI_E2E3E3B0_ONLINE },
- { 0x15c04c, 8, RI_E2E3_ONLINE },
- { 0x15c06c, 8, RI_E2E3E3B0_ONLINE },
- { 0x15c090, 13, RI_E2E3E3B0_ONLINE },
- { 0x15c0c8, 24, RI_E2E3E3B0_ONLINE },
- { 0x15c128, 2, RI_E2E3_ONLINE },
- { 0x15c130, 8, RI_E2E3E3B0_ONLINE },
- { 0x15c150, 2, RI_E3E3B0_ONLINE },
- { 0x15c158, 2, RI_E3_ONLINE },
- { 0x15c160, 149, RI_E3B0_ONLINE },
- { 0x161000, 7, RI_ALL_ONLINE },
- { 0x16103c, 2, RI_E2E3E3B0_ONLINE },
- { 0x161800, 2, RI_ALL_ONLINE },
- { 0x162000, 54, RI_E3E3B0_ONLINE },
- { 0x162200, 60, RI_E3E3B0_ONLINE },
- { 0x162400, 54, RI_E3E3B0_ONLINE },
- { 0x162600, 60, RI_E3E3B0_ONLINE },
- { 0x162800, 54, RI_E3E3B0_ONLINE },
- { 0x162a00, 60, RI_E3E3B0_ONLINE },
- { 0x162c00, 54, RI_E3E3B0_ONLINE },
- { 0x162e00, 60, RI_E3E3B0_ONLINE },
- { 0x164000, 60, RI_ALL_ONLINE },
- { 0x164110, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x164118, 15, RI_E2E3E3B0_ONLINE },
- { 0x164200, 1, RI_ALL_ONLINE },
- { 0x164208, 1, RI_ALL_ONLINE },
- { 0x164210, 1, RI_ALL_ONLINE },
- { 0x164218, 1, RI_ALL_ONLINE },
- { 0x164220, 1, RI_ALL_ONLINE },
- { 0x164228, 1, RI_ALL_ONLINE },
- { 0x164230, 1, RI_ALL_ONLINE },
- { 0x164238, 1, RI_ALL_ONLINE },
- { 0x164240, 1, RI_ALL_ONLINE },
- { 0x164248, 1, RI_ALL_ONLINE },
- { 0x164250, 1, RI_ALL_ONLINE },
- { 0x164258, 1, RI_ALL_ONLINE },
- { 0x164260, 1, RI_ALL_ONLINE },
- { 0x164270, 2, RI_ALL_ONLINE },
- { 0x164280, 2, RI_ALL_ONLINE },
- { 0x164800, 2, RI_ALL_ONLINE },
- { 0x165000, 2, RI_ALL_ONLINE },
- { 0x166000, 164, RI_ALL_ONLINE },
- { 0x1662cc, 7, RI_E2E3E3B0_ONLINE },
- { 0x166400, 49, RI_ALL_ONLINE },
- { 0x1664c8, 38, RI_ALL_ONLINE },
- { 0x166568, 2, RI_ALL_ONLINE },
- { 0x166570, 5, RI_E2E3E3B0_ONLINE },
- { 0x166800, 1, RI_ALL_ONLINE },
- { 0x168000, 137, RI_ALL_ONLINE },
- { 0x168224, 2, RI_E1E1H_ONLINE },
- { 0x16822c, 29, RI_ALL_ONLINE },
- { 0x1682a0, 12, RI_E1E1H_ONLINE },
- { 0x1682d0, 12, RI_ALL_ONLINE },
- { 0x168300, 2, RI_E1E1H_ONLINE },
- { 0x168308, 68, RI_ALL_ONLINE },
- { 0x168418, 2, RI_E1E1H_ONLINE },
- { 0x168420, 6, RI_ALL_ONLINE },
- { 0x168800, 19, RI_ALL_ONLINE },
- { 0x168900, 1, RI_ALL_ONLINE },
- { 0x168a00, 128, RI_ALL_ONLINE },
- { 0x16a000, 1, RI_ALL_ONLINE },
- { 0x16a004, 1535, RI_ALL_OFFLINE },
- { 0x16c000, 1, RI_ALL_ONLINE },
- { 0x16c004, 1535, RI_ALL_OFFLINE },
- { 0x16e000, 16, RI_E1H_ONLINE },
- { 0x16e040, 8, RI_E2E3E3B0_ONLINE },
- { 0x16e100, 1, RI_E1H_ONLINE },
- { 0x16e200, 2, RI_E1H_ONLINE },
- { 0x16e400, 161, RI_E1H_ONLINE },
- { 0x16e684, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x16e68c, 12, RI_E1H_ONLINE },
- { 0x16e6bc, 4, RI_E1HE2E3E3B0_ONLINE },
- { 0x16e6cc, 4, RI_E1H_ONLINE },
- { 0x16e6e0, 2, RI_E2E3E3B0_ONLINE },
- { 0x16e6e8, 5, RI_E2E3_ONLINE },
- { 0x16e6fc, 5, RI_E2E3E3B0_ONLINE },
- { 0x16e768, 17, RI_E2E3E3B0_ONLINE },
- { 0x16e7ac, 12, RI_E3B0_ONLINE },
- { 0x170000, 24, RI_ALL_ONLINE },
- { 0x170060, 4, RI_E1E1H_ONLINE },
- { 0x170070, 65, RI_ALL_ONLINE },
- { 0x170194, 11, RI_E2E3E3B0_ONLINE },
- { 0x1701c4, 1, RI_E2E3E3B0_ONLINE },
- { 0x1701cc, 7, RI_E2E3E3B0_ONLINE },
- { 0x1701e8, 1, RI_E3E3B0_ONLINE },
- { 0x1701ec, 1, RI_E2E3E3B0_ONLINE },
- { 0x1701f4, 1, RI_E2E3E3B0_ONLINE },
- { 0x170200, 4, RI_ALL_ONLINE },
- { 0x170214, 1, RI_ALL_ONLINE },
- { 0x170218, 77, RI_E2E3E3B0_ONLINE },
- { 0x170400, 64, RI_E2E3E3B0_ONLINE },
- { 0x178000, 1, RI_ALL_ONLINE },
- { 0x180000, 61, RI_ALL_ONLINE },
- { 0x18013c, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x180200, 58, RI_ALL_ONLINE },
- { 0x180340, 4, RI_ALL_ONLINE },
- { 0x180380, 1, RI_E2E3E3B0_ONLINE },
- { 0x180388, 1, RI_E2E3E3B0_ONLINE },
- { 0x180390, 1, RI_E2E3E3B0_ONLINE },
- { 0x180398, 1, RI_E2E3E3B0_ONLINE },
- { 0x1803a0, 5, RI_E2E3E3B0_ONLINE },
- { 0x1803b4, 2, RI_E3E3B0_ONLINE },
- { 0x180404, 255, RI_E1E1H_OFFLINE },
- { 0x181000, 4, RI_ALL_ONLINE },
- { 0x181010, 1020, RI_ALL_OFFLINE },
- { 0x182000, 4, RI_E3E3B0_ONLINE },
- { 0x1a0000, 1, RI_ALL_ONLINE },
- { 0x1a0004, 5631, RI_ALL_OFFLINE },
- { 0x1a5800, 2560, RI_E1HE2E3E3B0_OFFLINE },
- { 0x1a8000, 1, RI_ALL_ONLINE },
- { 0x1a8004, 8191, RI_E1HE2E3E3B0_OFFLINE },
- { 0x1b0000, 1, RI_ALL_ONLINE },
- { 0x1b0004, 15, RI_E1H_OFFLINE },
- { 0x1b0040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b0044, 239, RI_E1H_OFFLINE },
- { 0x1b0400, 1, RI_ALL_ONLINE },
- { 0x1b0404, 255, RI_E1H_OFFLINE },
- { 0x1b0800, 1, RI_ALL_ONLINE },
- { 0x1b0840, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b0c00, 1, RI_ALL_ONLINE },
- { 0x1b1000, 1, RI_ALL_ONLINE },
- { 0x1b1040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b1400, 1, RI_ALL_ONLINE },
- { 0x1b1440, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b1480, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b14c0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b1800, 128, RI_ALL_OFFLINE },
- { 0x1b1c00, 128, RI_ALL_OFFLINE },
- { 0x1b2000, 1, RI_ALL_ONLINE },
- { 0x1b2400, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b2404, 5631, RI_E2E3E3B0_OFFLINE },
- { 0x1b8000, 1, RI_ALL_ONLINE },
- { 0x1b8040, 1, RI_ALL_ONLINE },
- { 0x1b8080, 1, RI_ALL_ONLINE },
- { 0x1b80c0, 1, RI_ALL_ONLINE },
- { 0x1b8100, 1, RI_ALL_ONLINE },
- { 0x1b8140, 1, RI_ALL_ONLINE },
- { 0x1b8180, 1, RI_ALL_ONLINE },
- { 0x1b81c0, 1, RI_ALL_ONLINE },
- { 0x1b8200, 1, RI_ALL_ONLINE },
- { 0x1b8240, 1, RI_ALL_ONLINE },
- { 0x1b8280, 1, RI_ALL_ONLINE },
- { 0x1b82c0, 1, RI_ALL_ONLINE },
- { 0x1b8300, 1, RI_ALL_ONLINE },
- { 0x1b8340, 1, RI_ALL_ONLINE },
- { 0x1b8380, 1, RI_ALL_ONLINE },
- { 0x1b83c0, 1, RI_ALL_ONLINE },
- { 0x1b8400, 1, RI_ALL_ONLINE },
- { 0x1b8440, 1, RI_ALL_ONLINE },
- { 0x1b8480, 1, RI_ALL_ONLINE },
- { 0x1b84c0, 1, RI_ALL_ONLINE },
- { 0x1b8500, 1, RI_ALL_ONLINE },
- { 0x1b8540, 1, RI_ALL_ONLINE },
- { 0x1b8580, 1, RI_ALL_ONLINE },
- { 0x1b85c0, 19, RI_E2E3E3B0_ONLINE },
- { 0x1b8800, 1, RI_ALL_ONLINE },
- { 0x1b8840, 1, RI_ALL_ONLINE },
- { 0x1b8880, 1, RI_ALL_ONLINE },
- { 0x1b88c0, 1, RI_ALL_ONLINE },
- { 0x1b8900, 1, RI_ALL_ONLINE },
- { 0x1b8940, 1, RI_ALL_ONLINE },
- { 0x1b8980, 1, RI_ALL_ONLINE },
- { 0x1b89c0, 1, RI_ALL_ONLINE },
- { 0x1b8a00, 1, RI_ALL_ONLINE },
- { 0x1b8a40, 1, RI_ALL_ONLINE },
- { 0x1b8a80, 1, RI_ALL_ONLINE },
- { 0x1b8ac0, 1, RI_ALL_ONLINE },
- { 0x1b8b00, 1, RI_ALL_ONLINE },
- { 0x1b8b40, 1, RI_ALL_ONLINE },
- { 0x1b8b80, 1, RI_ALL_ONLINE },
- { 0x1b8bc0, 1, RI_ALL_ONLINE },
- { 0x1b8c00, 1, RI_ALL_ONLINE },
- { 0x1b8c40, 1, RI_ALL_ONLINE },
- { 0x1b8c80, 1, RI_ALL_ONLINE },
- { 0x1b8cc0, 1, RI_ALL_ONLINE },
- { 0x1b8cc4, 1, RI_E2E3E3B0_ONLINE },
- { 0x1b8d00, 1, RI_ALL_ONLINE },
- { 0x1b8d40, 1, RI_ALL_ONLINE },
- { 0x1b8d80, 1, RI_ALL_ONLINE },
- { 0x1b8dc0, 1, RI_ALL_ONLINE },
- { 0x1b8e00, 1, RI_ALL_ONLINE },
- { 0x1b8e40, 1, RI_ALL_ONLINE },
- { 0x1b8e80, 1, RI_ALL_ONLINE },
- { 0x1b8e84, 1, RI_E2E3E3B0_ONLINE },
- { 0x1b8ec0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b8f00, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b8f40, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b8f80, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b8fc0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b8fc4, 2, RI_E2E3E3B0_ONLINE },
- { 0x1b8fd0, 6, RI_E2E3E3B0_ONLINE },
- { 0x1b8fe8, 2, RI_E3E3B0_ONLINE },
- { 0x1b9000, 1, RI_E2E3E3B0_ONLINE },
- { 0x1b9040, 3, RI_E2E3E3B0_ONLINE },
- { 0x1b905c, 1, RI_E3E3B0_ONLINE },
- { 0x1b9064, 1, RI_E3B0_ONLINE },
- { 0x1b9080, 10, RI_E3B0_ONLINE },
- { 0x1b9400, 14, RI_E2E3E3B0_OFFLINE },
- { 0x1b943c, 19, RI_E2E3E3B0_OFFLINE },
- { 0x1b9490, 10, RI_E2E3E3B0_OFFLINE },
- { 0x1c0000, 2, RI_ALL_ONLINE },
- { 0x200000, 65, RI_ALL_ONLINE },
- { 0x20014c, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x200200, 58, RI_ALL_ONLINE },
- { 0x200340, 4, RI_ALL_ONLINE },
- { 0x200380, 1, RI_E2E3E3B0_ONLINE },
- { 0x200388, 1, RI_E2E3E3B0_ONLINE },
- { 0x200390, 1, RI_E2E3E3B0_ONLINE },
- { 0x200398, 1, RI_E2E3E3B0_ONLINE },
- { 0x2003a0, 1, RI_E2E3E3B0_ONLINE },
- { 0x2003a8, 2, RI_E2E3E3B0_ONLINE },
- { 0x200404, 255, RI_E1E1H_OFFLINE },
- { 0x202000, 4, RI_ALL_ONLINE },
- { 0x202010, 2044, RI_ALL_OFFLINE },
- { 0x204000, 4, RI_E3E3B0_ONLINE },
- { 0x220000, 1, RI_ALL_ONLINE },
- { 0x220004, 5631, RI_ALL_OFFLINE },
- { 0x225800, 2560, RI_E1HE2E3E3B0_OFFLINE },
- { 0x228000, 1, RI_ALL_ONLINE },
- { 0x228004, 8191, RI_E1HE2E3E3B0_OFFLINE },
- { 0x230000, 1, RI_ALL_ONLINE },
- { 0x230004, 15, RI_E1H_OFFLINE },
- { 0x230040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x230044, 239, RI_E1H_OFFLINE },
- { 0x230400, 1, RI_ALL_ONLINE },
- { 0x230404, 255, RI_E1H_OFFLINE },
- { 0x230800, 1, RI_ALL_ONLINE },
- { 0x230840, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x230c00, 1, RI_ALL_ONLINE },
- { 0x231000, 1, RI_ALL_ONLINE },
- { 0x231040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x231400, 1, RI_ALL_ONLINE },
- { 0x231440, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x231480, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2314c0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x231800, 128, RI_ALL_OFFLINE },
- { 0x231c00, 128, RI_ALL_OFFLINE },
- { 0x232000, 1, RI_ALL_ONLINE },
- { 0x232400, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x232404, 5631, RI_E2E3E3B0_OFFLINE },
- { 0x238000, 1, RI_ALL_ONLINE },
- { 0x238040, 1, RI_ALL_ONLINE },
- { 0x238080, 1, RI_ALL_ONLINE },
- { 0x2380c0, 1, RI_ALL_ONLINE },
- { 0x238100, 1, RI_ALL_ONLINE },
- { 0x238140, 1, RI_ALL_ONLINE },
- { 0x238180, 1, RI_ALL_ONLINE },
- { 0x2381c0, 1, RI_ALL_ONLINE },
- { 0x238200, 1, RI_ALL_ONLINE },
- { 0x238240, 1, RI_ALL_ONLINE },
- { 0x238280, 1, RI_ALL_ONLINE },
- { 0x2382c0, 1, RI_ALL_ONLINE },
- { 0x238300, 1, RI_ALL_ONLINE },
- { 0x238340, 1, RI_ALL_ONLINE },
- { 0x238380, 1, RI_ALL_ONLINE },
- { 0x2383c0, 1, RI_ALL_ONLINE },
- { 0x238400, 1, RI_ALL_ONLINE },
- { 0x238440, 1, RI_ALL_ONLINE },
- { 0x238480, 1, RI_ALL_ONLINE },
- { 0x2384c0, 1, RI_ALL_ONLINE },
- { 0x238500, 1, RI_ALL_ONLINE },
- { 0x238540, 1, RI_ALL_ONLINE },
- { 0x238580, 1, RI_ALL_ONLINE },
- { 0x2385c0, 19, RI_E2E3E3B0_ONLINE },
- { 0x238800, 1, RI_ALL_ONLINE },
- { 0x238840, 1, RI_ALL_ONLINE },
- { 0x238880, 1, RI_ALL_ONLINE },
- { 0x2388c0, 1, RI_ALL_ONLINE },
- { 0x238900, 1, RI_ALL_ONLINE },
- { 0x238940, 1, RI_ALL_ONLINE },
- { 0x238980, 1, RI_ALL_ONLINE },
- { 0x2389c0, 1, RI_ALL_ONLINE },
- { 0x238a00, 1, RI_ALL_ONLINE },
- { 0x238a40, 1, RI_ALL_ONLINE },
- { 0x238a80, 1, RI_ALL_ONLINE },
- { 0x238ac0, 1, RI_ALL_ONLINE },
- { 0x238b00, 1, RI_ALL_ONLINE },
- { 0x238b40, 1, RI_ALL_ONLINE },
- { 0x238b80, 1, RI_ALL_ONLINE },
- { 0x238bc0, 1, RI_ALL_ONLINE },
- { 0x238c00, 1, RI_ALL_ONLINE },
- { 0x238c40, 1, RI_ALL_ONLINE },
- { 0x238c80, 1, RI_ALL_ONLINE },
- { 0x238cc0, 1, RI_ALL_ONLINE },
- { 0x238cc4, 1, RI_E2E3E3B0_ONLINE },
- { 0x238d00, 1, RI_ALL_ONLINE },
- { 0x238d40, 1, RI_ALL_ONLINE },
- { 0x238d80, 1, RI_ALL_ONLINE },
- { 0x238dc0, 1, RI_ALL_ONLINE },
- { 0x238e00, 1, RI_ALL_ONLINE },
- { 0x238e40, 1, RI_ALL_ONLINE },
- { 0x238e80, 1, RI_ALL_ONLINE },
- { 0x238e84, 1, RI_E2E3E3B0_ONLINE },
- { 0x238ec0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x238f00, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x238f40, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x238f80, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x238fc0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x238fc4, 2, RI_E2E3E3B0_ONLINE },
- { 0x238fd0, 6, RI_E2E3E3B0_ONLINE },
- { 0x238fe8, 2, RI_E3E3B0_ONLINE },
- { 0x239000, 1, RI_E2E3E3B0_ONLINE },
- { 0x239040, 3, RI_E2E3E3B0_ONLINE },
- { 0x23905c, 1, RI_E3E3B0_ONLINE },
- { 0x239064, 1, RI_E3B0_ONLINE },
- { 0x239080, 10, RI_E3B0_ONLINE },
- { 0x240000, 2, RI_ALL_ONLINE },
- { 0x280000, 65, RI_ALL_ONLINE },
- { 0x28014c, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x280200, 58, RI_ALL_ONLINE },
- { 0x280340, 4, RI_ALL_ONLINE },
- { 0x280380, 1, RI_E2E3E3B0_ONLINE },
- { 0x280388, 1, RI_E2E3E3B0_ONLINE },
- { 0x280390, 1, RI_E2E3E3B0_ONLINE },
- { 0x280398, 1, RI_E2E3E3B0_ONLINE },
- { 0x2803a0, 1, RI_E2E3E3B0_ONLINE },
- { 0x2803a8, 2, RI_E2E3E3B0_ONLINE },
- { 0x280404, 255, RI_E1E1H_OFFLINE },
- { 0x282000, 4, RI_ALL_ONLINE },
- { 0x282010, 2044, RI_ALL_OFFLINE },
- { 0x284000, 4, RI_E3E3B0_ONLINE },
- { 0x2a0000, 1, RI_ALL_ONLINE },
- { 0x2a0004, 5631, RI_ALL_OFFLINE },
- { 0x2a5800, 2560, RI_E1HE2E3E3B0_OFFLINE },
- { 0x2a8000, 1, RI_ALL_ONLINE },
- { 0x2a8004, 8191, RI_E1HE2E3E3B0_OFFLINE },
- { 0x2b0000, 1, RI_ALL_ONLINE },
- { 0x2b0004, 15, RI_E1H_OFFLINE },
- { 0x2b0040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b0044, 239, RI_E1H_OFFLINE },
- { 0x2b0400, 1, RI_ALL_ONLINE },
- { 0x2b0404, 255, RI_E1H_OFFLINE },
- { 0x2b0800, 1, RI_ALL_ONLINE },
- { 0x2b0840, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b0c00, 1, RI_ALL_ONLINE },
- { 0x2b1000, 1, RI_ALL_ONLINE },
- { 0x2b1040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b1400, 1, RI_ALL_ONLINE },
- { 0x2b1440, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b1480, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b14c0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b1800, 128, RI_ALL_OFFLINE },
- { 0x2b1c00, 128, RI_ALL_OFFLINE },
- { 0x2b2000, 1, RI_ALL_ONLINE },
- { 0x2b2400, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b2404, 5631, RI_E2E3E3B0_OFFLINE },
- { 0x2b8000, 1, RI_ALL_ONLINE },
- { 0x2b8040, 1, RI_ALL_ONLINE },
- { 0x2b8080, 1, RI_ALL_ONLINE },
- { 0x2b80c0, 1, RI_ALL_ONLINE },
- { 0x2b8100, 1, RI_ALL_ONLINE },
- { 0x2b8140, 1, RI_ALL_ONLINE },
- { 0x2b8180, 1, RI_ALL_ONLINE },
- { 0x2b81c0, 1, RI_ALL_ONLINE },
- { 0x2b8200, 1, RI_ALL_ONLINE },
- { 0x2b8240, 1, RI_ALL_ONLINE },
- { 0x2b8280, 1, RI_ALL_ONLINE },
- { 0x2b82c0, 1, RI_ALL_ONLINE },
- { 0x2b8300, 1, RI_ALL_ONLINE },
- { 0x2b8340, 1, RI_ALL_ONLINE },
- { 0x2b8380, 1, RI_ALL_ONLINE },
- { 0x2b83c0, 1, RI_ALL_ONLINE },
- { 0x2b8400, 1, RI_ALL_ONLINE },
- { 0x2b8440, 1, RI_ALL_ONLINE },
- { 0x2b8480, 1, RI_ALL_ONLINE },
- { 0x2b84c0, 1, RI_ALL_ONLINE },
- { 0x2b8500, 1, RI_ALL_ONLINE },
- { 0x2b8540, 1, RI_ALL_ONLINE },
- { 0x2b8580, 1, RI_ALL_ONLINE },
- { 0x2b85c0, 19, RI_E2E3E3B0_ONLINE },
- { 0x2b8800, 1, RI_ALL_ONLINE },
- { 0x2b8840, 1, RI_ALL_ONLINE },
- { 0x2b8880, 1, RI_ALL_ONLINE },
- { 0x2b88c0, 1, RI_ALL_ONLINE },
- { 0x2b8900, 1, RI_ALL_ONLINE },
- { 0x2b8940, 1, RI_ALL_ONLINE },
- { 0x2b8980, 1, RI_ALL_ONLINE },
- { 0x2b89c0, 1, RI_ALL_ONLINE },
- { 0x2b8a00, 1, RI_ALL_ONLINE },
- { 0x2b8a40, 1, RI_ALL_ONLINE },
- { 0x2b8a80, 1, RI_ALL_ONLINE },
- { 0x2b8ac0, 1, RI_ALL_ONLINE },
- { 0x2b8b00, 1, RI_ALL_ONLINE },
- { 0x2b8b40, 1, RI_ALL_ONLINE },
- { 0x2b8b80, 1, RI_ALL_ONLINE },
- { 0x2b8bc0, 1, RI_ALL_ONLINE },
- { 0x2b8c00, 1, RI_ALL_ONLINE },
- { 0x2b8c40, 1, RI_ALL_ONLINE },
- { 0x2b8c80, 1, RI_ALL_ONLINE },
- { 0x2b8cc0, 1, RI_ALL_ONLINE },
- { 0x2b8cc4, 1, RI_E2E3E3B0_ONLINE },
- { 0x2b8d00, 1, RI_ALL_ONLINE },
- { 0x2b8d40, 1, RI_ALL_ONLINE },
- { 0x2b8d80, 1, RI_ALL_ONLINE },
- { 0x2b8dc0, 1, RI_ALL_ONLINE },
- { 0x2b8e00, 1, RI_ALL_ONLINE },
- { 0x2b8e40, 1, RI_ALL_ONLINE },
- { 0x2b8e80, 1, RI_ALL_ONLINE },
- { 0x2b8e84, 1, RI_E2E3E3B0_ONLINE },
- { 0x2b8ec0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b8f00, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b8f40, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b8f80, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b8fc0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b8fc4, 2, RI_E2E3E3B0_ONLINE },
- { 0x2b8fd0, 6, RI_E2E3E3B0_ONLINE },
- { 0x2b8fe8, 2, RI_E3E3B0_ONLINE },
- { 0x2b9000, 1, RI_E2E3E3B0_ONLINE },
- { 0x2b9040, 3, RI_E2E3E3B0_ONLINE },
- { 0x2b905c, 1, RI_E3E3B0_ONLINE },
- { 0x2b9064, 1, RI_E3B0_ONLINE },
- { 0x2b9080, 10, RI_E3B0_ONLINE },
- { 0x2b9400, 14, RI_E2E3E3B0_ONLINE },
- { 0x2b943c, 19, RI_E2E3E3B0_ONLINE },
- { 0x2b9490, 10, RI_E2E3E3B0_ONLINE },
- { 0x2c0000, 2, RI_ALL_ONLINE },
- { 0x300000, 65, RI_ALL_ONLINE },
- { 0x30014c, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x300200, 58, RI_ALL_ONLINE },
- { 0x300340, 4, RI_ALL_ONLINE },
- { 0x300380, 1, RI_E2E3E3B0_ONLINE },
- { 0x300388, 1, RI_E2E3E3B0_ONLINE },
- { 0x300390, 1, RI_E2E3E3B0_ONLINE },
- { 0x300398, 1, RI_E2E3E3B0_ONLINE },
- { 0x3003a0, 1, RI_E2E3E3B0_ONLINE },
- { 0x3003a8, 2, RI_E2E3E3B0_ONLINE },
- { 0x300404, 255, RI_E1E1H_OFFLINE },
- { 0x302000, 4, RI_ALL_ONLINE },
- { 0x302010, 2044, RI_ALL_OFFLINE },
- { 0x304000, 4, RI_E3E3B0_ONLINE },
- { 0x320000, 1, RI_ALL_ONLINE },
- { 0x320004, 5631, RI_ALL_OFFLINE },
- { 0x325800, 2560, RI_E1HE2E3E3B0_OFFLINE },
- { 0x328000, 1, RI_ALL_ONLINE },
- { 0x328004, 8191, RI_E1HE2E3E3B0_OFFLINE },
- { 0x330000, 1, RI_ALL_ONLINE },
- { 0x330004, 15, RI_E1H_OFFLINE },
- { 0x330040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x330044, 239, RI_E1H_OFFLINE },
- { 0x330400, 1, RI_ALL_ONLINE },
- { 0x330404, 255, RI_E1H_OFFLINE },
- { 0x330800, 1, RI_ALL_ONLINE },
- { 0x330840, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x330c00, 1, RI_ALL_ONLINE },
- { 0x331000, 1, RI_ALL_ONLINE },
- { 0x331040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x331400, 1, RI_ALL_ONLINE },
- { 0x331440, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x331480, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x3314c0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x331800, 128, RI_ALL_OFFLINE },
- { 0x331c00, 128, RI_ALL_OFFLINE },
- { 0x332000, 1, RI_ALL_ONLINE },
- { 0x332400, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x332404, 5631, RI_E2E3E3B0_OFFLINE },
- { 0x338000, 1, RI_ALL_ONLINE },
- { 0x338040, 1, RI_ALL_ONLINE },
- { 0x338080, 1, RI_ALL_ONLINE },
- { 0x3380c0, 1, RI_ALL_ONLINE },
- { 0x338100, 1, RI_ALL_ONLINE },
- { 0x338140, 1, RI_ALL_ONLINE },
- { 0x338180, 1, RI_ALL_ONLINE },
- { 0x3381c0, 1, RI_ALL_ONLINE },
- { 0x338200, 1, RI_ALL_ONLINE },
- { 0x338240, 1, RI_ALL_ONLINE },
- { 0x338280, 1, RI_ALL_ONLINE },
- { 0x3382c0, 1, RI_ALL_ONLINE },
- { 0x338300, 1, RI_ALL_ONLINE },
- { 0x338340, 1, RI_ALL_ONLINE },
- { 0x338380, 1, RI_ALL_ONLINE },
- { 0x3383c0, 1, RI_ALL_ONLINE },
- { 0x338400, 1, RI_ALL_ONLINE },
- { 0x338440, 1, RI_ALL_ONLINE },
- { 0x338480, 1, RI_ALL_ONLINE },
- { 0x3384c0, 1, RI_ALL_ONLINE },
- { 0x338500, 1, RI_ALL_ONLINE },
- { 0x338540, 1, RI_ALL_ONLINE },
- { 0x338580, 1, RI_ALL_ONLINE },
- { 0x3385c0, 19, RI_E2E3E3B0_ONLINE },
- { 0x338800, 1, RI_ALL_ONLINE },
- { 0x338840, 1, RI_ALL_ONLINE },
- { 0x338880, 1, RI_ALL_ONLINE },
- { 0x3388c0, 1, RI_ALL_ONLINE },
- { 0x338900, 1, RI_ALL_ONLINE },
- { 0x338940, 1, RI_ALL_ONLINE },
- { 0x338980, 1, RI_ALL_ONLINE },
- { 0x3389c0, 1, RI_ALL_ONLINE },
- { 0x338a00, 1, RI_ALL_ONLINE },
- { 0x338a40, 1, RI_ALL_ONLINE },
- { 0x338a80, 1, RI_ALL_ONLINE },
- { 0x338ac0, 1, RI_ALL_ONLINE },
- { 0x338b00, 1, RI_ALL_ONLINE },
- { 0x338b40, 1, RI_ALL_ONLINE },
- { 0x338b80, 1, RI_ALL_ONLINE },
- { 0x338bc0, 1, RI_ALL_ONLINE },
- { 0x338c00, 1, RI_ALL_ONLINE },
- { 0x338c40, 1, RI_ALL_ONLINE },
- { 0x338c80, 1, RI_ALL_ONLINE },
- { 0x338cc0, 1, RI_ALL_ONLINE },
- { 0x338cc4, 1, RI_E2E3E3B0_ONLINE },
- { 0x338d00, 1, RI_ALL_ONLINE },
- { 0x338d40, 1, RI_ALL_ONLINE },
- { 0x338d80, 1, RI_ALL_ONLINE },
- { 0x338dc0, 1, RI_ALL_ONLINE },
- { 0x338e00, 1, RI_ALL_ONLINE },
- { 0x338e40, 1, RI_ALL_ONLINE },
- { 0x338e80, 1, RI_ALL_ONLINE },
- { 0x338e84, 1, RI_E2E3E3B0_ONLINE },
- { 0x338ec0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x338f00, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x338f40, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x338f80, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x338fc0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x338fc4, 2, RI_E2E3E3B0_ONLINE },
- { 0x338fd0, 6, RI_E2E3E3B0_ONLINE },
- { 0x338fe8, 2, RI_E3E3B0_ONLINE },
- { 0x339000, 1, RI_E2E3E3B0_ONLINE },
- { 0x339040, 3, RI_E2E3E3B0_ONLINE },
- { 0x33905c, 1, RI_E3E3B0_ONLINE },
- { 0x339064, 1, RI_E3B0_ONLINE },
- { 0x339080, 10, RI_E3B0_ONLINE },
- { 0x340000, 2, RI_ALL_ONLINE },
+ { 0x2000, 1, 0x1f, 0xfff},
+ { 0x2004, 1, 0x1f, 0x1fff},
+ { 0x2008, 25, 0x1f, 0xfff},
+ { 0x206c, 1, 0x1f, 0x1fff},
+ { 0x2070, 313, 0x1f, 0xfff},
+ { 0x2800, 103, 0x1f, 0xfff},
+ { 0x3000, 287, 0x1f, 0xfff},
+ { 0x3800, 331, 0x1f, 0xfff},
+ { 0x8800, 6, 0x1f, 0x924},
+ { 0x8818, 1, 0x1e, 0x924},
+ { 0x9000, 4, 0x1c, 0x924},
+ { 0x9010, 7, 0x1c, 0xfff},
+ { 0x902c, 1, 0x1c, 0x924},
+ { 0x9030, 1, 0x1c, 0xfff},
+ { 0x9034, 13, 0x1c, 0x924},
+ { 0x9068, 16, 0x1c, 0xfff},
+ { 0x90a8, 98, 0x1c, 0x924},
+ { 0x9230, 2, 0x1c, 0xfff},
+ { 0x9238, 3, 0x1c, 0x924},
+ { 0x9244, 1, 0x1c, 0xfff},
+ { 0x9248, 1, 0x1c, 0x924},
+ { 0x924c, 1, 0x4, 0x924},
+ { 0x9250, 16, 0x1c, 0x924},
+ { 0x92a8, 2, 0x1c, 0x1fff},
+ { 0x92b4, 1, 0x1c, 0x1fff},
+ { 0x9400, 33, 0x1c, 0x924},
+ { 0x9484, 5, 0x18, 0x924},
+ { 0xa000, 27, 0x1f, 0x924},
+ { 0xa06c, 1, 0x3, 0x924},
+ { 0xa070, 2, 0x1f, 0x924},
+ { 0xa078, 1, 0x1f, 0x1fff},
+ { 0xa07c, 31, 0x1f, 0x924},
+ { 0xa0f8, 1, 0x1f, 0x1fff},
+ { 0xa0fc, 3, 0x1f, 0x924},
+ { 0xa108, 1, 0x1f, 0x1fff},
+ { 0xa10c, 3, 0x1f, 0x924},
+ { 0xa118, 1, 0x1f, 0x1fff},
+ { 0xa11c, 28, 0x1f, 0x924},
+ { 0xa18c, 4, 0x3, 0x924},
+ { 0xa19c, 3, 0x1f, 0x924},
+ { 0xa1a8, 1, 0x1f, 0x1fff},
+ { 0xa1ac, 3, 0x1f, 0x924},
+ { 0xa1b8, 1, 0x1f, 0x1fff},
+ { 0xa1bc, 54, 0x1f, 0x924},
+ { 0xa294, 2, 0x3, 0x924},
+ { 0xa29c, 2, 0x1f, 0x924},
+ { 0xa2a4, 2, 0x7, 0x924},
+ { 0xa2ac, 2, 0x1f, 0x924},
+ { 0xa2b4, 1, 0x1f, 0x1fff},
+ { 0xa2b8, 49, 0x1f, 0x924},
+ { 0xa38c, 2, 0x1f, 0x1fff},
+ { 0xa398, 1, 0x1f, 0x1fff},
+ { 0xa39c, 7, 0x1e, 0x924},
+ { 0xa3b8, 2, 0x18, 0x924},
+ { 0xa3c0, 1, 0x1e, 0x924},
+ { 0xa3c4, 1, 0x1e, 0xfff},
+ { 0xa3c8, 1, 0x1e, 0x924},
+ { 0xa3d0, 1, 0x1e, 0x924},
+ { 0xa3d8, 1, 0x1e, 0x924},
+ { 0xa3e0, 1, 0x1e, 0x924},
+ { 0xa3e8, 1, 0x1e, 0x924},
+ { 0xa3f0, 1, 0x1e, 0x924},
+ { 0xa3f8, 1, 0x1e, 0x924},
+ { 0xa400, 1, 0x1f, 0x924},
+ { 0xa404, 1, 0x1f, 0xfff},
+ { 0xa408, 2, 0x1f, 0x1fff},
+ { 0xa410, 7, 0x1f, 0x924},
+ { 0xa42c, 12, 0x1f, 0xfff},
+ { 0xa45c, 1, 0x1f, 0x924},
+ { 0xa460, 1, 0x1f, 0x1924},
+ { 0xa464, 15, 0x1f, 0x924},
+ { 0xa4a0, 1, 0x7, 0x924},
+ { 0xa4a4, 2, 0x1f, 0x924},
+ { 0xa4ac, 2, 0x3, 0x924},
+ { 0xa4b4, 1, 0x7, 0x924},
+ { 0xa4b8, 2, 0x3, 0x924},
+ { 0xa4c0, 3, 0x1f, 0x924},
+ { 0xa4cc, 5, 0x3, 0x924},
+ { 0xa4e0, 3, 0x1f, 0x924},
+ { 0xa4fc, 2, 0x1f, 0x924},
+ { 0xa504, 1, 0x3, 0x924},
+ { 0xa508, 3, 0x1f, 0x924},
+ { 0xa518, 1, 0x1f, 0x924},
+ { 0xa520, 1, 0x1f, 0x924},
+ { 0xa528, 1, 0x1f, 0x924},
+ { 0xa530, 1, 0x1f, 0x924},
+ { 0xa538, 1, 0x1f, 0x924},
+ { 0xa540, 1, 0x1f, 0x924},
+ { 0xa548, 1, 0x3, 0x924},
+ { 0xa550, 1, 0x3, 0x924},
+ { 0xa558, 1, 0x3, 0x924},
+ { 0xa560, 1, 0x3, 0x924},
+ { 0xa568, 1, 0x3, 0x924},
+ { 0xa570, 1, 0x1f, 0x924},
+ { 0xa580, 1, 0x1f, 0x1fff},
+ { 0xa590, 1, 0x1f, 0x1fff},
+ { 0xa5a0, 1, 0x7, 0x924},
+ { 0xa5c0, 1, 0x1f, 0x924},
+ { 0xa5e0, 1, 0x1e, 0x924},
+ { 0xa5e8, 1, 0x1e, 0x924},
+ { 0xa5f0, 1, 0x1e, 0x924},
+ { 0xa5f8, 1, 0x6, 0x924},
+ { 0xa5fc, 1, 0x1e, 0x924},
+ { 0xa600, 5, 0x1e, 0xfff},
+ { 0xa614, 1, 0x1e, 0x924},
+ { 0xa618, 1, 0x1e, 0xfff},
+ { 0xa61c, 1, 0x1e, 0x924},
+ { 0xa620, 6, 0x1c, 0x924},
+ { 0xa638, 20, 0x4, 0x924},
+ { 0xa688, 35, 0x1c, 0x924},
+ { 0xa714, 1, 0x1c, 0xfff},
+ { 0xa718, 2, 0x1c, 0x924},
+ { 0xa720, 1, 0x1c, 0xfff},
+ { 0xa724, 3, 0x1c, 0x924},
+ { 0xa730, 1, 0x4, 0x924},
+ { 0xa734, 2, 0x1c, 0x924},
+ { 0xa73c, 4, 0x4, 0x924},
+ { 0xa74c, 1, 0x1c, 0x924},
+ { 0xa750, 1, 0x1c, 0xfff},
+ { 0xa754, 3, 0x1c, 0x924},
+ { 0xa760, 5, 0x4, 0x924},
+ { 0xa774, 7, 0x1c, 0x924},
+ { 0xa790, 15, 0x4, 0x924},
+ { 0xa7cc, 4, 0x1c, 0x924},
+ { 0xa7e0, 6, 0x18, 0x924},
+ { 0xa800, 18, 0x4, 0x924},
+ { 0xa848, 33, 0x1c, 0x924},
+ { 0xa8cc, 2, 0x18, 0x924},
+ { 0xa8d4, 4, 0x1c, 0x924},
+ { 0xa8e4, 1, 0x18, 0x924},
+ { 0xa8e8, 1, 0x1c, 0x924},
+ { 0xa8f0, 1, 0x1c, 0x924},
+ { 0xa8f8, 30, 0x18, 0x924},
+ { 0xa974, 73, 0x18, 0x924},
+ { 0xac30, 1, 0x18, 0x924},
+ { 0xac40, 1, 0x18, 0x924},
+ { 0xac50, 1, 0x18, 0x924},
+ { 0xac60, 1, 0x10, 0x924},
+ { 0x10000, 9, 0x1f, 0x924},
+ { 0x10024, 1, 0x7, 0x924},
+ { 0x10028, 5, 0x1f, 0x924},
+ { 0x1003c, 6, 0x7, 0x924},
+ { 0x10054, 20, 0x1f, 0x924},
+ { 0x100a4, 4, 0x7, 0x924},
+ { 0x100b4, 11, 0x1f, 0x924},
+ { 0x100e0, 4, 0x7, 0x924},
+ { 0x100f0, 8, 0x1f, 0x924},
+ { 0x10110, 6, 0x7, 0x924},
+ { 0x10128, 110, 0x1f, 0x924},
+ { 0x102e0, 4, 0x7, 0x924},
+ { 0x102f0, 18, 0x1f, 0x924},
+ { 0x10338, 20, 0x7, 0x924},
+ { 0x10388, 10, 0x1f, 0x924},
+ { 0x103d0, 2, 0x3, 0x1fff},
+ { 0x103dc, 1, 0x3, 0x1fff},
+ { 0x10400, 6, 0x7, 0x924},
+ { 0x10418, 1, 0x1f, 0xfff},
+ { 0x1041c, 1, 0x1f, 0x924},
+ { 0x10420, 1, 0x1f, 0xfff},
+ { 0x10424, 1, 0x1f, 0x924},
+ { 0x10428, 1, 0x1f, 0xfff},
+ { 0x1042c, 1, 0x1f, 0x924},
+ { 0x10430, 10, 0x7, 0x924},
+ { 0x10458, 2, 0x1f, 0x924},
+ { 0x10460, 1, 0x1f, 0xfff},
+ { 0x10464, 4, 0x1f, 0x924},
+ { 0x10474, 1, 0x1f, 0xfff},
+ { 0x10478, 14, 0x1f, 0x924},
+ { 0x104b0, 12, 0x7, 0x924},
+ { 0x104e0, 1, 0x1f, 0xfff},
+ { 0x104e8, 1, 0x1f, 0x924},
+ { 0x104ec, 1, 0x1f, 0xfff},
+ { 0x104f4, 1, 0x1f, 0x924},
+ { 0x104f8, 1, 0x1f, 0xfff},
+ { 0x10500, 2, 0x1f, 0x924},
+ { 0x10508, 1, 0x1f, 0xfff},
+ { 0x1050c, 9, 0x1f, 0x924},
+ { 0x10530, 1, 0x1f, 0xfff},
+ { 0x10534, 1, 0x1f, 0x924},
+ { 0x10538, 1, 0x1f, 0xfff},
+ { 0x1053c, 3, 0x1f, 0x924},
+ { 0x10548, 1, 0x1f, 0xfff},
+ { 0x1054c, 3, 0x1f, 0x924},
+ { 0x10558, 1, 0x1f, 0xfff},
+ { 0x1055c, 123, 0x1f, 0x924},
+ { 0x10750, 2, 0x7, 0x924},
+ { 0x10760, 2, 0x7, 0x924},
+ { 0x10770, 2, 0x7, 0x924},
+ { 0x10780, 2, 0x7, 0x924},
+ { 0x10790, 2, 0x1f, 0x924},
+ { 0x107a0, 2, 0x7, 0x924},
+ { 0x107b0, 2, 0x7, 0x924},
+ { 0x107c0, 2, 0x7, 0x924},
+ { 0x107d0, 2, 0x7, 0x924},
+ { 0x107e0, 2, 0x1f, 0x924},
+ { 0x10880, 2, 0x1f, 0x924},
+ { 0x10900, 2, 0x1f, 0x924},
+ { 0x16000, 1, 0x6, 0x924},
+ { 0x16004, 25, 0x1e, 0x924},
+ { 0x16070, 8, 0x1e, 0x924},
+ { 0x16090, 4, 0xe, 0x924},
+ { 0x160a0, 6, 0x1e, 0x924},
+ { 0x160c0, 7, 0x1e, 0x924},
+ { 0x160dc, 2, 0x6, 0x924},
+ { 0x160e4, 6, 0x1e, 0x924},
+ { 0x160fc, 4, 0x1e, 0x1fff},
+ { 0x1610c, 2, 0x6, 0x924},
+ { 0x16114, 6, 0x1e, 0x924},
+ { 0x16140, 48, 0x1e, 0x1fff},
+ { 0x16204, 5, 0x1e, 0x924},
+ { 0x18000, 1, 0x1e, 0x924},
+ { 0x18008, 1, 0x1e, 0x924},
+ { 0x18010, 35, 0x1c, 0x924},
+ { 0x180a4, 2, 0x1c, 0x924},
+ { 0x180c0, 9, 0x1c, 0x924},
+ { 0x180e4, 1, 0xc, 0x924},
+ { 0x180e8, 2, 0x1c, 0x924},
+ { 0x180f0, 1, 0xc, 0x924},
+ { 0x180f4, 79, 0x1c, 0x924},
+ { 0x18230, 1, 0xc, 0x924},
+ { 0x18234, 2, 0x1c, 0x924},
+ { 0x1823c, 1, 0xc, 0x924},
+ { 0x18240, 13, 0x1c, 0x924},
+ { 0x18274, 1, 0x4, 0x924},
+ { 0x18278, 12, 0x1c, 0x924},
+ { 0x182a8, 1, 0x1c, 0xfff},
+ { 0x182ac, 3, 0x1c, 0x924},
+ { 0x182b8, 1, 0x1c, 0xfff},
+ { 0x182bc, 19, 0x1c, 0x924},
+ { 0x18308, 1, 0x1c, 0xfff},
+ { 0x1830c, 3, 0x1c, 0x924},
+ { 0x18318, 1, 0x1c, 0xfff},
+ { 0x1831c, 7, 0x1c, 0x924},
+ { 0x18338, 1, 0x1c, 0xfff},
+ { 0x1833c, 3, 0x1c, 0x924},
+ { 0x18348, 1, 0x1c, 0xfff},
+ { 0x1834c, 28, 0x1c, 0x924},
+ { 0x183bc, 2, 0x1c, 0x1fff},
+ { 0x183c8, 3, 0x1c, 0x1fff},
+ { 0x183d8, 1, 0x1c, 0x1fff},
+ { 0x18440, 48, 0x1c, 0x1fff},
+ { 0x18500, 15, 0x1c, 0x924},
+ { 0x18570, 1, 0x18, 0xfff},
+ { 0x18574, 1, 0x18, 0x924},
+ { 0x18578, 1, 0x18, 0xfff},
+ { 0x1857c, 4, 0x18, 0x924},
+ { 0x1858c, 1, 0x18, 0xfff},
+ { 0x18590, 1, 0x18, 0x924},
+ { 0x18594, 1, 0x18, 0xfff},
+ { 0x18598, 32, 0x18, 0x924},
+ { 0x18618, 5, 0x10, 0x924},
+ { 0x1862c, 4, 0x10, 0xfff},
+ { 0x1863c, 16, 0x10, 0x924},
+ { 0x18680, 44, 0x10, 0x924},
+ { 0x18748, 12, 0x10, 0x924},
+ { 0x18788, 1, 0x10, 0x924},
+ { 0x1879c, 6, 0x10, 0x924},
+ { 0x187c4, 51, 0x10, 0x924},
+ { 0x18a00, 48, 0x10, 0x924},
+ { 0x20000, 24, 0x1f, 0x924},
+ { 0x20060, 8, 0x1f, 0x9e4},
+ { 0x20080, 94, 0x1f, 0x924},
+ { 0x201f8, 1, 0x3, 0x924},
+ { 0x201fc, 1, 0x1f, 0x924},
+ { 0x20200, 1, 0x3, 0x924},
+ { 0x20204, 1, 0x1f, 0x924},
+ { 0x20208, 1, 0x3, 0x924},
+ { 0x2020c, 4, 0x1f, 0x924},
+ { 0x2021c, 11, 0x1f, 0xfff},
+ { 0x20248, 24, 0x1f, 0x924},
+ { 0x202b8, 2, 0x1f, 0x1fff},
+ { 0x202c4, 1, 0x1f, 0x1fff},
+ { 0x202c8, 1, 0x1c, 0x924},
+ { 0x202d8, 4, 0x1c, 0x924},
+ { 0x202f0, 1, 0x10, 0x924},
+ { 0x20400, 1, 0x1f, 0x924},
+ { 0x20404, 1, 0x1f, 0xfff},
+ { 0x2040c, 2, 0x1f, 0xfff},
+ { 0x20414, 2, 0x1f, 0x924},
+ { 0x2041c, 2, 0x1f, 0xfff},
+ { 0x20424, 2, 0x1f, 0x924},
+ { 0x2042c, 18, 0x1e, 0x924},
+ { 0x20480, 1, 0x1f, 0x924},
+ { 0x20500, 1, 0x1f, 0x924},
+ { 0x20600, 1, 0x1f, 0x924},
+ { 0x28000, 1, 0x1f, 0x9e4},
+ { 0x28004, 255, 0x1f, 0x180},
+ { 0x28400, 1, 0x1f, 0x1c0},
+ { 0x28404, 255, 0x1f, 0x180},
+ { 0x28800, 1, 0x1f, 0x1c0},
+ { 0x28804, 255, 0x1f, 0x180},
+ { 0x28c00, 1, 0x1f, 0x1c0},
+ { 0x28c04, 255, 0x1f, 0x180},
+ { 0x29000, 1, 0x1f, 0x1c0},
+ { 0x29004, 255, 0x1f, 0x180},
+ { 0x29400, 1, 0x1f, 0x1c0},
+ { 0x29404, 255, 0x1f, 0x180},
+ { 0x29800, 1, 0x1f, 0x1c0},
+ { 0x29804, 255, 0x1f, 0x180},
+ { 0x29c00, 1, 0x1f, 0x1c0},
+ { 0x29c04, 255, 0x1f, 0x180},
+ { 0x2a000, 1, 0x1f, 0x1c0},
+ { 0x2a004, 255, 0x1f, 0x180},
+ { 0x2a400, 1, 0x1f, 0x1c0},
+ { 0x2a404, 255, 0x1f, 0x180},
+ { 0x2a800, 1, 0x1f, 0x1c0},
+ { 0x2a804, 255, 0x1f, 0x180},
+ { 0x2ac00, 1, 0x1f, 0x1c0},
+ { 0x2ac04, 255, 0x1f, 0x180},
+ { 0x2b000, 1, 0x1f, 0x1c0},
+ { 0x2b004, 255, 0x1f, 0x180},
+ { 0x2b400, 1, 0x1f, 0x1c0},
+ { 0x2b404, 255, 0x1f, 0x180},
+ { 0x2b800, 1, 0x1f, 0x1c0},
+ { 0x2b804, 255, 0x1f, 0x180},
+ { 0x2bc00, 1, 0x1f, 0x1c0},
+ { 0x2bc04, 255, 0x1f, 0x180},
+ { 0x2c000, 1, 0x1f, 0x1c0},
+ { 0x2c004, 255, 0x1f, 0x180},
+ { 0x2c400, 1, 0x1f, 0x1c0},
+ { 0x2c404, 255, 0x1f, 0x180},
+ { 0x2c800, 1, 0x1f, 0x1c0},
+ { 0x2c804, 255, 0x1f, 0x180},
+ { 0x2cc00, 1, 0x1f, 0x1c0},
+ { 0x2cc04, 255, 0x1f, 0x180},
+ { 0x2d000, 1, 0x1f, 0x1c0},
+ { 0x2d004, 255, 0x1f, 0x180},
+ { 0x2d400, 1, 0x1f, 0x1c0},
+ { 0x2d404, 255, 0x1f, 0x180},
+ { 0x2d800, 1, 0x1f, 0x1c0},
+ { 0x2d804, 255, 0x1f, 0x180},
+ { 0x2dc00, 1, 0x1f, 0x1c0},
+ { 0x2dc04, 255, 0x1f, 0x180},
+ { 0x2e000, 1, 0x1f, 0x1c0},
+ { 0x2e004, 255, 0x1f, 0x180},
+ { 0x2e400, 1, 0x1f, 0x1c0},
+ { 0x2e404, 255, 0x1f, 0x180},
+ { 0x2e800, 1, 0x1f, 0x1c0},
+ { 0x2e804, 255, 0x1f, 0x180},
+ { 0x2ec00, 1, 0x1f, 0x1c0},
+ { 0x2ec04, 255, 0x1f, 0x180},
+ { 0x2f000, 1, 0x1f, 0x1c0},
+ { 0x2f004, 255, 0x1f, 0x180},
+ { 0x2f400, 1, 0x1f, 0x1c0},
+ { 0x2f404, 255, 0x1f, 0x180},
+ { 0x2f800, 1, 0x1f, 0x1c0},
+ { 0x2f804, 255, 0x1f, 0x180},
+ { 0x2fc00, 1, 0x1f, 0x1c0},
+ { 0x2fc04, 255, 0x1f, 0x180},
+ { 0x30000, 1, 0x1f, 0x9e4},
+ { 0x30004, 255, 0x1f, 0x180},
+ { 0x30400, 1, 0x1f, 0x1c0},
+ { 0x30404, 255, 0x1f, 0x180},
+ { 0x30800, 1, 0x1f, 0x1c0},
+ { 0x30804, 255, 0x1f, 0x180},
+ { 0x30c00, 1, 0x1f, 0x1c0},
+ { 0x30c04, 255, 0x1f, 0x180},
+ { 0x31000, 1, 0x1f, 0x1c0},
+ { 0x31004, 255, 0x1f, 0x180},
+ { 0x31400, 1, 0x1f, 0x1c0},
+ { 0x31404, 255, 0x1f, 0x180},
+ { 0x31800, 1, 0x1f, 0x1c0},
+ { 0x31804, 255, 0x1f, 0x180},
+ { 0x31c00, 1, 0x1f, 0x1c0},
+ { 0x31c04, 255, 0x1f, 0x180},
+ { 0x32000, 1, 0x1f, 0x1c0},
+ { 0x32004, 255, 0x1f, 0x180},
+ { 0x32400, 1, 0x1f, 0x1c0},
+ { 0x32404, 255, 0x1f, 0x180},
+ { 0x32800, 1, 0x1f, 0x1c0},
+ { 0x32804, 255, 0x1f, 0x180},
+ { 0x32c00, 1, 0x1f, 0x1c0},
+ { 0x32c04, 255, 0x1f, 0x180},
+ { 0x33000, 1, 0x1f, 0x1c0},
+ { 0x33004, 255, 0x1f, 0x180},
+ { 0x33400, 1, 0x1f, 0x1c0},
+ { 0x33404, 255, 0x1f, 0x180},
+ { 0x33800, 1, 0x1f, 0x1c0},
+ { 0x33804, 255, 0x1f, 0x180},
+ { 0x33c00, 1, 0x1f, 0x1c0},
+ { 0x33c04, 255, 0x1f, 0x180},
+ { 0x34000, 1, 0x1f, 0x1c0},
+ { 0x34004, 255, 0x1f, 0x180},
+ { 0x34400, 1, 0x1f, 0x1c0},
+ { 0x34404, 255, 0x1f, 0x180},
+ { 0x34800, 1, 0x1f, 0x1c0},
+ { 0x34804, 255, 0x1f, 0x180},
+ { 0x34c00, 1, 0x1f, 0x1c0},
+ { 0x34c04, 255, 0x1f, 0x180},
+ { 0x35000, 1, 0x1f, 0x1c0},
+ { 0x35004, 255, 0x1f, 0x180},
+ { 0x35400, 1, 0x1f, 0x1c0},
+ { 0x35404, 255, 0x1f, 0x180},
+ { 0x35800, 1, 0x1f, 0x1c0},
+ { 0x35804, 255, 0x1f, 0x180},
+ { 0x35c00, 1, 0x1f, 0x1c0},
+ { 0x35c04, 255, 0x1f, 0x180},
+ { 0x36000, 1, 0x1f, 0x1c0},
+ { 0x36004, 255, 0x1f, 0x180},
+ { 0x36400, 1, 0x1f, 0x1c0},
+ { 0x36404, 255, 0x1f, 0x180},
+ { 0x36800, 1, 0x1f, 0x1c0},
+ { 0x36804, 255, 0x1f, 0x180},
+ { 0x36c00, 1, 0x1f, 0x1c0},
+ { 0x36c04, 255, 0x1f, 0x180},
+ { 0x37000, 1, 0x1f, 0x1c0},
+ { 0x37004, 255, 0x1f, 0x180},
+ { 0x37400, 1, 0x1f, 0x1c0},
+ { 0x37404, 255, 0x1f, 0x180},
+ { 0x37800, 1, 0x1f, 0x1c0},
+ { 0x37804, 255, 0x1f, 0x180},
+ { 0x37c00, 1, 0x1f, 0x1c0},
+ { 0x37c04, 255, 0x1f, 0x180},
+ { 0x38000, 1, 0x1f, 0x1c0},
+ { 0x38004, 255, 0x1f, 0x180},
+ { 0x38400, 1, 0x1f, 0x1c0},
+ { 0x38404, 255, 0x1f, 0x180},
+ { 0x38800, 1, 0x1f, 0x1c0},
+ { 0x38804, 255, 0x1f, 0x180},
+ { 0x38c00, 1, 0x1f, 0x1c0},
+ { 0x38c04, 255, 0x1f, 0x180},
+ { 0x39000, 1, 0x1f, 0x1c0},
+ { 0x39004, 255, 0x1f, 0x180},
+ { 0x39400, 1, 0x1f, 0x1c0},
+ { 0x39404, 255, 0x1f, 0x180},
+ { 0x39800, 1, 0x1f, 0x1c0},
+ { 0x39804, 255, 0x1f, 0x180},
+ { 0x39c00, 1, 0x1f, 0x1c0},
+ { 0x39c04, 255, 0x1f, 0x180},
+ { 0x3a000, 1, 0x1f, 0x1c0},
+ { 0x3a004, 255, 0x1f, 0x180},
+ { 0x3a400, 1, 0x1f, 0x1c0},
+ { 0x3a404, 255, 0x1f, 0x180},
+ { 0x3a800, 1, 0x1f, 0x1c0},
+ { 0x3a804, 255, 0x1f, 0x180},
+ { 0x3ac00, 1, 0x1f, 0x1c0},
+ { 0x3ac04, 255, 0x1f, 0x180},
+ { 0x3b000, 1, 0x1f, 0x1c0},
+ { 0x3b004, 255, 0x1f, 0x180},
+ { 0x3b400, 1, 0x1f, 0x1c0},
+ { 0x3b404, 255, 0x1f, 0x180},
+ { 0x3b800, 1, 0x1f, 0x1c0},
+ { 0x3b804, 255, 0x1f, 0x180},
+ { 0x3bc00, 1, 0x1f, 0x1c0},
+ { 0x3bc04, 255, 0x1f, 0x180},
+ { 0x3c000, 1, 0x1f, 0x1c0},
+ { 0x3c004, 255, 0x1f, 0x180},
+ { 0x3c400, 1, 0x1f, 0x1c0},
+ { 0x3c404, 255, 0x1f, 0x180},
+ { 0x3c800, 1, 0x1f, 0x1c0},
+ { 0x3c804, 255, 0x1f, 0x180},
+ { 0x3cc00, 1, 0x1f, 0x1c0},
+ { 0x3cc04, 255, 0x1f, 0x180},
+ { 0x3d000, 1, 0x1f, 0x1c0},
+ { 0x3d004, 255, 0x1f, 0x180},
+ { 0x3d400, 1, 0x1f, 0x1c0},
+ { 0x3d404, 255, 0x1f, 0x180},
+ { 0x3d800, 1, 0x1f, 0x1c0},
+ { 0x3d804, 255, 0x1f, 0x180},
+ { 0x3dc00, 1, 0x1f, 0x1c0},
+ { 0x3dc04, 255, 0x1f, 0x180},
+ { 0x3e000, 1, 0x1f, 0x1c0},
+ { 0x3e004, 255, 0x1f, 0x180},
+ { 0x3e400, 1, 0x1f, 0x1c0},
+ { 0x3e404, 255, 0x1f, 0x180},
+ { 0x3e800, 1, 0x1f, 0x1c0},
+ { 0x3e804, 255, 0x1f, 0x180},
+ { 0x3ec00, 1, 0x1f, 0x1c0},
+ { 0x3ec04, 255, 0x1f, 0x180},
+ { 0x3f000, 1, 0x1f, 0x1c0},
+ { 0x3f004, 255, 0x1f, 0x180},
+ { 0x3f400, 1, 0x1f, 0x1c0},
+ { 0x3f404, 255, 0x1f, 0x180},
+ { 0x3f800, 1, 0x1f, 0x1c0},
+ { 0x3f804, 255, 0x1f, 0x180},
+ { 0x3fc00, 1, 0x1f, 0x1c0},
+ { 0x3fc04, 255, 0x1f, 0x180},
+ { 0x40000, 85, 0x1f, 0x924},
+ { 0x40154, 13, 0x1f, 0xfff},
+ { 0x40198, 2, 0x1f, 0x1fff},
+ { 0x401a4, 1, 0x1f, 0x1fff},
+ { 0x401a8, 8, 0x1e, 0x924},
+ { 0x401c8, 1, 0x2, 0x924},
+ { 0x401cc, 2, 0x1e, 0x924},
+ { 0x401d4, 2, 0x1c, 0x924},
+ { 0x40200, 4, 0x1f, 0x924},
+ { 0x40220, 6, 0x1c, 0x924},
+ { 0x40238, 8, 0xc, 0x924},
+ { 0x40258, 4, 0x1c, 0x924},
+ { 0x40268, 2, 0x18, 0x924},
+ { 0x40270, 17, 0x10, 0x924},
+ { 0x40400, 43, 0x1f, 0x924},
+ { 0x404bc, 2, 0x1f, 0x1fff},
+ { 0x404c8, 1, 0x1f, 0x1fff},
+ { 0x404cc, 3, 0x1e, 0x924},
+ { 0x404e0, 1, 0x1c, 0x924},
+ { 0x40500, 2, 0x1f, 0x924},
+ { 0x40510, 2, 0x1f, 0x924},
+ { 0x40520, 2, 0x1f, 0x924},
+ { 0x40530, 2, 0x1f, 0x924},
+ { 0x40540, 2, 0x1f, 0x924},
+ { 0x40550, 10, 0x1c, 0x924},
+ { 0x40610, 2, 0x1c, 0x924},
+ { 0x42000, 164, 0x1f, 0x924},
+ { 0x422b0, 2, 0x1f, 0x1fff},
+ { 0x422bc, 1, 0x1f, 0x1fff},
+ { 0x422c0, 4, 0x1c, 0x924},
+ { 0x422d4, 5, 0x1e, 0x924},
+ { 0x422e8, 1, 0x1c, 0x924},
+ { 0x42400, 49, 0x1f, 0x924},
+ { 0x424c8, 32, 0x1f, 0x924},
+ { 0x42548, 1, 0x1f, 0xfff},
+ { 0x4254c, 1, 0x1f, 0x924},
+ { 0x42550, 1, 0x1f, 0xfff},
+ { 0x42554, 1, 0x1f, 0x924},
+ { 0x42558, 1, 0x1f, 0xfff},
+ { 0x4255c, 1, 0x1f, 0x924},
+ { 0x42568, 2, 0x1f, 0x924},
+ { 0x42640, 5, 0x1c, 0x924},
+ { 0x42800, 1, 0x1f, 0x924},
+ { 0x50000, 1, 0x1f, 0x1fff},
+ { 0x50004, 19, 0x1f, 0x924},
+ { 0x50050, 8, 0x1f, 0x93c},
+ { 0x50070, 60, 0x1f, 0x924},
+ { 0x50160, 8, 0x1f, 0xfff},
+ { 0x50180, 20, 0x1f, 0x924},
+ { 0x501e0, 2, 0x1f, 0x1fff},
+ { 0x501ec, 1, 0x1f, 0x1fff},
+ { 0x501f0, 4, 0x1e, 0x924},
+ { 0x50200, 1, 0x1f, 0x924},
+ { 0x50204, 1, 0x1f, 0xfff},
+ { 0x5020c, 2, 0x1f, 0xfff},
+ { 0x50214, 2, 0x1f, 0x924},
+ { 0x5021c, 1, 0x1f, 0xfff},
+ { 0x50220, 2, 0x1f, 0x924},
+ { 0x50228, 6, 0x1e, 0x924},
+ { 0x50240, 1, 0x1f, 0x924},
+ { 0x50280, 1, 0x1f, 0x924},
+ { 0x50300, 1, 0x1c, 0x924},
+ { 0x5030c, 1, 0x1c, 0x924},
+ { 0x50318, 1, 0x1c, 0x934},
+ { 0x5031c, 1, 0x1c, 0x924},
+ { 0x50320, 2, 0x1c, 0x934},
+ { 0x50330, 1, 0x10, 0x924},
+ { 0x52000, 1, 0x1f, 0x924},
+ { 0x54000, 1, 0x1f, 0x93c},
+ { 0x54004, 255, 0x1f, 0x30},
+ { 0x54400, 1, 0x1f, 0x38},
+ { 0x54404, 255, 0x1f, 0x30},
+ { 0x54800, 1, 0x1f, 0x38},
+ { 0x54804, 255, 0x1f, 0x30},
+ { 0x54c00, 1, 0x1f, 0x38},
+ { 0x54c04, 255, 0x1f, 0x30},
+ { 0x55000, 1, 0x1f, 0x38},
+ { 0x55004, 255, 0x1f, 0x30},
+ { 0x55400, 1, 0x1f, 0x38},
+ { 0x55404, 255, 0x1f, 0x30},
+ { 0x55800, 1, 0x1f, 0x38},
+ { 0x55804, 255, 0x1f, 0x30},
+ { 0x55c00, 1, 0x1f, 0x38},
+ { 0x55c04, 255, 0x1f, 0x30},
+ { 0x56000, 1, 0x1f, 0x38},
+ { 0x56004, 255, 0x1f, 0x30},
+ { 0x56400, 1, 0x1f, 0x38},
+ { 0x56404, 255, 0x1f, 0x30},
+ { 0x56800, 1, 0x1f, 0x38},
+ { 0x56804, 255, 0x1f, 0x30},
+ { 0x56c00, 1, 0x1f, 0x38},
+ { 0x56c04, 255, 0x1f, 0x30},
+ { 0x57000, 1, 0x1f, 0x38},
+ { 0x57004, 255, 0x1f, 0x30},
+ { 0x58000, 1, 0x1f, 0x934},
+ { 0x58004, 8191, 0x3, 0x30},
+ { 0x60000, 26, 0x1f, 0x924},
+ { 0x60068, 8, 0x3, 0x924},
+ { 0x60088, 2, 0x1f, 0x924},
+ { 0x60090, 1, 0x1f, 0xfff},
+ { 0x60094, 9, 0x1f, 0x924},
+ { 0x600b8, 9, 0x3, 0x924},
+ { 0x600dc, 1, 0x1f, 0x924},
+ { 0x600e0, 5, 0x3, 0x924},
+ { 0x600f4, 1, 0x7, 0x924},
+ { 0x600f8, 1, 0x3, 0x924},
+ { 0x600fc, 8, 0x1f, 0x924},
+ { 0x6012c, 2, 0x1f, 0x1fff},
+ { 0x60138, 1, 0x1f, 0x1fff},
+ { 0x6013c, 24, 0x2, 0x924},
+ { 0x6019c, 2, 0x1c, 0x924},
+ { 0x601ac, 18, 0x1c, 0x924},
+ { 0x60200, 1, 0x1f, 0xb6d},
+ { 0x60204, 2, 0x1f, 0x249},
+ { 0x60210, 13, 0x1c, 0x924},
+ { 0x60244, 16, 0x10, 0x924},
+ { 0x61000, 1, 0x1f, 0xb6d},
+ { 0x61004, 511, 0x1f, 0x249},
+ { 0x61800, 512, 0x18, 0x249},
+ { 0x70000, 8, 0x1f, 0xb6d},
+ { 0x70020, 8184, 0x1f, 0x249},
+ { 0x78000, 8192, 0x18, 0x249},
+ { 0x85000, 3, 0x1f, 0x1000},
+ { 0x8501c, 7, 0x1f, 0x1000},
+ { 0x85048, 1, 0x1f, 0x1000},
+ { 0x85200, 32, 0x1f, 0x1000},
+ { 0xa0000, 16384, 0x3, 0x1000},
+ { 0xb0000, 16384, 0x2, 0x1000},
+ { 0xc1000, 7, 0x1f, 0x924},
+ { 0xc102c, 2, 0x1f, 0x1fff},
+ { 0xc1038, 1, 0x1f, 0x1fff},
+ { 0xc103c, 2, 0x1c, 0x924},
+ { 0xc1800, 2, 0x1f, 0x924},
+ { 0xc2000, 164, 0x1f, 0x924},
+ { 0xc22b0, 2, 0x1f, 0x1fff},
+ { 0xc22bc, 1, 0x1f, 0x1fff},
+ { 0xc22c0, 5, 0x1c, 0x924},
+ { 0xc22d8, 4, 0x1c, 0x924},
+ { 0xc2400, 49, 0x1f, 0x924},
+ { 0xc24c8, 32, 0x1f, 0x924},
+ { 0xc2548, 1, 0x1f, 0xfff},
+ { 0xc254c, 1, 0x1f, 0x924},
+ { 0xc2550, 1, 0x1f, 0xfff},
+ { 0xc2554, 1, 0x1f, 0x924},
+ { 0xc2558, 1, 0x1f, 0xfff},
+ { 0xc255c, 1, 0x1f, 0x924},
+ { 0xc2568, 2, 0x1f, 0x924},
+ { 0xc2600, 1, 0x1f, 0x924},
+ { 0xc4000, 165, 0x1f, 0x924},
+ { 0xc42b4, 2, 0x1f, 0x1fff},
+ { 0xc42c0, 1, 0x1f, 0x1fff},
+ { 0xc42d8, 2, 0x1c, 0x924},
+ { 0xc42e0, 7, 0x1e, 0x924},
+ { 0xc42fc, 1, 0x1c, 0x924},
+ { 0xc4400, 51, 0x1f, 0x924},
+ { 0xc44d0, 32, 0x1f, 0x924},
+ { 0xc4550, 1, 0x1f, 0xfff},
+ { 0xc4554, 1, 0x1f, 0x924},
+ { 0xc4558, 1, 0x1f, 0xfff},
+ { 0xc455c, 1, 0x1f, 0x924},
+ { 0xc4560, 1, 0x1f, 0xfff},
+ { 0xc4564, 1, 0x1f, 0x924},
+ { 0xc4570, 2, 0x1f, 0x924},
+ { 0xc4578, 5, 0x1c, 0x924},
+ { 0xc4600, 1, 0x1f, 0x924},
+ { 0xd0000, 19, 0x1f, 0x924},
+ { 0xd004c, 8, 0x1f, 0x1927},
+ { 0xd006c, 64, 0x1f, 0x924},
+ { 0xd016c, 8, 0x1f, 0xfff},
+ { 0xd018c, 19, 0x1f, 0x924},
+ { 0xd01e8, 2, 0x1f, 0x1fff},
+ { 0xd01f4, 1, 0x1f, 0x1fff},
+ { 0xd01fc, 1, 0x1c, 0x924},
+ { 0xd0200, 1, 0x1f, 0x924},
+ { 0xd0204, 1, 0x1f, 0xfff},
+ { 0xd020c, 3, 0x1f, 0xfff},
+ { 0xd0218, 4, 0x1f, 0x924},
+ { 0xd0228, 18, 0x1e, 0x924},
+ { 0xd0280, 1, 0x1f, 0x924},
+ { 0xd0300, 1, 0x1f, 0x924},
+ { 0xd0400, 1, 0x1f, 0x924},
+ { 0xd0818, 1, 0x10, 0x924},
+ { 0xd4000, 1, 0x1f, 0x1927},
+ { 0xd4004, 255, 0x1f, 0x6},
+ { 0xd4400, 1, 0x1f, 0x1007},
+ { 0xd4404, 255, 0x1f, 0x6},
+ { 0xd4800, 1, 0x1f, 0x1007},
+ { 0xd4804, 255, 0x1f, 0x6},
+ { 0xd4c00, 1, 0x1f, 0x1007},
+ { 0xd4c04, 255, 0x1f, 0x6},
+ { 0xd5000, 1, 0x1f, 0x1007},
+ { 0xd5004, 255, 0x1f, 0x6},
+ { 0xd5400, 1, 0x1f, 0x1007},
+ { 0xd5404, 255, 0x1f, 0x6},
+ { 0xd5800, 1, 0x1f, 0x1007},
+ { 0xd5804, 255, 0x1f, 0x6},
+ { 0xd5c00, 1, 0x1f, 0x1007},
+ { 0xd5c04, 255, 0x1f, 0x6},
+ { 0xd6000, 1, 0x1f, 0x1007},
+ { 0xd6004, 255, 0x1f, 0x6},
+ { 0xd6400, 1, 0x1f, 0x1007},
+ { 0xd6404, 255, 0x1f, 0x6},
+ { 0xd8000, 1, 0x1f, 0x1927},
+ { 0xd8004, 255, 0x1f, 0x6},
+ { 0xd8400, 1, 0x1f, 0x1007},
+ { 0xd8404, 255, 0x1f, 0x6},
+ { 0xd8800, 1, 0x1f, 0x1007},
+ { 0xd8804, 255, 0x1f, 0x6},
+ { 0xd8c00, 1, 0x1f, 0x1007},
+ { 0xd8c04, 255, 0x1f, 0x6},
+ { 0xd9000, 1, 0x1f, 0x1007},
+ { 0xd9004, 255, 0x1f, 0x6},
+ { 0xd9400, 1, 0x1f, 0x1007},
+ { 0xd9404, 255, 0x1f, 0x6},
+ { 0xd9800, 1, 0x1f, 0x1007},
+ { 0xd9804, 255, 0x1f, 0x6},
+ { 0xd9c00, 1, 0x1f, 0x1007},
+ { 0xd9c04, 255, 0x1f, 0x6},
+ { 0xda000, 1, 0x1f, 0x1007},
+ { 0xda004, 255, 0x1f, 0x6},
+ { 0xda400, 1, 0x1f, 0x1007},
+ { 0xda404, 255, 0x1f, 0x6},
+ { 0xda800, 1, 0x1f, 0x1007},
+ { 0xda804, 255, 0x1f, 0x6},
+ { 0xdac00, 1, 0x1f, 0x1007},
+ { 0xdac04, 255, 0x1f, 0x6},
+ { 0xdb000, 1, 0x1f, 0x1007},
+ { 0xdb004, 255, 0x1f, 0x6},
+ { 0xdb400, 1, 0x1f, 0x1007},
+ { 0xdb404, 255, 0x1f, 0x6},
+ { 0xdb800, 1, 0x1f, 0x1007},
+ { 0xdb804, 255, 0x1f, 0x6},
+ { 0xdbc00, 1, 0x1f, 0x1007},
+ { 0xdbc04, 255, 0x1f, 0x6},
+ { 0xdc000, 1, 0x1f, 0x1007},
+ { 0xdc004, 255, 0x1f, 0x6},
+ { 0xdc400, 1, 0x1f, 0x1007},
+ { 0xdc404, 255, 0x1f, 0x6},
+ { 0xdc800, 1, 0x1f, 0x1007},
+ { 0xdc804, 255, 0x1f, 0x6},
+ { 0xdcc00, 1, 0x1f, 0x1007},
+ { 0xdcc04, 255, 0x1f, 0x6},
+ { 0xdd000, 1, 0x1f, 0x1007},
+ { 0xdd004, 255, 0x1f, 0x6},
+ { 0xdd400, 1, 0x1f, 0x1007},
+ { 0xdd404, 255, 0x1f, 0x6},
+ { 0xdd800, 1, 0x1f, 0x1007},
+ { 0xdd804, 255, 0x1f, 0x6},
+ { 0xddc00, 1, 0x1f, 0x1007},
+ { 0xddc04, 255, 0x1f, 0x6},
+ { 0xde000, 1, 0x1f, 0x1007},
+ { 0xde004, 255, 0x1f, 0x6},
+ { 0xde400, 1, 0x1f, 0x1007},
+ { 0xde404, 255, 0x1f, 0x6},
+ { 0xde800, 1, 0x1f, 0x1007},
+ { 0xde804, 255, 0x1f, 0x6},
+ { 0xdec00, 1, 0x1f, 0x1007},
+ { 0xdec04, 255, 0x1f, 0x6},
+ { 0xdf000, 1, 0x1f, 0x1007},
+ { 0xdf004, 255, 0x1f, 0x6},
+ { 0xdf400, 1, 0x1f, 0x1007},
+ { 0xdf404, 255, 0x1f, 0x6},
+ { 0xdf800, 1, 0x1f, 0x1007},
+ { 0xdf804, 255, 0x1f, 0x6},
+ { 0xdfc00, 1, 0x1f, 0x1007},
+ { 0xdfc04, 255, 0x1f, 0x6},
+ { 0xe0000, 21, 0x1f, 0x924},
+ { 0xe0054, 8, 0x1f, 0xf24},
+ { 0xe0074, 49, 0x1f, 0x924},
+ { 0xe0138, 1, 0x3, 0x924},
+ { 0xe013c, 6, 0x1f, 0x924},
+ { 0xe0154, 8, 0x1f, 0xfff},
+ { 0xe0174, 21, 0x1f, 0x924},
+ { 0xe01d8, 2, 0x1f, 0x1fff},
+ { 0xe01e4, 1, 0x1f, 0x1fff},
+ { 0xe01f4, 1, 0x4, 0x924},
+ { 0xe01f8, 1, 0x1c, 0x924},
+ { 0xe0200, 1, 0x1f, 0x924},
+ { 0xe0204, 1, 0x1f, 0xfff},
+ { 0xe020c, 2, 0x1f, 0xfff},
+ { 0xe0214, 2, 0x1f, 0x924},
+ { 0xe021c, 2, 0x1f, 0xfff},
+ { 0xe0224, 2, 0x1f, 0x924},
+ { 0xe022c, 18, 0x1e, 0x924},
+ { 0xe0280, 1, 0x1f, 0x924},
+ { 0xe0300, 1, 0x1f, 0x924},
+ { 0xe0400, 1, 0x10, 0x924},
+ { 0xe1000, 1, 0x1f, 0x924},
+ { 0xe2000, 1, 0x1f, 0xf24},
+ { 0xe2004, 255, 0x1f, 0xc00},
+ { 0xe2400, 1, 0x1f, 0xe00},
+ { 0xe2404, 255, 0x1f, 0xc00},
+ { 0xe2800, 1, 0x1f, 0xe00},
+ { 0xe2804, 255, 0x1f, 0xc00},
+ { 0xe2c00, 1, 0x1f, 0xe00},
+ { 0xe2c04, 255, 0x1f, 0xc00},
+ { 0xe3000, 1, 0x1f, 0xe00},
+ { 0xe3004, 255, 0x1f, 0xc00},
+ { 0xe3400, 1, 0x1f, 0xe00},
+ { 0xe3404, 255, 0x1f, 0xc00},
+ { 0xe3800, 1, 0x1f, 0xe00},
+ { 0xe3804, 255, 0x1f, 0xc00},
+ { 0xe3c00, 1, 0x1f, 0xe00},
+ { 0xe3c04, 255, 0x1f, 0xc00},
+ { 0xf0000, 1, 0x1f, 0xf24},
+ { 0xf0004, 255, 0x1f, 0xc00},
+ { 0xf0400, 1, 0x1f, 0xe00},
+ { 0xf0404, 255, 0x1f, 0xc00},
+ { 0xf0800, 1, 0x1f, 0xe00},
+ { 0xf0804, 255, 0x1f, 0xc00},
+ { 0xf0c00, 1, 0x1f, 0xe00},
+ { 0xf0c04, 255, 0x1f, 0xc00},
+ { 0xf1000, 1, 0x1f, 0xe00},
+ { 0xf1004, 255, 0x1f, 0xc00},
+ { 0xf1400, 1, 0x1f, 0xe00},
+ { 0xf1404, 255, 0x1f, 0xc00},
+ { 0xf1800, 1, 0x1f, 0xe00},
+ { 0xf1804, 255, 0x1f, 0xc00},
+ { 0xf1c00, 1, 0x1f, 0xe00},
+ { 0xf1c04, 255, 0x1f, 0xc00},
+ { 0xf2000, 1, 0x1f, 0xe00},
+ { 0xf2004, 255, 0x1f, 0xc00},
+ { 0xf2400, 1, 0x1f, 0xe00},
+ { 0xf2404, 255, 0x1f, 0xc00},
+ { 0xf2800, 1, 0x1f, 0xe00},
+ { 0xf2804, 255, 0x1f, 0xc00},
+ { 0xf2c00, 1, 0x1f, 0xe00},
+ { 0xf2c04, 255, 0x1f, 0xc00},
+ { 0xf3000, 1, 0x1f, 0xe00},
+ { 0xf3004, 255, 0x1f, 0xc00},
+ { 0xf3400, 1, 0x1f, 0xe00},
+ { 0xf3404, 255, 0x1f, 0xc00},
+ { 0xf3800, 1, 0x1f, 0xe00},
+ { 0xf3804, 255, 0x1f, 0xc00},
+ { 0xf3c00, 1, 0x1f, 0xe00},
+ { 0xf3c04, 255, 0x1f, 0xc00},
+ { 0xf4000, 1, 0x1f, 0xe00},
+ { 0xf4004, 255, 0x1f, 0xc00},
+ { 0xf4400, 1, 0x1f, 0xe00},
+ { 0xf4404, 255, 0x1f, 0xc00},
+ { 0xf4800, 1, 0x1f, 0xe00},
+ { 0xf4804, 255, 0x1f, 0xc00},
+ { 0xf4c00, 1, 0x1f, 0xe00},
+ { 0xf4c04, 255, 0x1f, 0xc00},
+ { 0xf5000, 1, 0x1f, 0xe00},
+ { 0xf5004, 255, 0x1f, 0xc00},
+ { 0xf5400, 1, 0x1f, 0xe00},
+ { 0xf5404, 255, 0x1f, 0xc00},
+ { 0xf5800, 1, 0x1f, 0xe00},
+ { 0xf5804, 255, 0x1f, 0xc00},
+ { 0xf5c00, 1, 0x1f, 0xe00},
+ { 0xf5c04, 255, 0x1f, 0xc00},
+ { 0xf6000, 1, 0x1f, 0xe00},
+ { 0xf6004, 255, 0x1f, 0xc00},
+ { 0xf6400, 1, 0x1f, 0xe00},
+ { 0xf6404, 255, 0x1f, 0xc00},
+ { 0xf6800, 1, 0x1f, 0xe00},
+ { 0xf6804, 255, 0x1f, 0xc00},
+ { 0xf6c00, 1, 0x1f, 0xe00},
+ { 0xf6c04, 255, 0x1f, 0xc00},
+ { 0xf7000, 1, 0x1f, 0xe00},
+ { 0xf7004, 255, 0x1f, 0xc00},
+ { 0xf7400, 1, 0x1f, 0xe00},
+ { 0xf7404, 255, 0x1f, 0xc00},
+ { 0xf7800, 1, 0x1f, 0xe00},
+ { 0xf7804, 255, 0x1f, 0xc00},
+ { 0xf7c00, 1, 0x1f, 0xe00},
+ { 0xf7c04, 255, 0x1f, 0xc00},
+ { 0xf8000, 1, 0x1f, 0xe00},
+ { 0xf8004, 255, 0x1f, 0xc00},
+ { 0xf8400, 1, 0x1f, 0xe00},
+ { 0xf8404, 255, 0x1f, 0xc00},
+ { 0xf8800, 1, 0x1f, 0xe00},
+ { 0xf8804, 255, 0x1f, 0xc00},
+ { 0xf8c00, 1, 0x1f, 0xe00},
+ { 0xf8c04, 255, 0x1f, 0xc00},
+ { 0xf9000, 1, 0x1f, 0xe00},
+ { 0xf9004, 255, 0x1f, 0xc00},
+ { 0xf9400, 1, 0x1f, 0xe00},
+ { 0xf9404, 255, 0x1f, 0xc00},
+ { 0xf9800, 1, 0x1f, 0xe00},
+ { 0xf9804, 255, 0x1f, 0xc00},
+ { 0xf9c00, 1, 0x1f, 0xe00},
+ { 0xf9c04, 255, 0x1f, 0xc00},
+ { 0xfa000, 1, 0x1f, 0xe00},
+ { 0xfa004, 255, 0x1f, 0xc00},
+ { 0xfa400, 1, 0x1f, 0xe00},
+ { 0xfa404, 255, 0x1f, 0xc00},
+ { 0xfa800, 1, 0x1f, 0xe00},
+ { 0xfa804, 255, 0x1f, 0xc00},
+ { 0xfac00, 1, 0x1f, 0xe00},
+ { 0xfac04, 255, 0x1f, 0xc00},
+ { 0xfb000, 1, 0x1f, 0xe00},
+ { 0xfb004, 255, 0x1f, 0xc00},
+ { 0xfb400, 1, 0x1f, 0xe00},
+ { 0xfb404, 255, 0x1f, 0xc00},
+ { 0xfb800, 1, 0x1f, 0xe00},
+ { 0xfb804, 255, 0x1f, 0xc00},
+ { 0xfbc00, 1, 0x1f, 0xe00},
+ { 0xfbc04, 255, 0x1f, 0xc00},
+ { 0xfc000, 1, 0x1f, 0xe00},
+ { 0xfc004, 255, 0x1f, 0xc00},
+ { 0xfc400, 1, 0x1f, 0xe00},
+ { 0xfc404, 255, 0x1f, 0xc00},
+ { 0xfc800, 1, 0x1f, 0xe00},
+ { 0xfc804, 255, 0x1f, 0xc00},
+ { 0xfcc00, 1, 0x1f, 0xe00},
+ { 0xfcc04, 255, 0x1f, 0xc00},
+ { 0xfd000, 1, 0x1f, 0xe00},
+ { 0xfd004, 255, 0x1f, 0xc00},
+ { 0xfd400, 1, 0x1f, 0xe00},
+ { 0xfd404, 255, 0x1f, 0xc00},
+ { 0xfd800, 1, 0x1f, 0xe00},
+ { 0xfd804, 255, 0x1f, 0xc00},
+ { 0xfdc00, 1, 0x1f, 0xe00},
+ { 0xfdc04, 255, 0x1f, 0xc00},
+ { 0xfe000, 1, 0x1f, 0xe00},
+ { 0xfe004, 255, 0x1f, 0xc00},
+ { 0xfe400, 1, 0x1f, 0xe00},
+ { 0xfe404, 255, 0x1f, 0xc00},
+ { 0xfe800, 1, 0x1f, 0xe00},
+ { 0xfe804, 255, 0x1f, 0xc00},
+ { 0xfec00, 1, 0x1f, 0xe00},
+ { 0xfec04, 255, 0x1f, 0xc00},
+ { 0xff000, 1, 0x1f, 0xe00},
+ { 0xff004, 255, 0x1f, 0xc00},
+ { 0xff400, 1, 0x1f, 0xe00},
+ { 0xff404, 255, 0x1f, 0xc00},
+ { 0xff800, 1, 0x1f, 0xe00},
+ { 0xff804, 255, 0x1f, 0xc00},
+ { 0xffc00, 1, 0x1f, 0xe00},
+ { 0xffc04, 255, 0x1f, 0xc00},
+ { 0x101000, 5, 0x1f, 0x924},
+ { 0x101014, 1, 0x1f, 0xfff},
+ { 0x101018, 6, 0x1f, 0x924},
+ { 0x101040, 2, 0x1f, 0x1fff},
+ { 0x10104c, 1, 0x1f, 0x1fff},
+ { 0x101050, 1, 0x1e, 0x924},
+ { 0x101054, 3, 0x1c, 0x924},
+ { 0x101100, 1, 0x1f, 0x924},
+ { 0x101800, 8, 0x1f, 0x924},
+ { 0x102000, 18, 0x1f, 0x924},
+ { 0x102058, 2, 0x1f, 0x1fff},
+ { 0x102064, 1, 0x1f, 0x1fff},
+ { 0x102068, 6, 0x1c, 0x924},
+ { 0x102080, 16, 0x1f, 0xfff},
+ { 0x1020c0, 1, 0x1f, 0x924},
+ { 0x1020c8, 8, 0x2, 0x924},
+ { 0x1020e8, 9, 0x1c, 0x924},
+ { 0x102400, 1, 0x1f, 0x924},
+ { 0x103000, 1, 0x1f, 0x924},
+ { 0x103004, 2, 0x1f, 0xfff},
+ { 0x10300c, 23, 0x1f, 0x924},
+ { 0x103088, 2, 0x1f, 0x1fff},
+ { 0x103094, 1, 0x1f, 0x1fff},
+ { 0x103098, 1, 0x1e, 0x924},
+ { 0x10309c, 2, 0x1e, 0xfff},
+ { 0x1030a4, 2, 0x1e, 0x924},
+ { 0x1030ac, 2, 0x1c, 0x924},
+ { 0x1030b4, 1, 0x4, 0x924},
+ { 0x1030b8, 2, 0x1c, 0xfff},
+ { 0x1030c0, 3, 0x1c, 0x924},
+ { 0x1030cc, 1, 0x1c, 0xfff},
+ { 0x1030d0, 1, 0x1c, 0x924},
+ { 0x1030d8, 2, 0x1c, 0x924},
+ { 0x1030e0, 1, 0x1c, 0xfff},
+ { 0x1030e4, 5, 0x1c, 0x924},
+ { 0x103400, 136, 0x1c, 0x1fff},
+ { 0x103800, 8, 0x1f, 0x924},
+ { 0x104000, 1, 0x1f, 0x924},
+ { 0x104004, 1, 0x1f, 0xfff},
+ { 0x104008, 4, 0x1f, 0x924},
+ { 0x104018, 1, 0x1f, 0xfff},
+ { 0x10401c, 1, 0x1f, 0x924},
+ { 0x104020, 1, 0x1f, 0xfff},
+ { 0x104024, 6, 0x1f, 0x924},
+ { 0x10403c, 1, 0x1f, 0xfff},
+ { 0x104040, 47, 0x1f, 0x924},
+ { 0x10410c, 2, 0x1f, 0x1fff},
+ { 0x104118, 1, 0x1f, 0x1fff},
+ { 0x10411c, 16, 0x1c, 0x924},
+ { 0x104200, 17, 0x1f, 0x924},
+ { 0x104400, 1, 0x1f, 0x1fff},
+ { 0x104404, 63, 0x1f, 0xfff},
+ { 0x104500, 192, 0x1f, 0xdb6},
+ { 0x104800, 1, 0x1f, 0x1fff},
+ { 0x104804, 63, 0x1f, 0xfff},
+ { 0x104900, 192, 0x1f, 0xdb6},
+ { 0x105000, 4, 0x1f, 0x1fff},
+ { 0x105010, 252, 0x1f, 0xfff},
+ { 0x105400, 768, 0x1f, 0xdb6},
+ { 0x107000, 7, 0x1c, 0x924},
+ { 0x10701c, 1, 0x18, 0x924},
+ { 0x108000, 33, 0x3, 0x924},
+ { 0x1080ac, 5, 0x2, 0x924},
+ { 0x108100, 5, 0x3, 0x924},
+ { 0x108120, 5, 0x3, 0x924},
+ { 0x108200, 74, 0x3, 0x924},
+ { 0x108400, 74, 0x3, 0x924},
+ { 0x108800, 152, 0x3, 0x924},
+ { 0x110000, 111, 0x1c, 0x924},
+ { 0x1101cc, 2, 0x1c, 0x1fff},
+ { 0x1101d8, 1, 0x1c, 0x1fff},
+ { 0x1101dc, 1, 0x18, 0x924},
+ { 0x110200, 4, 0x1c, 0x924},
+ { 0x120000, 92, 0x1f, 0x924},
+ { 0x120170, 2, 0x3, 0x924},
+ { 0x120178, 14, 0x1f, 0x924},
+ { 0x1201b0, 2, 0x1f, 0xfff},
+ { 0x1201b8, 93, 0x1f, 0x924},
+ { 0x12032c, 1, 0x1f, 0xfff},
+ { 0x120330, 15, 0x1f, 0x924},
+ { 0x12036c, 3, 0x1f, 0xfff},
+ { 0x120378, 36, 0x1f, 0x924},
+ { 0x120408, 2, 0x1f, 0xfff},
+ { 0x120410, 1, 0x1f, 0x924},
+ { 0x120414, 15, 0x1f, 0xfff},
+ { 0x120450, 10, 0x1f, 0x924},
+ { 0x120478, 2, 0x1f, 0xfff},
+ { 0x120480, 43, 0x1f, 0x924},
+ { 0x12052c, 1, 0x1f, 0xfff},
+ { 0x120530, 5, 0x1f, 0x924},
+ { 0x120544, 4, 0x3, 0x924},
+ { 0x120554, 4, 0x1f, 0x924},
+ { 0x120564, 2, 0x1f, 0xfff},
+ { 0x12057c, 2, 0x1f, 0x1fff},
+ { 0x120588, 3, 0x1f, 0x1fff},
+ { 0x120598, 1, 0x1f, 0x1fff},
+ { 0x12059c, 22, 0x1e, 0x924},
+ { 0x1205f4, 1, 0x6, 0x924},
+ { 0x1205f8, 4, 0x1c, 0x924},
+ { 0x120618, 1, 0x1c, 0x924},
+ { 0x12061c, 31, 0x1e, 0x924},
+ { 0x120698, 3, 0x1c, 0x924},
+ { 0x1206a4, 1, 0x4, 0x924},
+ { 0x1206a8, 1, 0x1c, 0x924},
+ { 0x1206b0, 38, 0x1c, 0x924},
+ { 0x120748, 1, 0x1c, 0xfff},
+ { 0x12074c, 11, 0x1c, 0x924},
+ { 0x120778, 2, 0x1c, 0xfff},
+ { 0x120780, 23, 0x1c, 0x924},
+ { 0x1207dc, 1, 0x4, 0x924},
+ { 0x1207fc, 1, 0x1c, 0x924},
+ { 0x12080c, 2, 0x1f, 0xfff},
+ { 0x120814, 1, 0x1f, 0x924},
+ { 0x120818, 1, 0x1f, 0xfff},
+ { 0x12081c, 1, 0x1f, 0x924},
+ { 0x120820, 1, 0x1f, 0xfff},
+ { 0x120824, 1, 0x1f, 0x924},
+ { 0x120828, 1, 0x1f, 0xfff},
+ { 0x12082c, 1, 0x1f, 0x924},
+ { 0x120830, 1, 0x1f, 0xfff},
+ { 0x120834, 1, 0x1f, 0x924},
+ { 0x120838, 1, 0x1f, 0xfff},
+ { 0x12083c, 1, 0x1f, 0x924},
+ { 0x120840, 1, 0x1f, 0xfff},
+ { 0x120844, 1, 0x1f, 0x924},
+ { 0x120848, 1, 0x1f, 0xfff},
+ { 0x12084c, 1, 0x1f, 0x924},
+ { 0x120850, 1, 0x1f, 0xfff},
+ { 0x120854, 1, 0x1f, 0x924},
+ { 0x120858, 1, 0x1f, 0xfff},
+ { 0x12085c, 1, 0x1f, 0x924},
+ { 0x120860, 1, 0x1f, 0xfff},
+ { 0x120864, 1, 0x1f, 0x924},
+ { 0x120868, 1, 0x1f, 0xfff},
+ { 0x12086c, 1, 0x1f, 0x924},
+ { 0x120870, 1, 0x1f, 0xfff},
+ { 0x120874, 1, 0x1f, 0x924},
+ { 0x120878, 1, 0x1f, 0xfff},
+ { 0x12087c, 1, 0x1f, 0x924},
+ { 0x120880, 1, 0x1f, 0xfff},
+ { 0x120884, 1, 0x1f, 0x924},
+ { 0x120888, 1, 0x1f, 0xfff},
+ { 0x12088c, 1, 0x1f, 0x924},
+ { 0x120890, 1, 0x1f, 0xfff},
+ { 0x120894, 1, 0x1f, 0x924},
+ { 0x120898, 1, 0x1f, 0xfff},
+ { 0x12089c, 1, 0x1f, 0x924},
+ { 0x1208a0, 1, 0x1f, 0xfff},
+ { 0x1208a4, 1, 0x1f, 0x924},
+ { 0x1208a8, 1, 0x1f, 0xfff},
+ { 0x1208ac, 1, 0x1f, 0x924},
+ { 0x1208b0, 1, 0x1f, 0xfff},
+ { 0x1208b4, 1, 0x1f, 0x924},
+ { 0x1208b8, 1, 0x1f, 0xfff},
+ { 0x1208bc, 1, 0x1f, 0x924},
+ { 0x1208c0, 1, 0x1f, 0xfff},
+ { 0x1208c4, 1, 0x1f, 0x924},
+ { 0x1208c8, 1, 0x1f, 0xfff},
+ { 0x1208cc, 1, 0x1f, 0x924},
+ { 0x1208d0, 1, 0x1f, 0xfff},
+ { 0x1208d4, 1, 0x1f, 0x924},
+ { 0x1208d8, 1, 0x1f, 0xfff},
+ { 0x1208dc, 1, 0x1f, 0x924},
+ { 0x1208e0, 1, 0x1f, 0xfff},
+ { 0x1208e4, 1, 0x1f, 0x924},
+ { 0x1208e8, 1, 0x1f, 0xfff},
+ { 0x1208ec, 1, 0x1f, 0x924},
+ { 0x1208f0, 1, 0x1f, 0xfff},
+ { 0x1208f4, 1, 0x1f, 0x924},
+ { 0x1208f8, 1, 0x1f, 0xfff},
+ { 0x1208fc, 1, 0x1f, 0x924},
+ { 0x120900, 1, 0x1f, 0xfff},
+ { 0x120904, 1, 0x1f, 0x924},
+ { 0x120908, 1, 0x1f, 0xfff},
+ { 0x12090c, 1, 0x1f, 0x924},
+ { 0x120910, 7, 0x1c, 0x924},
+ { 0x120930, 9, 0x1c, 0x924},
+ { 0x12095c, 37, 0x18, 0x924},
+ { 0x120a00, 2, 0x7, 0x924},
+ { 0x120b00, 1, 0x18, 0x924},
+ { 0x122000, 2, 0x1f, 0x924},
+ { 0x122008, 2046, 0x1, 0x924},
+ { 0x128000, 6144, 0x1e, 0x924},
+ { 0x130000, 1, 0x1c, 0x1fff},
+ { 0x130004, 11, 0x1c, 0x924},
+ { 0x130030, 1, 0x1c, 0xfff},
+ { 0x130034, 6, 0x1c, 0x924},
+ { 0x13004c, 3, 0x1c, 0xfff},
+ { 0x130058, 3, 0x1c, 0x924},
+ { 0x130064, 2, 0x1c, 0xfff},
+ { 0x13006c, 8, 0x1c, 0x924},
+ { 0x13009c, 2, 0x1c, 0x1fff},
+ { 0x1300a8, 1, 0x1c, 0x1fff},
+ { 0x130100, 12, 0x1c, 0x924},
+ { 0x130130, 1, 0x1c, 0xfff},
+ { 0x130134, 14, 0x1c, 0x924},
+ { 0x13016c, 1, 0x1c, 0xfff},
+ { 0x130170, 1, 0x1c, 0x924},
+ { 0x130180, 1, 0x1c, 0x924},
+ { 0x130200, 1, 0x1c, 0x924},
+ { 0x130280, 1, 0x1c, 0x924},
+ { 0x130300, 1, 0x1c, 0xfff},
+ { 0x130304, 4, 0x1c, 0x924},
+ { 0x130380, 1, 0x1c, 0x924},
+ { 0x130400, 1, 0x1c, 0x924},
+ { 0x130480, 1, 0x1c, 0xfff},
+ { 0x130484, 4, 0x1c, 0x924},
+ { 0x130800, 72, 0x1c, 0x924},
+ { 0x131000, 136, 0x1c, 0x924},
+ { 0x132000, 148, 0x1c, 0x924},
+ { 0x134000, 544, 0x1c, 0x924},
+ { 0x140000, 1, 0x1f, 0x924},
+ { 0x140004, 9, 0xf, 0x924},
+ { 0x140028, 8, 0x1f, 0x924},
+ { 0x140048, 5, 0xf, 0x924},
+ { 0x14005c, 2, 0xf, 0xfff},
+ { 0x140064, 3, 0xf, 0x924},
+ { 0x140070, 1, 0x1f, 0x924},
+ { 0x140074, 10, 0xf, 0x924},
+ { 0x14009c, 1, 0x1f, 0x924},
+ { 0x1400a0, 5, 0xf, 0x924},
+ { 0x1400b4, 7, 0x1f, 0x924},
+ { 0x1400d0, 2, 0xf, 0xfff},
+ { 0x1400d8, 2, 0xf, 0x924},
+ { 0x1400e0, 1, 0xf, 0xfff},
+ { 0x1400e4, 5, 0xf, 0x924},
+ { 0x1400f8, 2, 0x1f, 0x924},
+ { 0x140100, 5, 0x3, 0x924},
+ { 0x140114, 5, 0xf, 0x924},
+ { 0x140128, 7, 0x1f, 0x924},
+ { 0x140144, 9, 0xf, 0x924},
+ { 0x140168, 8, 0x1f, 0x924},
+ { 0x140188, 3, 0xf, 0x924},
+ { 0x140194, 13, 0x1f, 0x924},
+ { 0x1401d8, 2, 0x1f, 0x1fff},
+ { 0x1401e4, 1, 0x1f, 0x1fff},
+ { 0x140200, 6, 0xf, 0xfff},
+ { 0x1402e0, 2, 0xc, 0x924},
+ { 0x1402e8, 2, 0x1c, 0x924},
+ { 0x1402f0, 9, 0xc, 0x924},
+ { 0x140314, 9, 0x10, 0x924},
+ { 0x140338, 7, 0x10, 0xfff},
+ { 0x140354, 7, 0x10, 0x924},
+ { 0x140370, 7, 0x10, 0xfff},
+ { 0x14038c, 14, 0x10, 0x924},
+ { 0x1404b0, 14, 0x10, 0x924},
+ { 0x15c000, 2, 0x1e, 0x924},
+ { 0x15c008, 5, 0x2, 0x924},
+ { 0x15c020, 8, 0x1c, 0x924},
+ { 0x15c040, 1, 0xc, 0x924},
+ { 0x15c044, 2, 0x1c, 0x924},
+ { 0x15c04c, 8, 0xc, 0x924},
+ { 0x15c06c, 8, 0x1c, 0x924},
+ { 0x15c090, 13, 0x1c, 0x924},
+ { 0x15c0c8, 24, 0x1c, 0x924},
+ { 0x15c128, 2, 0xc, 0x924},
+ { 0x15c130, 1, 0x1c, 0x924},
+ { 0x15c138, 6, 0x1c, 0x924},
+ { 0x15c150, 2, 0x18, 0x924},
+ { 0x15c158, 2, 0x8, 0x924},
+ { 0x15c160, 23, 0x10, 0x924},
+ { 0x15c1bc, 6, 0x10, 0xfff},
+ { 0x15c1d4, 23, 0x10, 0x924},
+ { 0x15c230, 7, 0x10, 0xfff},
+ { 0x15c24c, 90, 0x10, 0x924},
+ { 0x160004, 6, 0x18, 0x924},
+ { 0x16003c, 1, 0x10, 0x924},
+ { 0x160040, 6, 0x18, 0x924},
+ { 0x16005c, 6, 0x18, 0x924},
+ { 0x160074, 1, 0x10, 0x924},
+ { 0x160078, 2, 0x18, 0x924},
+ { 0x160300, 8, 0x18, 0x924},
+ { 0x160330, 6, 0x18, 0x924},
+ { 0x160404, 6, 0x18, 0x924},
+ { 0x16043c, 1, 0x10, 0x924},
+ { 0x160440, 6, 0x18, 0x924},
+ { 0x16045c, 6, 0x18, 0x924},
+ { 0x160474, 1, 0x10, 0x924},
+ { 0x160478, 2, 0x18, 0x924},
+ { 0x160700, 8, 0x18, 0x924},
+ { 0x160730, 6, 0x18, 0x924},
+ { 0x161000, 7, 0x1f, 0x924},
+ { 0x16102c, 2, 0x1f, 0x1fff},
+ { 0x161038, 1, 0x1f, 0x1fff},
+ { 0x16103c, 2, 0x1c, 0x924},
+ { 0x161800, 2, 0x1f, 0x924},
+ { 0x162000, 54, 0x18, 0x924},
+ { 0x162200, 60, 0x18, 0x924},
+ { 0x162400, 54, 0x18, 0x924},
+ { 0x162600, 60, 0x18, 0x924},
+ { 0x162800, 54, 0x18, 0x924},
+ { 0x162a00, 60, 0x18, 0x924},
+ { 0x162c00, 54, 0x18, 0x924},
+ { 0x162e00, 60, 0x18, 0x924},
+ { 0x163000, 1, 0x18, 0x924},
+ { 0x163008, 1, 0x18, 0x924},
+ { 0x163010, 1, 0x18, 0x924},
+ { 0x163018, 1, 0x18, 0x924},
+ { 0x163020, 5, 0x18, 0x924},
+ { 0x163038, 3, 0x18, 0x924},
+ { 0x163048, 3, 0x18, 0x924},
+ { 0x163058, 1, 0x18, 0x924},
+ { 0x163060, 1, 0x18, 0x924},
+ { 0x163068, 1, 0x18, 0x924},
+ { 0x163070, 3, 0x18, 0x924},
+ { 0x163080, 1, 0x18, 0x924},
+ { 0x163088, 3, 0x18, 0x924},
+ { 0x163098, 1, 0x18, 0x924},
+ { 0x1630a0, 1, 0x18, 0x924},
+ { 0x1630a8, 1, 0x18, 0x924},
+ { 0x1630b0, 2, 0x10, 0x924},
+ { 0x1630c0, 1, 0x18, 0x924},
+ { 0x1630c8, 1, 0x18, 0x924},
+ { 0x1630d0, 1, 0x18, 0x924},
+ { 0x1630d8, 1, 0x18, 0x924},
+ { 0x1630e0, 2, 0x18, 0x924},
+ { 0x163110, 1, 0x18, 0x924},
+ { 0x163120, 2, 0x18, 0x924},
+ { 0x163420, 4, 0x18, 0x924},
+ { 0x163438, 2, 0x18, 0x924},
+ { 0x163488, 2, 0x18, 0x924},
+ { 0x163520, 2, 0x18, 0x924},
+ { 0x163800, 1, 0x18, 0x924},
+ { 0x163808, 1, 0x18, 0x924},
+ { 0x163810, 1, 0x18, 0x924},
+ { 0x163818, 1, 0x18, 0x924},
+ { 0x163820, 5, 0x18, 0x924},
+ { 0x163838, 3, 0x18, 0x924},
+ { 0x163848, 3, 0x18, 0x924},
+ { 0x163858, 1, 0x18, 0x924},
+ { 0x163860, 1, 0x18, 0x924},
+ { 0x163868, 1, 0x18, 0x924},
+ { 0x163870, 3, 0x18, 0x924},
+ { 0x163880, 1, 0x18, 0x924},
+ { 0x163888, 3, 0x18, 0x924},
+ { 0x163898, 1, 0x18, 0x924},
+ { 0x1638a0, 1, 0x18, 0x924},
+ { 0x1638a8, 1, 0x18, 0x924},
+ { 0x1638b0, 2, 0x10, 0x924},
+ { 0x1638c0, 1, 0x18, 0x924},
+ { 0x1638c8, 1, 0x18, 0x924},
+ { 0x1638d0, 1, 0x18, 0x924},
+ { 0x1638d8, 1, 0x18, 0x924},
+ { 0x1638e0, 2, 0x18, 0x924},
+ { 0x163910, 1, 0x18, 0x924},
+ { 0x163920, 2, 0x18, 0x924},
+ { 0x163c20, 4, 0x18, 0x924},
+ { 0x163c38, 2, 0x18, 0x924},
+ { 0x163c88, 2, 0x18, 0x924},
+ { 0x163d20, 2, 0x18, 0x924},
+ { 0x164000, 5, 0x1f, 0x924},
+ { 0x164014, 2, 0x1f, 0xfff},
+ { 0x16401c, 53, 0x1f, 0x924},
+ { 0x164100, 2, 0x1f, 0x1fff},
+ { 0x16410c, 1, 0x1f, 0x1fff},
+ { 0x164110, 2, 0x1e, 0x924},
+ { 0x164118, 15, 0x1c, 0x924},
+ { 0x164200, 1, 0x1f, 0x924},
+ { 0x164208, 1, 0x1f, 0x924},
+ { 0x164210, 1, 0x1f, 0x924},
+ { 0x164218, 1, 0x1f, 0x924},
+ { 0x164220, 1, 0x1f, 0x924},
+ { 0x164228, 1, 0x1f, 0x924},
+ { 0x164230, 1, 0x1f, 0x924},
+ { 0x164238, 1, 0x1f, 0x924},
+ { 0x164240, 1, 0x1f, 0x924},
+ { 0x164248, 1, 0x1f, 0x924},
+ { 0x164250, 1, 0x1f, 0x924},
+ { 0x164258, 1, 0x1f, 0x924},
+ { 0x164260, 1, 0x1f, 0x924},
+ { 0x164270, 2, 0x1f, 0x924},
+ { 0x164280, 2, 0x1f, 0x924},
+ { 0x164800, 2, 0x1f, 0x924},
+ { 0x165000, 2, 0x1f, 0x924},
+ { 0x166000, 164, 0x1f, 0x924},
+ { 0x1662b0, 2, 0x1f, 0x1fff},
+ { 0x1662bc, 1, 0x1f, 0x1fff},
+ { 0x1662cc, 7, 0x1c, 0x924},
+ { 0x166400, 49, 0x1f, 0x924},
+ { 0x1664c8, 32, 0x1f, 0x924},
+ { 0x166548, 1, 0x1f, 0xfff},
+ { 0x16654c, 1, 0x1f, 0x924},
+ { 0x166550, 1, 0x1f, 0xfff},
+ { 0x166554, 1, 0x1f, 0x924},
+ { 0x166558, 1, 0x1f, 0xfff},
+ { 0x16655c, 1, 0x1f, 0x924},
+ { 0x166568, 2, 0x1f, 0x924},
+ { 0x166570, 5, 0x1c, 0x924},
+ { 0x166800, 1, 0x1f, 0x924},
+ { 0x168000, 1, 0x1f, 0xfff},
+ { 0x168004, 1, 0x1f, 0x924},
+ { 0x168008, 1, 0x1f, 0xfff},
+ { 0x16800c, 1, 0x1f, 0x924},
+ { 0x168010, 1, 0x1f, 0xfff},
+ { 0x168014, 1, 0x1f, 0x924},
+ { 0x168018, 1, 0x1f, 0xfff},
+ { 0x16801c, 3, 0x1f, 0x924},
+ { 0x168028, 2, 0x1f, 0xfff},
+ { 0x168030, 10, 0x1f, 0x924},
+ { 0x168058, 9, 0x1f, 0xfff},
+ { 0x16807c, 106, 0x1f, 0x924},
+ { 0x168224, 2, 0x3, 0x924},
+ { 0x16822c, 3, 0x1f, 0x924},
+ { 0x168238, 1, 0x1f, 0xfff},
+ { 0x16823c, 25, 0x1f, 0x924},
+ { 0x1682a0, 12, 0x3, 0x924},
+ { 0x1682d0, 7, 0x1f, 0xfff},
+ { 0x1682ec, 5, 0x1f, 0x924},
+ { 0x168300, 2, 0x3, 0xfff},
+ { 0x168308, 65, 0x1f, 0xfff},
+ { 0x16840c, 1, 0x1f, 0x924},
+ { 0x168410, 2, 0x1f, 0xfff},
+ { 0x168418, 2, 0x3, 0x924},
+ { 0x168420, 6, 0x1f, 0x924},
+ { 0x168448, 2, 0x1f, 0x1fff},
+ { 0x168454, 1, 0x1f, 0x1fff},
+ { 0x168800, 19, 0x1f, 0x924},
+ { 0x168900, 1, 0x1f, 0x924},
+ { 0x168a00, 128, 0x1f, 0xfff},
+ { 0x16a000, 1536, 0x1f, 0x924},
+ { 0x16c000, 1536, 0x1f, 0x924},
+ { 0x16e000, 16, 0x2, 0x924},
+ { 0x16e040, 8, 0x1c, 0x924},
+ { 0x16e100, 1, 0x2, 0x924},
+ { 0x16e200, 2, 0x2, 0xfff},
+ { 0x16e400, 1, 0x2, 0x924},
+ { 0x16e404, 2, 0x2, 0xfff},
+ { 0x16e40c, 94, 0x2, 0x924},
+ { 0x16e584, 64, 0x2, 0xfff},
+ { 0x16e684, 2, 0x1e, 0xfff},
+ { 0x16e68c, 4, 0x2, 0xfff},
+ { 0x16e69c, 8, 0x2, 0x924},
+ { 0x16e6bc, 4, 0x1e, 0x924},
+ { 0x16e6cc, 4, 0x2, 0x924},
+ { 0x16e6e0, 2, 0x1c, 0x924},
+ { 0x16e6e8, 5, 0xc, 0x924},
+ { 0x16e6fc, 4, 0x1c, 0xfff},
+ { 0x16e70c, 1, 0x1c, 0x924},
+ { 0x16e768, 17, 0x1c, 0x924},
+ { 0x16e7ac, 12, 0x10, 0xfff},
+ { 0x170000, 24, 0x1f, 0x924},
+ { 0x170060, 4, 0x3, 0x924},
+ { 0x170070, 13, 0x1f, 0x924},
+ { 0x1700a4, 1, 0x1f, 0xfff},
+ { 0x1700a8, 1, 0x1f, 0x924},
+ { 0x1700ac, 2, 0x1f, 0xfff},
+ { 0x1700b4, 3, 0x1f, 0x924},
+ { 0x1700c0, 1, 0x1f, 0xfff},
+ { 0x1700c4, 44, 0x1f, 0x924},
+ { 0x170184, 2, 0x1f, 0x1fff},
+ { 0x170190, 1, 0x1f, 0x1fff},
+ { 0x170194, 11, 0x1c, 0x924},
+ { 0x1701c4, 1, 0x1c, 0x924},
+ { 0x1701cc, 7, 0x1c, 0x924},
+ { 0x1701e8, 1, 0x18, 0x924},
+ { 0x1701ec, 1, 0x1c, 0x924},
+ { 0x1701f4, 1, 0x1c, 0x924},
+ { 0x170200, 4, 0x1f, 0x924},
+ { 0x170214, 1, 0x1f, 0x924},
+ { 0x170218, 77, 0x1c, 0x924},
+ { 0x170400, 64, 0x1c, 0x924},
+ { 0x178000, 1, 0x1f, 0x924},
+ { 0x180000, 61, 0x1f, 0x924},
+ { 0x180114, 2, 0x1f, 0x1fff},
+ { 0x180120, 3, 0x1f, 0x1fff},
+ { 0x180130, 1, 0x1f, 0x1fff},
+ { 0x18013c, 2, 0x1e, 0x924},
+ { 0x180200, 27, 0x1f, 0x924},
+ { 0x18026c, 1, 0x1f, 0xfff},
+ { 0x180270, 12, 0x1f, 0x924},
+ { 0x1802a0, 1, 0x1f, 0xfff},
+ { 0x1802a4, 17, 0x1f, 0x924},
+ { 0x180340, 4, 0x1f, 0x924},
+ { 0x180380, 1, 0x1c, 0x924},
+ { 0x180388, 1, 0x1c, 0x924},
+ { 0x180390, 1, 0x1c, 0x924},
+ { 0x180398, 1, 0x1c, 0x924},
+ { 0x1803a0, 5, 0x1c, 0x924},
+ { 0x1803b4, 2, 0x18, 0x924},
+ { 0x180400, 256, 0x3, 0xfff},
+ { 0x181000, 4, 0x1f, 0x93c},
+ { 0x181010, 1020, 0x1f, 0x38},
+ { 0x182000, 4, 0x18, 0x924},
+ { 0x1a0000, 1, 0x1f, 0x92c},
+ { 0x1a0004, 5631, 0x1f, 0x8},
+ { 0x1a5800, 2560, 0x1e, 0x8},
+ { 0x1a8000, 1, 0x1f, 0x92c},
+ { 0x1a8004, 8191, 0x1e, 0x8},
+ { 0x1b0000, 1, 0x1f, 0x92c},
+ { 0x1b0004, 15, 0x2, 0x8},
+ { 0x1b0040, 1, 0x1e, 0x92c},
+ { 0x1b0044, 239, 0x2, 0x8},
+ { 0x1b0400, 1, 0x1f, 0x92c},
+ { 0x1b0404, 255, 0x2, 0x8},
+ { 0x1b0800, 1, 0x1f, 0x924},
+ { 0x1b0840, 1, 0x1e, 0x924},
+ { 0x1b0c00, 1, 0x1f, 0x1fff},
+ { 0x1b1000, 1, 0x1f, 0x1fff},
+ { 0x1b1040, 1, 0x1e, 0x1fff},
+ { 0x1b1400, 1, 0x1f, 0x924},
+ { 0x1b1440, 1, 0x1e, 0x924},
+ { 0x1b1480, 1, 0x1e, 0x924},
+ { 0x1b14c0, 1, 0x1e, 0x924},
+ { 0x1b1800, 128, 0x1f, 0x10},
+ { 0x1b1c00, 128, 0x1f, 0x10},
+ { 0x1b2000, 1, 0x1f, 0xdb6},
+ { 0x1b2400, 1, 0x1e, 0x92c},
+ { 0x1b2404, 5631, 0x1c, 0x8},
+ { 0x1b8000, 1, 0x1f, 0xfff},
+ { 0x1b8040, 1, 0x1f, 0xfff},
+ { 0x1b8080, 1, 0x1f, 0xfff},
+ { 0x1b80c0, 1, 0x1f, 0xfff},
+ { 0x1b8100, 1, 0x1f, 0x924},
+ { 0x1b8140, 1, 0x1f, 0x924},
+ { 0x1b8180, 1, 0x1f, 0x924},
+ { 0x1b81c0, 1, 0x1f, 0x924},
+ { 0x1b8200, 1, 0x1f, 0x924},
+ { 0x1b8240, 1, 0x1f, 0x924},
+ { 0x1b8280, 1, 0x1f, 0x924},
+ { 0x1b82c0, 1, 0x1f, 0x924},
+ { 0x1b8300, 1, 0x1f, 0x924},
+ { 0x1b8340, 1, 0x1f, 0x924},
+ { 0x1b8380, 1, 0x1f, 0x924},
+ { 0x1b83c0, 1, 0x1f, 0x924},
+ { 0x1b8400, 1, 0x1f, 0x924},
+ { 0x1b8440, 1, 0x1f, 0x924},
+ { 0x1b8480, 1, 0x1f, 0x924},
+ { 0x1b84c0, 1, 0x1f, 0x924},
+ { 0x1b8500, 1, 0x1f, 0x924},
+ { 0x1b8540, 1, 0x1f, 0x924},
+ { 0x1b8580, 1, 0x1f, 0x924},
+ { 0x1b85c0, 19, 0x1c, 0x924},
+ { 0x1b8800, 1, 0x1f, 0x924},
+ { 0x1b8840, 1, 0x1f, 0x924},
+ { 0x1b8880, 1, 0x1f, 0x924},
+ { 0x1b88c0, 1, 0x1f, 0x924},
+ { 0x1b8900, 1, 0x1f, 0x924},
+ { 0x1b8940, 1, 0x1f, 0x924},
+ { 0x1b8980, 1, 0x1f, 0x924},
+ { 0x1b89c0, 1, 0x1f, 0x924},
+ { 0x1b8a00, 1, 0x1f, 0x934},
+ { 0x1b8a40, 1, 0x1f, 0x924},
+ { 0x1b8a80, 1, 0x1f, 0x492},
+ { 0x1b8ac0, 1, 0x1f, 0x924},
+ { 0x1b8b00, 1, 0x1f, 0x924},
+ { 0x1b8b40, 1, 0x1f, 0x924},
+ { 0x1b8b80, 1, 0x1f, 0x924},
+ { 0x1b8bc0, 1, 0x1f, 0x924},
+ { 0x1b8c00, 1, 0x1f, 0x924},
+ { 0x1b8c40, 1, 0x1f, 0x924},
+ { 0x1b8c80, 1, 0x1f, 0x924},
+ { 0x1b8cc0, 1, 0x1f, 0x924},
+ { 0x1b8cc4, 1, 0x1c, 0x924},
+ { 0x1b8d00, 1, 0x1f, 0x924},
+ { 0x1b8d40, 1, 0x1f, 0x924},
+ { 0x1b8d80, 1, 0x1f, 0x924},
+ { 0x1b8dc0, 1, 0x1f, 0x924},
+ { 0x1b8e00, 1, 0x1f, 0x924},
+ { 0x1b8e40, 1, 0x1f, 0x924},
+ { 0x1b8e80, 1, 0x1f, 0x924},
+ { 0x1b8e84, 1, 0x1c, 0x924},
+ { 0x1b8ec0, 1, 0x1e, 0x924},
+ { 0x1b8f00, 1, 0x1e, 0x924},
+ { 0x1b8f40, 1, 0x1e, 0x924},
+ { 0x1b8f80, 1, 0x1e, 0x924},
+ { 0x1b8fc0, 1, 0x1e, 0x924},
+ { 0x1b8fd4, 5, 0x1c, 0x924},
+ { 0x1b8fe8, 2, 0x18, 0x924},
+ { 0x1b9000, 1, 0x1c, 0x924},
+ { 0x1b9040, 3, 0x1c, 0x924},
+ { 0x1b905c, 1, 0x18, 0x924},
+ { 0x1b9064, 1, 0x10, 0x924},
+ { 0x1b9080, 10, 0x10, 0x924},
+ { 0x1c0000, 2, 0x1f, 0x924},
+ { 0x200000, 65, 0x1f, 0x924},
+ { 0x200124, 2, 0x1f, 0x1fff},
+ { 0x200130, 3, 0x1f, 0x1fff},
+ { 0x200140, 1, 0x1f, 0x1fff},
+ { 0x20014c, 2, 0x1e, 0x924},
+ { 0x200200, 27, 0x1f, 0x924},
+ { 0x20026c, 1, 0x1f, 0xfff},
+ { 0x200270, 12, 0x1f, 0x924},
+ { 0x2002a0, 1, 0x1f, 0xfff},
+ { 0x2002a4, 17, 0x1f, 0x924},
+ { 0x200340, 4, 0x1f, 0x924},
+ { 0x200380, 1, 0x1c, 0x924},
+ { 0x200388, 1, 0x1c, 0x924},
+ { 0x200390, 1, 0x1c, 0x924},
+ { 0x200398, 1, 0x1c, 0x924},
+ { 0x2003a0, 1, 0x1c, 0x924},
+ { 0x2003a8, 2, 0x1c, 0x924},
+ { 0x200400, 256, 0x3, 0xfff},
+ { 0x202000, 4, 0x1f, 0x1927},
+ { 0x202010, 2044, 0x1f, 0x1007},
+ { 0x204000, 4, 0x18, 0x924},
+ { 0x220000, 1, 0x1f, 0x925},
+ { 0x220004, 5631, 0x1f, 0x1},
+ { 0x225800, 2560, 0x1e, 0x1},
+ { 0x228000, 1, 0x1f, 0x925},
+ { 0x228004, 8191, 0x1e, 0x1},
+ { 0x230000, 1, 0x1f, 0x925},
+ { 0x230004, 15, 0x2, 0x1},
+ { 0x230040, 1, 0x1e, 0x925},
+ { 0x230044, 239, 0x2, 0x1},
+ { 0x230400, 1, 0x1f, 0x925},
+ { 0x230404, 255, 0x2, 0x1},
+ { 0x230800, 1, 0x1f, 0x924},
+ { 0x230840, 1, 0x1e, 0x924},
+ { 0x230c00, 1, 0x1f, 0x924},
+ { 0x231000, 1, 0x1f, 0x924},
+ { 0x231040, 1, 0x1e, 0x924},
+ { 0x231400, 1, 0x1f, 0x924},
+ { 0x231440, 1, 0x1e, 0x924},
+ { 0x231480, 1, 0x1e, 0x924},
+ { 0x2314c0, 1, 0x1e, 0x924},
+ { 0x231800, 128, 0x1f, 0x2},
+ { 0x231c00, 128, 0x1f, 0x2},
+ { 0x232000, 1, 0x1f, 0xdb6},
+ { 0x232400, 1, 0x1e, 0x925},
+ { 0x232404, 5631, 0x1c, 0x1},
+ { 0x238000, 1, 0x1f, 0xfff},
+ { 0x238040, 1, 0x1f, 0xfff},
+ { 0x238080, 1, 0x1f, 0xfff},
+ { 0x2380c0, 1, 0x1f, 0xfff},
+ { 0x238100, 1, 0x1f, 0x924},
+ { 0x238140, 1, 0x1f, 0x924},
+ { 0x238180, 1, 0x1f, 0x924},
+ { 0x2381c0, 1, 0x1f, 0x924},
+ { 0x238200, 1, 0x1f, 0x924},
+ { 0x238240, 1, 0x1f, 0x924},
+ { 0x238280, 1, 0x1f, 0x924},
+ { 0x2382c0, 1, 0x1f, 0x924},
+ { 0x238300, 1, 0x1f, 0x924},
+ { 0x238340, 1, 0x1f, 0x924},
+ { 0x238380, 1, 0x1f, 0x924},
+ { 0x2383c0, 1, 0x1f, 0x924},
+ { 0x238400, 1, 0x1f, 0x924},
+ { 0x238440, 1, 0x1f, 0x924},
+ { 0x238480, 1, 0x1f, 0x924},
+ { 0x2384c0, 1, 0x1f, 0x924},
+ { 0x238500, 1, 0x1f, 0x924},
+ { 0x238540, 1, 0x1f, 0x924},
+ { 0x238580, 1, 0x1f, 0x924},
+ { 0x2385c0, 19, 0x1c, 0x924},
+ { 0x238800, 1, 0x1f, 0x924},
+ { 0x238840, 1, 0x1f, 0x924},
+ { 0x238880, 1, 0x1f, 0x924},
+ { 0x2388c0, 1, 0x1f, 0x924},
+ { 0x238900, 1, 0x1f, 0x924},
+ { 0x238940, 1, 0x1f, 0x924},
+ { 0x238980, 1, 0x1f, 0x924},
+ { 0x2389c0, 1, 0x1f, 0x924},
+ { 0x238a00, 1, 0x1f, 0x926},
+ { 0x238a40, 1, 0x1f, 0x924},
+ { 0x238a80, 1, 0x1f, 0x492},
+ { 0x238ac0, 1, 0x1f, 0x924},
+ { 0x238b00, 1, 0x1f, 0x924},
+ { 0x238b40, 1, 0x1f, 0x924},
+ { 0x238b80, 1, 0x1f, 0x924},
+ { 0x238bc0, 1, 0x1f, 0x924},
+ { 0x238c00, 1, 0x1f, 0x924},
+ { 0x238c40, 1, 0x1f, 0x924},
+ { 0x238c80, 1, 0x1f, 0x924},
+ { 0x238cc0, 1, 0x1f, 0x924},
+ { 0x238cc4, 1, 0x1c, 0x924},
+ { 0x238d00, 1, 0x1f, 0x924},
+ { 0x238d40, 1, 0x1f, 0x924},
+ { 0x238d80, 1, 0x1f, 0x924},
+ { 0x238dc0, 1, 0x1f, 0x924},
+ { 0x238e00, 1, 0x1f, 0x924},
+ { 0x238e40, 1, 0x1f, 0x924},
+ { 0x238e80, 1, 0x1f, 0x924},
+ { 0x238e84, 1, 0x1c, 0x924},
+ { 0x238ec0, 1, 0x1e, 0x924},
+ { 0x238f00, 1, 0x1e, 0x924},
+ { 0x238f40, 1, 0x1e, 0x924},
+ { 0x238f80, 1, 0x1e, 0x924},
+ { 0x238fc0, 1, 0x1e, 0x924},
+ { 0x238fd4, 5, 0x1c, 0x924},
+ { 0x238fe8, 2, 0x18, 0x924},
+ { 0x239000, 1, 0x1c, 0x924},
+ { 0x239040, 3, 0x1c, 0x924},
+ { 0x23905c, 1, 0x18, 0x924},
+ { 0x239064, 1, 0x10, 0x924},
+ { 0x239080, 10, 0x10, 0x924},
+ { 0x240000, 2, 0x1f, 0x924},
+ { 0x280000, 65, 0x1f, 0x924},
+ { 0x280124, 2, 0x1f, 0x1fff},
+ { 0x280130, 3, 0x1f, 0x1fff},
+ { 0x280140, 1, 0x1f, 0x1fff},
+ { 0x28014c, 2, 0x1e, 0x924},
+ { 0x280200, 27, 0x1f, 0x924},
+ { 0x28026c, 1, 0x1f, 0xfff},
+ { 0x280270, 12, 0x1f, 0x924},
+ { 0x2802a0, 1, 0x1f, 0xfff},
+ { 0x2802a4, 17, 0x1f, 0x924},
+ { 0x280340, 4, 0x1f, 0x924},
+ { 0x280380, 1, 0x1c, 0x924},
+ { 0x280388, 1, 0x1c, 0x924},
+ { 0x280390, 1, 0x1c, 0x924},
+ { 0x280398, 1, 0x1c, 0x924},
+ { 0x2803a0, 1, 0x1c, 0x924},
+ { 0x2803a8, 2, 0x1c, 0x924},
+ { 0x280400, 256, 0x3, 0xfff},
+ { 0x282000, 4, 0x1f, 0x9e4},
+ { 0x282010, 2044, 0x1f, 0x1c0},
+ { 0x284000, 4, 0x18, 0x924},
+ { 0x2a0000, 1, 0x1f, 0x964},
+ { 0x2a0004, 5631, 0x1f, 0x40},
+ { 0x2a5800, 2560, 0x1e, 0x40},
+ { 0x2a8000, 1, 0x1f, 0x964},
+ { 0x2a8004, 8191, 0x1e, 0x40},
+ { 0x2b0000, 1, 0x1f, 0x964},
+ { 0x2b0004, 15, 0x2, 0x40},
+ { 0x2b0040, 1, 0x1e, 0x964},
+ { 0x2b0044, 239, 0x2, 0x40},
+ { 0x2b0400, 1, 0x1f, 0x964},
+ { 0x2b0404, 255, 0x2, 0x40},
+ { 0x2b0800, 1, 0x1f, 0x924},
+ { 0x2b0840, 1, 0x1e, 0x924},
+ { 0x2b0c00, 1, 0x1f, 0x924},
+ { 0x2b1000, 1, 0x1f, 0x924},
+ { 0x2b1040, 1, 0x1e, 0x924},
+ { 0x2b1400, 1, 0x1f, 0x924},
+ { 0x2b1440, 1, 0x1e, 0x924},
+ { 0x2b1480, 1, 0x1e, 0x924},
+ { 0x2b14c0, 1, 0x1e, 0x924},
+ { 0x2b1800, 128, 0x1f, 0x80},
+ { 0x2b1c00, 128, 0x1f, 0x80},
+ { 0x2b2000, 1, 0x1f, 0xdb6},
+ { 0x2b2400, 1, 0x1e, 0x964},
+ { 0x2b2404, 5631, 0x1c, 0x40},
+ { 0x2b8000, 1, 0x1f, 0xfff},
+ { 0x2b8040, 1, 0x1f, 0xfff},
+ { 0x2b8080, 1, 0x1f, 0xfff},
+ { 0x2b80c0, 1, 0x1f, 0x924},
+ { 0x2b8100, 1, 0x1f, 0x924},
+ { 0x2b8140, 1, 0x1f, 0x924},
+ { 0x2b8180, 1, 0x1f, 0x924},
+ { 0x2b81c0, 1, 0x1f, 0x924},
+ { 0x2b8200, 1, 0x1f, 0x924},
+ { 0x2b8240, 1, 0x1f, 0x924},
+ { 0x2b8280, 1, 0x1f, 0x924},
+ { 0x2b82c0, 1, 0x1f, 0x924},
+ { 0x2b8300, 1, 0x1f, 0x924},
+ { 0x2b8340, 1, 0x1f, 0x924},
+ { 0x2b8380, 1, 0x1f, 0x924},
+ { 0x2b83c0, 1, 0x1f, 0x924},
+ { 0x2b8400, 1, 0x1f, 0x924},
+ { 0x2b8440, 1, 0x1f, 0x924},
+ { 0x2b8480, 1, 0x1f, 0x924},
+ { 0x2b84c0, 1, 0x1f, 0x924},
+ { 0x2b8500, 1, 0x1f, 0x924},
+ { 0x2b8540, 1, 0x1f, 0x924},
+ { 0x2b8580, 1, 0x1f, 0x924},
+ { 0x2b85c0, 19, 0x1c, 0x924},
+ { 0x2b8800, 1, 0x1f, 0x924},
+ { 0x2b8840, 1, 0x1f, 0x924},
+ { 0x2b8880, 1, 0x1f, 0x924},
+ { 0x2b88c0, 1, 0x1f, 0x924},
+ { 0x2b8900, 1, 0x1f, 0x924},
+ { 0x2b8940, 1, 0x1f, 0x924},
+ { 0x2b8980, 1, 0x1f, 0x924},
+ { 0x2b89c0, 1, 0x1f, 0x924},
+ { 0x2b8a00, 1, 0x1f, 0x9a4},
+ { 0x2b8a40, 1, 0x1f, 0x924},
+ { 0x2b8a80, 1, 0x1f, 0x492},
+ { 0x2b8ac0, 1, 0x1f, 0x924},
+ { 0x2b8b00, 1, 0x1f, 0x924},
+ { 0x2b8b40, 1, 0x1f, 0x924},
+ { 0x2b8b80, 1, 0x1f, 0x924},
+ { 0x2b8bc0, 1, 0x1f, 0x924},
+ { 0x2b8c00, 1, 0x1f, 0x924},
+ { 0x2b8c40, 1, 0x1f, 0x924},
+ { 0x2b8c80, 1, 0x1f, 0x924},
+ { 0x2b8cc0, 1, 0x1f, 0x924},
+ { 0x2b8cc4, 1, 0x1c, 0x924},
+ { 0x2b8d00, 1, 0x1f, 0x924},
+ { 0x2b8d40, 1, 0x1f, 0x924},
+ { 0x2b8d80, 1, 0x1f, 0x924},
+ { 0x2b8dc0, 1, 0x1f, 0x924},
+ { 0x2b8e00, 1, 0x1f, 0x924},
+ { 0x2b8e40, 1, 0x1f, 0x924},
+ { 0x2b8e80, 1, 0x1f, 0x924},
+ { 0x2b8e84, 1, 0x1c, 0x924},
+ { 0x2b8ec0, 1, 0x1e, 0x924},
+ { 0x2b8f00, 1, 0x1e, 0x924},
+ { 0x2b8f40, 1, 0x1e, 0x924},
+ { 0x2b8f80, 1, 0x1e, 0x924},
+ { 0x2b8fc0, 1, 0x1e, 0x924},
+ { 0x2b8fd4, 5, 0x1c, 0x924},
+ { 0x2b8fe8, 2, 0x18, 0x924},
+ { 0x2b9000, 1, 0x1c, 0x924},
+ { 0x2b9040, 3, 0x1c, 0x924},
+ { 0x2b905c, 1, 0x18, 0x924},
+ { 0x2b9064, 1, 0x10, 0x924},
+ { 0x2b9080, 10, 0x10, 0x924},
+ { 0x2c0000, 2, 0x1f, 0x1fff},
+ { 0x300000, 65, 0x1f, 0x924},
+ { 0x300124, 2, 0x1f, 0x1fff},
+ { 0x300130, 3, 0x1f, 0x1fff},
+ { 0x300140, 1, 0x1f, 0x1fff},
+ { 0x30014c, 2, 0x1e, 0x924},
+ { 0x300200, 27, 0x1f, 0x924},
+ { 0x30026c, 1, 0x1f, 0xfff},
+ { 0x300270, 12, 0x1f, 0x924},
+ { 0x3002a0, 1, 0x1f, 0xfff},
+ { 0x3002a4, 17, 0x1f, 0x924},
+ { 0x300340, 4, 0x1f, 0x924},
+ { 0x300380, 1, 0x1c, 0x924},
+ { 0x300388, 1, 0x1c, 0x924},
+ { 0x300390, 1, 0x1c, 0x924},
+ { 0x300398, 1, 0x1c, 0x924},
+ { 0x3003a0, 1, 0x1c, 0x924},
+ { 0x3003a8, 2, 0x1c, 0x924},
+ { 0x300400, 256, 0x3, 0xfff},
+ { 0x302000, 4, 0x1f, 0xf24},
+ { 0x302010, 2044, 0x1f, 0xe00},
+ { 0x304000, 4, 0x18, 0x924},
+ { 0x320000, 1, 0x1f, 0xb24},
+ { 0x320004, 5631, 0x1f, 0x200},
+ { 0x325800, 2560, 0x1e, 0x200},
+ { 0x328000, 1, 0x1f, 0xb24},
+ { 0x328004, 8191, 0x1e, 0x200},
+ { 0x330000, 1, 0x1f, 0xb24},
+ { 0x330004, 15, 0x2, 0x200},
+ { 0x330040, 1, 0x1e, 0xb24},
+ { 0x330044, 239, 0x2, 0x200},
+ { 0x330400, 1, 0x1f, 0xb24},
+ { 0x330404, 255, 0x2, 0x200},
+ { 0x330800, 1, 0x1f, 0x924},
+ { 0x330840, 1, 0x1e, 0x924},
+ { 0x330c00, 1, 0x1f, 0x924},
+ { 0x331000, 1, 0x1f, 0x924},
+ { 0x331040, 1, 0x1e, 0x924},
+ { 0x331400, 1, 0x1f, 0x924},
+ { 0x331440, 1, 0x1e, 0x924},
+ { 0x331480, 1, 0x1e, 0x924},
+ { 0x3314c0, 1, 0x1e, 0x924},
+ { 0x331800, 128, 0x1f, 0x400},
+ { 0x331c00, 128, 0x1f, 0x400},
+ { 0x332000, 1, 0x1f, 0xdb6},
+ { 0x332400, 1, 0x1e, 0xb24},
+ { 0x332404, 5631, 0x1c, 0x200},
+ { 0x338000, 1, 0x1f, 0xfff},
+ { 0x338040, 1, 0x1f, 0xfff},
+ { 0x338080, 1, 0x1f, 0xfff},
+ { 0x3380c0, 1, 0x1f, 0xfff},
+ { 0x338100, 1, 0x1f, 0x924},
+ { 0x338140, 1, 0x1f, 0x924},
+ { 0x338180, 1, 0x1f, 0x924},
+ { 0x3381c0, 1, 0x1f, 0x924},
+ { 0x338200, 1, 0x1f, 0x924},
+ { 0x338240, 1, 0x1f, 0x924},
+ { 0x338280, 1, 0x1f, 0x924},
+ { 0x3382c0, 1, 0x1f, 0x924},
+ { 0x338300, 1, 0x1f, 0x924},
+ { 0x338340, 1, 0x1f, 0x924},
+ { 0x338380, 1, 0x1f, 0x924},
+ { 0x3383c0, 1, 0x1f, 0x924},
+ { 0x338400, 1, 0x1f, 0x924},
+ { 0x338440, 1, 0x1f, 0x924},
+ { 0x338480, 1, 0x1f, 0x924},
+ { 0x3384c0, 1, 0x1f, 0x924},
+ { 0x338500, 1, 0x1f, 0x924},
+ { 0x338540, 1, 0x1f, 0x924},
+ { 0x338580, 1, 0x1f, 0x924},
+ { 0x3385c0, 19, 0x1c, 0x924},
+ { 0x338800, 1, 0x1f, 0x924},
+ { 0x338840, 1, 0x1f, 0x924},
+ { 0x338880, 1, 0x1f, 0x924},
+ { 0x3388c0, 1, 0x1f, 0x924},
+ { 0x338900, 1, 0x1f, 0x924},
+ { 0x338940, 1, 0x1f, 0x924},
+ { 0x338980, 1, 0x1f, 0x924},
+ { 0x3389c0, 1, 0x1f, 0x924},
+ { 0x338a00, 1, 0x1f, 0xd24},
+ { 0x338a40, 1, 0x1f, 0x924},
+ { 0x338a80, 1, 0x1f, 0x492},
+ { 0x338ac0, 1, 0x1f, 0x924},
+ { 0x338b00, 1, 0x1f, 0x924},
+ { 0x338b40, 1, 0x1f, 0x924},
+ { 0x338b80, 1, 0x1f, 0x924},
+ { 0x338bc0, 1, 0x1f, 0x924},
+ { 0x338c00, 1, 0x1f, 0x924},
+ { 0x338c40, 1, 0x1f, 0x924},
+ { 0x338c80, 1, 0x1f, 0x924},
+ { 0x338cc0, 1, 0x1f, 0x924},
+ { 0x338cc4, 1, 0x1c, 0x924},
+ { 0x338d00, 1, 0x1f, 0x924},
+ { 0x338d40, 1, 0x1f, 0x924},
+ { 0x338d80, 1, 0x1f, 0x924},
+ { 0x338dc0, 1, 0x1f, 0x924},
+ { 0x338e00, 1, 0x1f, 0x924},
+ { 0x338e40, 1, 0x1f, 0x924},
+ { 0x338e80, 1, 0x1f, 0x924},
+ { 0x338e84, 1, 0x1c, 0x924},
+ { 0x338ec0, 1, 0x1e, 0x924},
+ { 0x338f00, 1, 0x1e, 0x924},
+ { 0x338f40, 1, 0x1e, 0x924},
+ { 0x338f80, 1, 0x1e, 0x924},
+ { 0x338fc0, 1, 0x1e, 0x924},
+ { 0x338fd4, 5, 0x1c, 0x924},
+ { 0x338fe8, 2, 0x18, 0x924},
+ { 0x339000, 1, 0x1c, 0x924},
+ { 0x339040, 3, 0x1c, 0x924},
+ { 0x33905c, 1, 0x18, 0x924},
+ { 0x339064, 1, 0x10, 0x924},
+ { 0x339080, 10, 0x10, 0x924},
+ { 0x340000, 2, 0x1f, 0x924},
+ { 0x3a0000, 40960, 0x1c, 0x1000}
};
-#define REGS_COUNT ARRAY_SIZE(reg_addrs)
-static const struct dump_sign dump_sign_all = { 0x4e23fde1, 0x70017, 0x3a };
+#define REGS_COUNT ARRAY_SIZE(reg_addrs)
+
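For orientation: each reg_addrs entry above reads as { base address, count of consecutive 32-bit registers, chip bitmask, preset bitmask }. A minimal sketch of how a dump walk might consume the table follows; the struct field names (addr/size/chips/presets) and the helper itself are assumptions for illustration, not part of this patch, while REG_RD() is the driver's existing register-read accessor.

static u32 *bnx2x_sketch_read_ranges(struct bnx2x *bp, u32 chip_bit,
				     u32 preset_bit, u32 *dest)
{
	u32 i, j;

	for (i = 0; i < REGS_COUNT; i++) {
		const struct reg_addr *r = &reg_addrs[i];

		/* keep only ranges valid for this chip and this preset */
		if (!(r->chips & chip_bit) || !(r->presets & preset_bit))
			continue;
		/* r->size consecutive 32-bit registers from r->addr */
		for (j = 0; j < r->size; j++)
			*dest++ = REG_RD(bp, r->addr + j * 4);
	}
	return dest;
}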
+static const struct reg_addr idle_reg_addrs[] = {
+ { 0x2104, 1, 0x1f, 0xfff},
+ { 0x2110, 2, 0x1f, 0xfff},
+ { 0x211c, 8, 0x1f, 0xfff},
+ { 0x2814, 1, 0x1f, 0xfff},
+ { 0x281c, 2, 0x1f, 0xfff},
+ { 0x2854, 1, 0x1f, 0xfff},
+ { 0x285c, 1, 0x1f, 0xfff},
+ { 0x3040, 1, 0x1f, 0xfff},
+ { 0x9010, 7, 0x1c, 0xfff},
+ { 0x9030, 1, 0x1c, 0xfff},
+ { 0x9068, 16, 0x1c, 0xfff},
+ { 0x9230, 2, 0x1c, 0xfff},
+ { 0x9244, 1, 0x1c, 0xfff},
+ { 0x9298, 1, 0x1c, 0xfff},
+ { 0x92a8, 1, 0x1c, 0x1fff},
+ { 0xa38c, 1, 0x1f, 0x1fff},
+ { 0xa3c4, 1, 0x1e, 0xfff},
+ { 0xa404, 1, 0x1f, 0xfff},
+ { 0xa408, 2, 0x1f, 0x1fff},
+ { 0xa42c, 12, 0x1f, 0xfff},
+ { 0xa580, 1, 0x1f, 0x1fff},
+ { 0xa590, 1, 0x1f, 0x1fff},
+ { 0xa600, 5, 0x1e, 0xfff},
+ { 0xa618, 1, 0x1e, 0xfff},
+ { 0xa714, 1, 0x1c, 0xfff},
+ { 0xa720, 1, 0x1c, 0xfff},
+ { 0xa750, 1, 0x1c, 0xfff},
+ { 0xc09c, 1, 0x3, 0xfff},
+ { 0x103b0, 1, 0x1f, 0xfff},
+ { 0x103c0, 1, 0x1f, 0xfff},
+ { 0x103d0, 1, 0x3, 0x1fff},
+ { 0x10418, 1, 0x1f, 0xfff},
+ { 0x10420, 1, 0x1f, 0xfff},
+ { 0x10428, 1, 0x1f, 0xfff},
+ { 0x10460, 1, 0x1f, 0xfff},
+ { 0x10474, 1, 0x1f, 0xfff},
+ { 0x104e0, 1, 0x1f, 0xfff},
+ { 0x104ec, 1, 0x1f, 0xfff},
+ { 0x104f8, 1, 0x1f, 0xfff},
+ { 0x10508, 1, 0x1f, 0xfff},
+ { 0x10530, 1, 0x1f, 0xfff},
+ { 0x10538, 1, 0x1f, 0xfff},
+ { 0x10548, 1, 0x1f, 0xfff},
+ { 0x10558, 1, 0x1f, 0xfff},
+ { 0x182a8, 1, 0x1c, 0xfff},
+ { 0x182b8, 1, 0x1c, 0xfff},
+ { 0x18308, 1, 0x1c, 0xfff},
+ { 0x18318, 1, 0x1c, 0xfff},
+ { 0x18338, 1, 0x1c, 0xfff},
+ { 0x18348, 1, 0x1c, 0xfff},
+ { 0x183bc, 1, 0x1c, 0x1fff},
+ { 0x183cc, 1, 0x1c, 0x1fff},
+ { 0x18570, 1, 0x18, 0xfff},
+ { 0x18578, 1, 0x18, 0xfff},
+ { 0x1858c, 1, 0x18, 0xfff},
+ { 0x18594, 1, 0x18, 0xfff},
+ { 0x1862c, 4, 0x10, 0xfff},
+ { 0x2021c, 11, 0x1f, 0xfff},
+ { 0x202a8, 1, 0x1f, 0xfff},
+ { 0x202b8, 1, 0x1f, 0x1fff},
+ { 0x20404, 1, 0x1f, 0xfff},
+ { 0x2040c, 2, 0x1f, 0xfff},
+ { 0x2041c, 2, 0x1f, 0xfff},
+ { 0x40154, 14, 0x1f, 0xfff},
+ { 0x40198, 1, 0x1f, 0x1fff},
+ { 0x404ac, 1, 0x1f, 0xfff},
+ { 0x404bc, 1, 0x1f, 0x1fff},
+ { 0x42290, 1, 0x1f, 0xfff},
+ { 0x422a0, 1, 0x1f, 0xfff},
+ { 0x422b0, 1, 0x1f, 0x1fff},
+ { 0x42548, 1, 0x1f, 0xfff},
+ { 0x42550, 1, 0x1f, 0xfff},
+ { 0x42558, 1, 0x1f, 0xfff},
+ { 0x50160, 8, 0x1f, 0xfff},
+ { 0x501d0, 1, 0x1f, 0xfff},
+ { 0x501e0, 1, 0x1f, 0x1fff},
+ { 0x50204, 1, 0x1f, 0xfff},
+ { 0x5020c, 2, 0x1f, 0xfff},
+ { 0x5021c, 1, 0x1f, 0xfff},
+ { 0x60090, 1, 0x1f, 0xfff},
+ { 0x6011c, 1, 0x1f, 0xfff},
+ { 0x6012c, 1, 0x1f, 0x1fff},
+ { 0xc101c, 1, 0x1f, 0xfff},
+ { 0xc102c, 1, 0x1f, 0x1fff},
+ { 0xc2290, 1, 0x1f, 0xfff},
+ { 0xc22a0, 1, 0x1f, 0xfff},
+ { 0xc22b0, 1, 0x1f, 0x1fff},
+ { 0xc2548, 1, 0x1f, 0xfff},
+ { 0xc2550, 1, 0x1f, 0xfff},
+ { 0xc2558, 1, 0x1f, 0xfff},
+ { 0xc4294, 1, 0x1f, 0xfff},
+ { 0xc42a4, 1, 0x1f, 0xfff},
+ { 0xc42b4, 1, 0x1f, 0x1fff},
+ { 0xc4550, 1, 0x1f, 0xfff},
+ { 0xc4558, 1, 0x1f, 0xfff},
+ { 0xc4560, 1, 0x1f, 0xfff},
+ { 0xd016c, 8, 0x1f, 0xfff},
+ { 0xd01d8, 1, 0x1f, 0xfff},
+ { 0xd01e8, 1, 0x1f, 0x1fff},
+ { 0xd0204, 1, 0x1f, 0xfff},
+ { 0xd020c, 3, 0x1f, 0xfff},
+ { 0xe0154, 8, 0x1f, 0xfff},
+ { 0xe01c8, 1, 0x1f, 0xfff},
+ { 0xe01d8, 1, 0x1f, 0x1fff},
+ { 0xe0204, 1, 0x1f, 0xfff},
+ { 0xe020c, 2, 0x1f, 0xfff},
+ { 0xe021c, 2, 0x1f, 0xfff},
+ { 0x101014, 1, 0x1f, 0xfff},
+ { 0x101030, 1, 0x1f, 0xfff},
+ { 0x101040, 1, 0x1f, 0x1fff},
+ { 0x102058, 1, 0x1f, 0x1fff},
+ { 0x102080, 16, 0x1f, 0xfff},
+ { 0x103004, 2, 0x1f, 0xfff},
+ { 0x103068, 1, 0x1f, 0xfff},
+ { 0x103078, 1, 0x1f, 0xfff},
+ { 0x103088, 1, 0x1f, 0x1fff},
+ { 0x10309c, 2, 0x1e, 0xfff},
+ { 0x1030b8, 2, 0x1c, 0xfff},
+ { 0x1030cc, 1, 0x1c, 0xfff},
+ { 0x1030e0, 1, 0x1c, 0xfff},
+ { 0x104004, 1, 0x1f, 0xfff},
+ { 0x104018, 1, 0x1f, 0xfff},
+ { 0x104020, 1, 0x1f, 0xfff},
+ { 0x10403c, 1, 0x1f, 0xfff},
+ { 0x1040fc, 1, 0x1f, 0xfff},
+ { 0x10410c, 1, 0x1f, 0x1fff},
+ { 0x104400, 1, 0x1f, 0x1fff},
+ { 0x104404, 63, 0x1f, 0xfff},
+ { 0x104800, 1, 0x1f, 0x1fff},
+ { 0x104804, 63, 0x1f, 0xfff},
+ { 0x105000, 4, 0x1f, 0x1fff},
+ { 0x105010, 252, 0x1f, 0xfff},
+ { 0x108094, 1, 0x3, 0xfff},
+ { 0x1201b0, 2, 0x1f, 0xfff},
+ { 0x12032c, 1, 0x1f, 0xfff},
+ { 0x12036c, 3, 0x1f, 0xfff},
+ { 0x120408, 2, 0x1f, 0xfff},
+ { 0x120414, 15, 0x1f, 0xfff},
+ { 0x120478, 2, 0x1f, 0xfff},
+ { 0x12052c, 1, 0x1f, 0xfff},
+ { 0x120564, 3, 0x1f, 0xfff},
+ { 0x12057c, 1, 0x1f, 0x1fff},
+ { 0x12058c, 1, 0x1f, 0x1fff},
+ { 0x120608, 1, 0x1e, 0xfff},
+ { 0x120748, 1, 0x1c, 0xfff},
+ { 0x120778, 2, 0x1c, 0xfff},
+ { 0x120808, 3, 0x1f, 0xfff},
+ { 0x120818, 1, 0x1f, 0xfff},
+ { 0x120820, 1, 0x1f, 0xfff},
+ { 0x120828, 1, 0x1f, 0xfff},
+ { 0x120830, 1, 0x1f, 0xfff},
+ { 0x120838, 1, 0x1f, 0xfff},
+ { 0x120840, 1, 0x1f, 0xfff},
+ { 0x120848, 1, 0x1f, 0xfff},
+ { 0x120850, 1, 0x1f, 0xfff},
+ { 0x120858, 1, 0x1f, 0xfff},
+ { 0x120860, 1, 0x1f, 0xfff},
+ { 0x120868, 1, 0x1f, 0xfff},
+ { 0x120870, 1, 0x1f, 0xfff},
+ { 0x120878, 1, 0x1f, 0xfff},
+ { 0x120880, 1, 0x1f, 0xfff},
+ { 0x120888, 1, 0x1f, 0xfff},
+ { 0x120890, 1, 0x1f, 0xfff},
+ { 0x120898, 1, 0x1f, 0xfff},
+ { 0x1208a0, 1, 0x1f, 0xfff},
+ { 0x1208a8, 1, 0x1f, 0xfff},
+ { 0x1208b0, 1, 0x1f, 0xfff},
+ { 0x1208b8, 1, 0x1f, 0xfff},
+ { 0x1208c0, 1, 0x1f, 0xfff},
+ { 0x1208c8, 1, 0x1f, 0xfff},
+ { 0x1208d0, 1, 0x1f, 0xfff},
+ { 0x1208d8, 1, 0x1f, 0xfff},
+ { 0x1208e0, 1, 0x1f, 0xfff},
+ { 0x1208e8, 1, 0x1f, 0xfff},
+ { 0x1208f0, 1, 0x1f, 0xfff},
+ { 0x1208f8, 1, 0x1f, 0xfff},
+ { 0x120900, 1, 0x1f, 0xfff},
+ { 0x120908, 1, 0x1f, 0xfff},
+ { 0x130030, 1, 0x1c, 0xfff},
+ { 0x13004c, 3, 0x1c, 0xfff},
+ { 0x130064, 2, 0x1c, 0xfff},
+ { 0x13009c, 1, 0x1c, 0x1fff},
+ { 0x130130, 1, 0x1c, 0xfff},
+ { 0x13016c, 1, 0x1c, 0xfff},
+ { 0x130300, 1, 0x1c, 0xfff},
+ { 0x130480, 1, 0x1c, 0xfff},
+ { 0x14005c, 2, 0xf, 0xfff},
+ { 0x1400d0, 2, 0xf, 0xfff},
+ { 0x1400e0, 1, 0xf, 0xfff},
+ { 0x1401c8, 1, 0xf, 0xfff},
+ { 0x140200, 6, 0xf, 0xfff},
+ { 0x140338, 7, 0x10, 0xfff},
+ { 0x140370, 7, 0x10, 0xfff},
+ { 0x15c1bc, 6, 0x10, 0xfff},
+ { 0x15c230, 7, 0x10, 0xfff},
+ { 0x16101c, 1, 0x1f, 0xfff},
+ { 0x16102c, 1, 0x1f, 0x1fff},
+ { 0x164014, 2, 0x1f, 0xfff},
+ { 0x1640f0, 1, 0x1f, 0xfff},
+ { 0x166290, 1, 0x1f, 0xfff},
+ { 0x1662a0, 1, 0x1f, 0xfff},
+ { 0x1662b0, 1, 0x1f, 0x1fff},
+ { 0x166548, 1, 0x1f, 0xfff},
+ { 0x166550, 1, 0x1f, 0xfff},
+ { 0x166558, 1, 0x1f, 0xfff},
+ { 0x168000, 1, 0x1f, 0xfff},
+ { 0x168008, 1, 0x1f, 0xfff},
+ { 0x168010, 1, 0x1f, 0xfff},
+ { 0x168018, 1, 0x1f, 0xfff},
+ { 0x168028, 2, 0x1f, 0xfff},
+ { 0x168058, 9, 0x1f, 0xfff},
+ { 0x168238, 1, 0x1f, 0xfff},
+ { 0x1682d0, 7, 0x1f, 0xfff},
+ { 0x168300, 2, 0x3, 0xfff},
+ { 0x168308, 65, 0x1f, 0xfff},
+ { 0x168410, 2, 0x1f, 0xfff},
+ { 0x168438, 1, 0x1f, 0xfff},
+ { 0x168448, 1, 0x1f, 0x1fff},
+ { 0x168a00, 128, 0x1f, 0xfff},
+ { 0x16e200, 128, 0x2, 0xfff},
+ { 0x16e404, 2, 0x2, 0xfff},
+ { 0x16e584, 64, 0x2, 0xfff},
+ { 0x16e684, 2, 0x1e, 0xfff},
+ { 0x16e68c, 4, 0x2, 0xfff},
+ { 0x16e6fc, 4, 0x1c, 0xfff},
+ { 0x16e7ac, 12, 0x10, 0xfff},
+ { 0x1700a4, 1, 0x1f, 0xfff},
+ { 0x1700ac, 2, 0x1f, 0xfff},
+ { 0x1700c0, 1, 0x1f, 0xfff},
+ { 0x170174, 1, 0x1f, 0xfff},
+ { 0x170184, 1, 0x1f, 0x1fff},
+ { 0x1800f4, 1, 0x1f, 0xfff},
+ { 0x180104, 1, 0x1f, 0xfff},
+ { 0x180114, 1, 0x1f, 0x1fff},
+ { 0x180124, 1, 0x1f, 0x1fff},
+ { 0x18026c, 1, 0x1f, 0xfff},
+ { 0x1802a0, 1, 0x1f, 0xfff},
+ { 0x1b8000, 1, 0x1f, 0xfff},
+ { 0x1b8040, 1, 0x1f, 0xfff},
+ { 0x1b8080, 1, 0x1f, 0xfff},
+ { 0x1b80c0, 1, 0x1f, 0xfff},
+ { 0x200104, 1, 0x1f, 0xfff},
+ { 0x200114, 1, 0x1f, 0xfff},
+ { 0x200124, 1, 0x1f, 0x1fff},
+ { 0x200134, 1, 0x1f, 0x1fff},
+ { 0x20026c, 1, 0x1f, 0xfff},
+ { 0x2002a0, 1, 0x1f, 0xfff},
+ { 0x238000, 1, 0x1f, 0xfff},
+ { 0x238040, 1, 0x1f, 0xfff},
+ { 0x238080, 1, 0x1f, 0xfff},
+ { 0x2380c0, 1, 0x1f, 0xfff},
+ { 0x280104, 1, 0x1f, 0xfff},
+ { 0x280114, 1, 0x1f, 0xfff},
+ { 0x280124, 1, 0x1f, 0x1fff},
+ { 0x280134, 1, 0x1f, 0x1fff},
+ { 0x28026c, 1, 0x1f, 0xfff},
+ { 0x2802a0, 1, 0x1f, 0xfff},
+ { 0x2b8000, 1, 0x1f, 0xfff},
+ { 0x2b8040, 1, 0x1f, 0xfff},
+ { 0x2b8080, 1, 0x1f, 0xfff},
+ { 0x300104, 1, 0x1f, 0xfff},
+ { 0x300114, 1, 0x1f, 0xfff},
+ { 0x300124, 1, 0x1f, 0x1fff},
+ { 0x300134, 1, 0x1f, 0x1fff},
+ { 0x30026c, 1, 0x1f, 0xfff},
+ { 0x3002a0, 1, 0x1f, 0xfff},
+ { 0x338000, 1, 0x1f, 0xfff},
+ { 0x338040, 1, 0x1f, 0xfff},
+ { 0x338080, 1, 0x1f, 0xfff},
+ { 0x3380c0, 1, 0x1f, 0xfff}
+};
-static const u32 page_vals_e2[] = { 0, 128 };
-#define PAGE_MODE_VALUES_E2 ARRAY_SIZE(page_vals_e2)
+#define IDLE_REGS_COUNT ARRAY_SIZE(idle_reg_addrs)
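The idle_reg_addrs table above is presumably the subset of registers sampled by the idle-check preset; its entries carry 0xfff/0x1fff preset masks, i.e. they appear in nearly every preset. Under the same field-name assumption as the sketch above, the 32-bit words it contributes for one chip could be tallied like this (hypothetical helper):

static u32 bnx2x_sketch_idle_words(u32 chip_bit)
{
	u32 i, n = 0;

	for (i = 0; i < IDLE_REGS_COUNT; i++)
		if (idle_reg_addrs[i].chips & chip_bit)
			n += idle_reg_addrs[i].size;
	return n;	/* one dump word per counted register */
}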
-static const u32 page_write_regs_e2[] = { 328476 };
-#define PAGE_WRITE_REGS_E2 ARRAY_SIZE(page_write_regs_e2)
+static const u32 read_reg_e1[] = {
+ 0x1b1000};
-static const struct reg_addr page_read_regs_e2[] = {
- { 0x58000, 4608, RI_E2_ONLINE } };
-#define PAGE_READ_REGS_E2 ARRAY_SIZE(page_read_regs_e2)
+static const struct wreg_addr wreg_addr_e1 = {
+ 0x1b0c00, 192, 1, read_reg_e1, 0x1f, 0x1fff};
-static const u32 page_vals_e3[] = { 0, 128 };
-#define PAGE_MODE_VALUES_E3 ARRAY_SIZE(page_vals_e3)
+static const u32 read_reg_e1h[] = {
+ 0x1b1040, 0x1b1000};
-static const u32 page_write_regs_e3[] = { 328476 };
-#define PAGE_WRITE_REGS_E3 ARRAY_SIZE(page_write_regs_e3)
+static const struct wreg_addr wreg_addr_e1h = {
+ 0x1b0c00, 256, 2, read_reg_e1h, 0x1f, 0x1fff};
-static const struct reg_addr page_read_regs_e3[] = {
- { 0x58000, 4608, RI_E3E3B0_ONLINE } };
-#define PAGE_READ_REGS_E3 ARRAY_SIZE(page_read_regs_e3)
+static const u32 read_reg_e2[] = {
+ 0x1b1040, 0x1b1000};
-#endif /* BNX2X_DUMP_H */
+static const struct wreg_addr wreg_addr_e2 = {
+ 0x1b0c00, 128, 2, read_reg_e2, 0x1f, 0x1fff};
+
+static const u32 read_reg_e3[] = {
+ 0x1b1040, 0x1b1000};
+
+static const struct wreg_addr wreg_addr_e3 = {
+ 0x1b0c00, 128, 2, read_reg_e3, 0x1f, 0x1fff};
+
+static const u32 read_reg_e3b0[] = {
+ 0x1b1040, 0x1b1000};
+
+static const struct wreg_addr wreg_addr_e3b0 = {
+ 0x1b0c00, 128, 2, read_reg_e3b0, 0x1f, 0x1fff};
+
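The per-chip wreg_addr_* constants replace the old page_vals/page_write/page_read tables. Judging by the initializers, an entry bundles { base address, size, companion read-register count, pointer to the read_regs list, chip bitmask, preset bitmask }; a plausible read loop, assumed for illustration rather than taken from this patch, would interleave each base word with its companion reads:

static u32 *bnx2x_sketch_read_wreg(struct bnx2x *bp,
				   const struct wreg_addr *w, u32 *dest)
{
	u32 i, j;

	for (i = 0; i < w->size; i++) {
		*dest++ = REG_RD(bp, w->addr + i * 4);
		/* each base word is followed by its companion reads */
		for (j = 0; j < w->read_regs_count; j++)
			*dest++ = REG_RD(bp, w->read_regs[j] + i * 4);
	}
	return dest;
}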
+static const unsigned int dump_num_registers[NUM_CHIPS][NUM_PRESETS] = {
+ {20782, 18567, 27975, 19729, 18311, 27719, 20836, 32391, 41799, 20812,
+ 26247, 35655, 19074},
+ {32774, 19297, 33277, 31721, 19041, 33021, 32828, 33121, 47101, 32804,
+ 26977, 40957, 35895},
+ {36527, 17928, 33697, 35474, 18700, 34466, 36581, 31752, 47521, 36557,
+ 25608, 41377, 43903},
+ {45239, 17936, 34387, 44186, 18708, 35156, 45293, 31760, 48211, 45269,
+ 25616, 42067, 43903},
+ {45302, 17999, 34802, 44249, 18771, 35571, 45356, 31823, 48626, 45332,
+ 25679, 42482, 43903}
+};
+#endif
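dump_num_registers holds five rows of thirteen counts, i.e. one row per supported chip generation and one column per dump preset (NUM_CHIPS x NUM_PRESETS). A sketch of how the ethtool regs length might be derived from it; the 1-based preset numbering is an assumption:

static u32 bnx2x_sketch_regdump_len(int chip_idx, int preset)
{
	/* each counted register contributes one 32-bit dump word */
	return dump_num_registers[chip_idx][preset - 1] * sizeof(u32);
}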
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 277f17e3c8f..25eddd90f48 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1,12 +1,12 @@
/* bnx2x_ethtool.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
@@ -186,6 +186,7 @@ static const struct {
};
#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
+
static int bnx2x_get_port_type(struct bnx2x *bp)
{
int port_type;
@@ -233,7 +234,7 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up &&
!(bp->flags & MF_FUNC_DIS)) {
- cmd->duplex = bp->link_vars.duplex;
+ cmd->duplex = bp->link_vars.duplex;
if (IS_MF(bp) && !BP_NOMCP(bp))
ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
@@ -280,6 +281,8 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->lp_advertising |= ADVERTISED_2500baseX_Full;
if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE)
cmd->lp_advertising |= ADVERTISED_10000baseT_Full;
+ if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE)
+ cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full;
}
cmd->maxtxpkt = 0;
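The hunk above extends the link-partner advertisement decoding with the 20G bit. Purely as an illustrative restatement (the driver keeps the open-coded if-chain), the status-to-advertising mapping could be tabulated:

static const struct {
	u32 status_bit;		/* LINK_STATUS_LINK_PARTNER_* */
	u32 advertised_bit;	/* ADVERTISED_* */
} lp_adv_map[] = {
	{ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE,
	  ADVERTISED_10000baseT_Full },
	{ LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE,	/* added here */
	  ADVERTISED_20000baseKR2_Full },
};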
@@ -317,7 +320,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
speed = ethtool_cmd_speed(cmd);
- /* If recieved a request for an unknown duplex, assume full*/
+	/* If we received a request for an unknown duplex, assume full */
if (cmd->duplex == DUPLEX_UNKNOWN)
cmd->duplex = DUPLEX_FULL;
@@ -355,51 +358,50 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cfg_idx = bnx2x_get_link_cfg_idx(bp);
old_multi_phy_config = bp->link_params.multi_phy_config;
- switch (cmd->port) {
- case PORT_TP:
- if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
- break; /* no port change */
-
- if (!(bp->port.supported[0] & SUPPORTED_TP ||
- bp->port.supported[1] & SUPPORTED_TP)) {
- DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
- return -EINVAL;
- }
- bp->link_params.multi_phy_config &=
- ~PORT_HW_CFG_PHY_SELECTION_MASK;
- if (bp->link_params.multi_phy_config &
- PORT_HW_CFG_PHY_SWAPPED_ENABLED)
- bp->link_params.multi_phy_config |=
- PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
- else
- bp->link_params.multi_phy_config |=
- PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
- break;
- case PORT_FIBRE:
- case PORT_DA:
- if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
- break; /* no port change */
-
- if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
- bp->port.supported[1] & SUPPORTED_FIBRE)) {
+ if (cmd->port != bnx2x_get_port_type(bp)) {
+ switch (cmd->port) {
+ case PORT_TP:
+ if (!(bp->port.supported[0] & SUPPORTED_TP ||
+ bp->port.supported[1] & SUPPORTED_TP)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Unsupported port type\n");
+ return -EINVAL;
+ }
+ bp->link_params.multi_phy_config &=
+ ~PORT_HW_CFG_PHY_SELECTION_MASK;
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ else
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ break;
+ case PORT_FIBRE:
+ case PORT_DA:
+ case PORT_NONE:
+ if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
+ bp->port.supported[1] & SUPPORTED_FIBRE)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Unsupported port type\n");
+ return -EINVAL;
+ }
+ bp->link_params.multi_phy_config &=
+ ~PORT_HW_CFG_PHY_SELECTION_MASK;
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ else
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ break;
+ default:
DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
return -EINVAL;
}
- bp->link_params.multi_phy_config &=
- ~PORT_HW_CFG_PHY_SELECTION_MASK;
- if (bp->link_params.multi_phy_config &
- PORT_HW_CFG_PHY_SWAPPED_ENABLED)
- bp->link_params.multi_phy_config |=
- PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
- else
- bp->link_params.multi_phy_config |=
- PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
- break;
- default:
- DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
- return -EINVAL;
}
- /* Save new config in case command complete successully */
+	/* Save new config in case command completes successfully */
new_multi_phy_config = bp->link_params.multi_phy_config;
/* Get the new cfg_idx */
cfg_idx = bnx2x_get_link_cfg_idx(bp);
@@ -462,6 +464,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ADVERTISED_10000baseKR_Full))
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
+
+ if (cmd->advertising & ADVERTISED_20000baseKR2_Full)
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_20G;
}
} else { /* forced speed */
/* advertise the requested speed and duplex if supported */
@@ -596,29 +602,61 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return 0;
}
-#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
-#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
-#define IS_E2_ONLINE(info) (((info) & RI_E2_ONLINE) == RI_E2_ONLINE)
-#define IS_E3_ONLINE(info) (((info) & RI_E3_ONLINE) == RI_E3_ONLINE)
-#define IS_E3B0_ONLINE(info) (((info) & RI_E3B0_ONLINE) == RI_E3B0_ONLINE)
+#define DUMP_ALL_PRESETS 0x1FFF
+#define DUMP_MAX_PRESETS 13
-static bool bnx2x_is_reg_online(struct bnx2x *bp,
- const struct reg_addr *reg_info)
+static int __bnx2x_get_preset_regs_len(struct bnx2x *bp, u32 preset)
{
if (CHIP_IS_E1(bp))
- return IS_E1_ONLINE(reg_info->info);
+ return dump_num_registers[0][preset-1];
else if (CHIP_IS_E1H(bp))
- return IS_E1H_ONLINE(reg_info->info);
+ return dump_num_registers[1][preset-1];
else if (CHIP_IS_E2(bp))
- return IS_E2_ONLINE(reg_info->info);
+ return dump_num_registers[2][preset-1];
else if (CHIP_IS_E3A0(bp))
- return IS_E3_ONLINE(reg_info->info);
+ return dump_num_registers[3][preset-1];
else if (CHIP_IS_E3B0(bp))
- return IS_E3B0_ONLINE(reg_info->info);
+ return dump_num_registers[4][preset-1];
else
- return false;
+ return 0;
}
+static int __bnx2x_get_regs_len(struct bnx2x *bp)
+{
+ u32 preset_idx;
+ int regdump_len = 0;
+
+ /* Calculate the total preset regs length */
+ for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++)
+ regdump_len += __bnx2x_get_preset_regs_len(bp, preset_idx);
+
+ return regdump_len;
+}
+
+static int bnx2x_get_regs_len(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int regdump_len = 0;
+
+ if (IS_VF(bp))
+ return 0;
+
+ regdump_len = __bnx2x_get_regs_len(bp);
+ regdump_len *= 4;
+ regdump_len += sizeof(struct dump_header);
+
+ return regdump_len;
+}
+
+#define IS_E1_REG(chips) ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
+#define IS_E1H_REG(chips) ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
+#define IS_E2_REG(chips) ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
+#define IS_E3A0_REG(chips) ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
+#define IS_E3B0_REG(chips) ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)
+
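+/* Preset indices are 1-based: bit (idx - 1) of a register's preset
+ * bitmask marks membership in preset idx, e.g. a presets value of 0x5
+ * means presets 1 and 3.
+ */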
+#define IS_REG_IN_PRESET(presets, idx) \
+ ((presets & (1 << (idx-1))) == (1 << (idx-1)))
+
/******* Paged registers info selectors ********/
static const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
{
@@ -680,38 +718,38 @@ static u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
return 0;
}
-static int __bnx2x_get_regs_len(struct bnx2x *bp)
+static bool bnx2x_is_reg_in_chip(struct bnx2x *bp,
+ const struct reg_addr *reg_info)
{
- int num_pages = __bnx2x_get_page_reg_num(bp);
- int page_write_num = __bnx2x_get_page_write_num(bp);
- const struct reg_addr *page_read_addr = __bnx2x_get_page_read_ar(bp);
- int page_read_num = __bnx2x_get_page_read_num(bp);
- int regdump_len = 0;
- int i, j, k;
-
- for (i = 0; i < REGS_COUNT; i++)
- if (bnx2x_is_reg_online(bp, &reg_addrs[i]))
- regdump_len += reg_addrs[i].size;
-
- for (i = 0; i < num_pages; i++)
- for (j = 0; j < page_write_num; j++)
- for (k = 0; k < page_read_num; k++)
- if (bnx2x_is_reg_online(bp, &page_read_addr[k]))
- regdump_len += page_read_addr[k].size;
-
- return regdump_len;
+ if (CHIP_IS_E1(bp))
+ return IS_E1_REG(reg_info->chips);
+ else if (CHIP_IS_E1H(bp))
+ return IS_E1H_REG(reg_info->chips);
+ else if (CHIP_IS_E2(bp))
+ return IS_E2_REG(reg_info->chips);
+ else if (CHIP_IS_E3A0(bp))
+ return IS_E3A0_REG(reg_info->chips);
+ else if (CHIP_IS_E3B0(bp))
+ return IS_E3B0_REG(reg_info->chips);
+ else
+ return false;
}
-static int bnx2x_get_regs_len(struct net_device *dev)
+static bool bnx2x_is_wreg_in_chip(struct bnx2x *bp,
+ const struct wreg_addr *wreg_info)
{
- struct bnx2x *bp = netdev_priv(dev);
- int regdump_len = 0;
-
- regdump_len = __bnx2x_get_regs_len(bp);
- regdump_len *= 4;
- regdump_len += sizeof(struct dump_hdr);
-
- return regdump_len;
+ if (CHIP_IS_E1(bp))
+ return IS_E1_REG(wreg_info->chips);
+ else if (CHIP_IS_E1H(bp))
+ return IS_E1H_REG(wreg_info->chips);
+ else if (CHIP_IS_E2(bp))
+ return IS_E2_REG(wreg_info->chips);
+ else if (CHIP_IS_E3A0(bp))
+ return IS_E3A0_REG(wreg_info->chips);
+ else if (CHIP_IS_E3B0(bp))
+ return IS_E3B0_REG(wreg_info->chips);
+ else
+ return false;
}
/**
@@ -725,9 +763,10 @@ static int bnx2x_get_regs_len(struct net_device *dev)
* ("read address"). There may be more than one write address per "page" and
* more than one read address per write address.
*/
-static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
+static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p, u32 preset)
{
u32 i, j, k, n;
+
/* addresses of the paged registers */
const u32 *page_addr = __bnx2x_get_page_addr_ar(bp);
/* number of paged registers */
@@ -740,32 +779,100 @@ static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp);
/* number of read addresses */
int read_num = __bnx2x_get_page_read_num(bp);
+ u32 addr, size;
for (i = 0; i < num_pages; i++) {
for (j = 0; j < write_num; j++) {
REG_WR(bp, write_addr[j], page_addr[i]);
- for (k = 0; k < read_num; k++)
- if (bnx2x_is_reg_online(bp, &read_addr[k]))
- for (n = 0; n <
- read_addr[k].size; n++)
- *p++ = REG_RD(bp,
- read_addr[k].addr + n*4);
+
+ for (k = 0; k < read_num; k++) {
+ if (IS_REG_IN_PRESET(read_addr[k].presets,
+ preset)) {
+ size = read_addr[k].size;
+ for (n = 0; n < size; n++) {
+ addr = read_addr[k].addr + n*4;
+ *p++ = REG_RD(bp, addr);
+ }
+ }
+ }
}
}
}
-static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
+static int __bnx2x_get_preset_regs(struct bnx2x *bp, u32 *p, u32 preset)
{
- u32 i, j;
+ u32 i, j, addr;
+ const struct wreg_addr *wreg_addr_p = NULL;
+
+ if (CHIP_IS_E1(bp))
+ wreg_addr_p = &wreg_addr_e1;
+ else if (CHIP_IS_E1H(bp))
+ wreg_addr_p = &wreg_addr_e1h;
+ else if (CHIP_IS_E2(bp))
+ wreg_addr_p = &wreg_addr_e2;
+ else if (CHIP_IS_E3A0(bp))
+ wreg_addr_p = &wreg_addr_e3;
+ else if (CHIP_IS_E3B0(bp))
+ wreg_addr_p = &wreg_addr_e3b0;
+
+ /* Read the idle_chk registers */
+ for (i = 0; i < IDLE_REGS_COUNT; i++) {
+ if (bnx2x_is_reg_in_chip(bp, &idle_reg_addrs[i]) &&
+ IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
+ for (j = 0; j < idle_reg_addrs[i].size; j++)
+ *p++ = REG_RD(bp, idle_reg_addrs[i].addr + j*4);
+ }
+ }
/* Read the regular registers */
- for (i = 0; i < REGS_COUNT; i++)
- if (bnx2x_is_reg_online(bp, &reg_addrs[i]))
+ for (i = 0; i < REGS_COUNT; i++) {
+ if (bnx2x_is_reg_in_chip(bp, &reg_addrs[i]) &&
+ IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
for (j = 0; j < reg_addrs[i].size; j++)
*p++ = REG_RD(bp, reg_addrs[i].addr + j*4);
+ }
+ }
+
+ /* Read the CAM registers */
+ if (bnx2x_is_wreg_in_chip(bp, wreg_addr_p) &&
+ IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
+ for (i = 0; i < wreg_addr_p->size; i++) {
+ *p++ = REG_RD(bp, wreg_addr_p->addr + i*4);
+
+			/* For a wreg_addr register, also read the
+			 * additional registers from the read_regs array
+			 */
+ for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
+ addr = *(wreg_addr_p->read_regs);
+ *p++ = REG_RD(bp, addr + j*4);
+ }
+ }
+ }
+
+ /* Paged registers are supported in E2 & E3 only */
+ if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) {
+ /* Read "paged" registers */
+ bnx2x_read_pages_regs(bp, p, preset);
+ }
- /* Read "paged" registes */
- bnx2x_read_pages_regs(bp, p);
+ return 0;
+}
+
+static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
+{
+ u32 preset_idx;
+
+ /* Read all registers, by reading all preset registers */
+ for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
+ /* Skip presets with IOR */
+ if ((preset_idx == 2) ||
+ (preset_idx == 5) ||
+ (preset_idx == 8) ||
+ (preset_idx == 11))
+ continue;
+ __bnx2x_get_preset_regs(bp, p, preset_idx);
+ p += __bnx2x_get_preset_regs_len(bp, preset_idx);
+ }
}
static void bnx2x_get_regs(struct net_device *dev,
@@ -773,9 +880,9 @@ static void bnx2x_get_regs(struct net_device *dev,
{
u32 *p = _p;
struct bnx2x *bp = netdev_priv(dev);
- struct dump_hdr dump_hdr = {0};
+ struct dump_header dump_hdr = {0};
- regs->version = 1;
+ regs->version = 2;
memset(p, 0, regs->len);
if (!netif_running(bp->dev))
@@ -785,53 +892,138 @@ static void bnx2x_get_regs(struct net_device *dev,
* cause false alarms by reading never written registers. We
* will re-enable parity attentions right after the dump.
*/
+
bnx2x_disable_blocks_parity(bp);
- dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
- dump_hdr.dump_sign = dump_sign_all;
- dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
- dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
- dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
- dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
+ dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
+ dump_hdr.preset = DUMP_ALL_PRESETS;
+ dump_hdr.version = BNX2X_DUMP_VERSION;
+
+	/* dump_meta_data is the OR of CHIP and PATH. */
+ if (CHIP_IS_E1(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E1;
+ } else if (CHIP_IS_E1H(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E1H;
+ } else if (CHIP_IS_E2(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E2 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ } else if (CHIP_IS_E3A0(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ } else if (CHIP_IS_E3B0(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ }
+
+ memcpy(p, &dump_hdr, sizeof(struct dump_header));
+ p += dump_hdr.header_size + 1;
- if (CHIP_IS_E1(bp))
- dump_hdr.info = RI_E1_ONLINE;
- else if (CHIP_IS_E1H(bp))
- dump_hdr.info = RI_E1H_ONLINE;
- else if (!CHIP_IS_E1x(bp))
- dump_hdr.info = RI_E2_ONLINE |
- (BP_PATH(bp) ? RI_PATH1_DUMP : RI_PATH0_DUMP);
+ /* Actually read the registers */
+ __bnx2x_get_regs(bp, p);
+
+ /* Re-enable parity attentions */
+ bnx2x_clear_blocks_parity(bp);
+ bnx2x_enable_blocks_parity(bp);
+}
+
+static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int regdump_len = 0;
+
+ regdump_len = __bnx2x_get_preset_regs_len(bp, preset);
+ regdump_len *= 4;
+ regdump_len += sizeof(struct dump_header);
+
+ return regdump_len;
+}
+
+static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ /* Use the ethtool_dump "flag" field as the dump preset index */
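+	/* (Reached from userspace via the ETHTOOL_SET_DUMP ioctl,
+	 * e.g. ethtool -W|--set-dump.)
+	 */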
+ if (val->flag < 1 || val->flag > DUMP_MAX_PRESETS)
+ return -EINVAL;
+
+ bp->dump_preset_idx = val->flag;
+ return 0;
+}
+
+static int bnx2x_get_dump_flag(struct net_device *dev,
+ struct ethtool_dump *dump)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ dump->version = BNX2X_DUMP_VERSION;
+ dump->flag = bp->dump_preset_idx;
+ /* Calculate the requested preset idx length */
+ dump->len = bnx2x_get_preset_regs_len(dev, bp->dump_preset_idx);
+ DP(BNX2X_MSG_ETHTOOL, "Get dump preset %d length=%d\n",
+ bp->dump_preset_idx, dump->len);
+ return 0;
+}
+
+static int bnx2x_get_dump_data(struct net_device *dev,
+ struct ethtool_dump *dump,
+ void *buffer)
+{
+ u32 *p = buffer;
+ struct bnx2x *bp = netdev_priv(dev);
+ struct dump_header dump_hdr = {0};
+
+ /* Disable parity attentions as long as following dump may
+ * cause false alarms by reading never written registers. We
+ * will re-enable parity attentions right after the dump.
+ */
+
+ bnx2x_disable_blocks_parity(bp);
- memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
- p += dump_hdr.hdr_size + 1;
+ dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
+ dump_hdr.preset = bp->dump_preset_idx;
+ dump_hdr.version = BNX2X_DUMP_VERSION;
+
+ DP(BNX2X_MSG_ETHTOOL, "Get dump data of preset %d\n", dump_hdr.preset);
+
+	/* dump_meta_data is the OR of CHIP and PATH. */
+ if (CHIP_IS_E1(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E1;
+ } else if (CHIP_IS_E1H(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E1H;
+ } else if (CHIP_IS_E2(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E2 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ } else if (CHIP_IS_E3A0(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ } else if (CHIP_IS_E3B0(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ }
+
+ memcpy(p, &dump_hdr, sizeof(struct dump_header));
+ p += dump_hdr.header_size + 1;
/* Actually read the registers */
- __bnx2x_get_regs(bp, p);
+ __bnx2x_get_preset_regs(bp, p, dump_hdr.preset);
/* Re-enable parity attentions */
bnx2x_clear_blocks_parity(bp);
bnx2x_enable_blocks_parity(bp);
+
+ return 0;
}
static void bnx2x_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct bnx2x *bp = netdev_priv(dev);
- u8 phy_fw_ver[PHY_FW_VER_LEN];
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
- phy_fw_ver[0] = '\0';
- bnx2x_get_ext_phy_fw_version(&bp->link_params,
- phy_fw_ver, PHY_FW_VER_LEN);
- strlcpy(info->fw_version, bp->fw_ver, sizeof(info->fw_version));
- snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
- "bc %d.%d.%d%s%s",
- (bp->common.bc_ver & 0xff0000) >> 16,
- (bp->common.bc_ver & 0xff00) >> 8,
- (bp->common.bc_ver & 0xff),
- ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
+ bnx2x_fill_fw_str(bp, info->fw_version, sizeof(info->fw_version));
+
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = BNX2X_NUM_STATS;
info->testinfo_len = BNX2X_NUM_TESTS(bp);
@@ -861,13 +1053,13 @@ static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
struct bnx2x *bp = netdev_priv(dev);
if (wol->wolopts & ~WAKE_MAGIC) {
- DP(BNX2X_MSG_ETHTOOL, "WOL not supproted\n");
+ DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n");
return -EINVAL;
}
if (wol->wolopts & WAKE_MAGIC) {
if (bp->flags & NO_WOL_FLAG) {
- DP(BNX2X_MSG_ETHTOOL, "WOL not supproted\n");
+ DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n");
return -EINVAL;
}
bp->wol = 1;
@@ -890,7 +1082,7 @@ static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
if (capable(CAP_NET_ADMIN)) {
/* dump MCP trace */
- if (level & BNX2X_MSG_MCP)
+ if (IS_PF(bp) && (level & BNX2X_MSG_MCP))
bnx2x_fw_dump_lvl(bp, KERN_INFO);
bp->msg_enable = level;
}
@@ -929,8 +1121,8 @@ static int bnx2x_get_eeprom_len(struct net_device *dev)
return bp->common.flash_size;
}
-/* Per pf misc lock must be aquired before the per port mcp lock. Otherwise, had
- * we done things the other way around, if two pfs from the same port would
+/* Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
+ * had we done things the other way around, if two pfs from the same port would
* attempt to access nvram at the same time, we could run into a scenario such
* as:
* pf A takes the port lock.
@@ -940,7 +1132,7 @@ static int bnx2x_get_eeprom_len(struct net_device *dev)
 * pf B takes the lock and proceeds to perform its own access.
* pf A unlocks the per port lock, while pf B is still working (!).
 * mcp takes the per port lock and corrupts pf B's access (and/or has its own
- * acess corrupted by pf B).*
+ * access corrupted by pf B).
*/
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
@@ -1070,7 +1262,8 @@ static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
/* we read nvram data in cpu order
* but ethtool sees it as an array of bytes
- * converting to big-endian will do the work */
+ * converting to big-endian will do the work
+ */
*ret_val = cpu_to_be32(val);
rc = 0;
break;
@@ -1137,13 +1330,46 @@ static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
return rc;
}
+static int bnx2x_nvram_read32(struct bnx2x *bp, u32 offset, u32 *buf,
+ int buf_size)
+{
+ int rc;
+
+ rc = bnx2x_nvram_read(bp, offset, (u8 *)buf, buf_size);
+
+ if (!rc) {
+ __be32 *be = (__be32 *)buf;
+
+ while ((buf_size -= 4) >= 0)
+ *buf++ = be32_to_cpu(*be++);
+ }
+
+ return rc;
+}
+
+static bool bnx2x_is_nvm_accessible(struct bnx2x *bp)
+{
+ int rc = 1;
+ u16 pm = 0;
+ struct net_device *dev = pci_get_drvdata(bp->pdev);
+
+ if (bp->pdev->pm_cap)
+ rc = pci_read_config_word(bp->pdev,
+ bp->pdev->pm_cap + PCI_PM_CTRL, &pm);
+
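+	/* If the PM state could not be read (no PM capability, or a
+	 * failed config read), fall back to requiring a running
+	 * interface; otherwise require the device to be in D0.
+	 */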
+ if ((rc && !netif_running(dev)) ||
+ (!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != (__force u16)PCI_D0)))
+ return false;
+
+ return true;
+}
+
static int bnx2x_get_eeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom, u8 *eebuf)
{
struct bnx2x *bp = netdev_priv(dev);
- int rc;
- if (!netif_running(dev)) {
+ if (!bnx2x_is_nvm_accessible(bp)) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
"cannot access eeprom when the interface is down\n");
return -EAGAIN;
@@ -1156,9 +1382,7 @@ static int bnx2x_get_eeprom(struct net_device *dev,
/* parameters already validated in ethtool_get_eeprom */
- rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
-
- return rc;
+ return bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
}
static int bnx2x_get_module_eeprom(struct net_device *dev,
@@ -1166,33 +1390,63 @@ static int bnx2x_get_module_eeprom(struct net_device *dev,
u8 *data)
{
struct bnx2x *bp = netdev_priv(dev);
- int rc = 0, phy_idx;
+ int rc = -EINVAL, phy_idx;
u8 *user_data = data;
- int remaining_len = ee->len, xfer_size;
- unsigned int page_off = ee->offset;
+ unsigned int start_addr = ee->offset, xfer_size = 0;
- if (!netif_running(dev)) {
+ if (!bnx2x_is_nvm_accessible(bp)) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
"cannot access eeprom when the interface is down\n");
return -EAGAIN;
}
phy_idx = bnx2x_get_cur_phy_idx(bp);
- bnx2x_acquire_phy_lock(bp);
- while (!rc && remaining_len > 0) {
- xfer_size = (remaining_len > SFP_EEPROM_PAGE_SIZE) ?
- SFP_EEPROM_PAGE_SIZE : remaining_len;
+
+ /* Read A0 section */
+ if (start_addr < ETH_MODULE_SFF_8079_LEN) {
+ /* Limit transfer size to the A0 section boundary */
+ if (start_addr + ee->len > ETH_MODULE_SFF_8079_LEN)
+ xfer_size = ETH_MODULE_SFF_8079_LEN - start_addr;
+ else
+ xfer_size = ee->len;
+ bnx2x_acquire_phy_lock(bp);
rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
&bp->link_params,
- page_off,
+ I2C_DEV_ADDR_A0,
+ start_addr,
xfer_size,
user_data);
- remaining_len -= xfer_size;
+ bnx2x_release_phy_lock(bp);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL, "Failed reading A0 section\n");
+
+ return -EINVAL;
+ }
user_data += xfer_size;
- page_off += xfer_size;
+ start_addr += xfer_size;
}
- bnx2x_release_phy_lock(bp);
+ /* Read A2 section */
+ if ((start_addr >= ETH_MODULE_SFF_8079_LEN) &&
+ (start_addr < ETH_MODULE_SFF_8472_LEN)) {
+ xfer_size = ee->len - xfer_size;
+ /* Limit transfer size to the A2 section boundary */
+ if (start_addr + xfer_size > ETH_MODULE_SFF_8472_LEN)
+ xfer_size = ETH_MODULE_SFF_8472_LEN - start_addr;
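+		/* The A2 page has its own address space; rebase the
+		 * linear ethtool offset to an A2-relative offset.
+		 */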
+ start_addr -= ETH_MODULE_SFF_8079_LEN;
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+ &bp->link_params,
+ I2C_DEV_ADDR_A2,
+ start_addr,
+ xfer_size,
+ user_data);
+ bnx2x_release_phy_lock(bp);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL, "Failed reading A2 section\n");
+ return -EINVAL;
+ }
+ }
return rc;
}
@@ -1200,24 +1454,50 @@ static int bnx2x_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
struct bnx2x *bp = netdev_priv(dev);
- int phy_idx;
- if (!netif_running(dev)) {
- DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ int phy_idx, rc;
+ u8 sff8472_comp, diag_type;
+
+ if (!bnx2x_is_nvm_accessible(bp)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
"cannot access eeprom when the interface is down\n");
return -EAGAIN;
}
-
phy_idx = bnx2x_get_cur_phy_idx(bp);
- switch (bp->link_params.phy[phy_idx].media_type) {
- case ETH_PHY_SFPP_10G_FIBER:
- case ETH_PHY_SFP_1G_FIBER:
- case ETH_PHY_DA_TWINAX:
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+ &bp->link_params,
+ I2C_DEV_ADDR_A0,
+ SFP_EEPROM_SFF_8472_COMP_ADDR,
+ SFP_EEPROM_SFF_8472_COMP_SIZE,
+ &sff8472_comp);
+ bnx2x_release_phy_lock(bp);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL, "Failed reading SFF-8472 comp field\n");
+ return -EINVAL;
+ }
+
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+ &bp->link_params,
+ I2C_DEV_ADDR_A0,
+ SFP_EEPROM_DIAG_TYPE_ADDR,
+ SFP_EEPROM_DIAG_TYPE_SIZE,
+ &diag_type);
+ bnx2x_release_phy_lock(bp);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL, "Failed reading Diag Type field\n");
+ return -EINVAL;
+ }
+
+ if (!sff8472_comp ||
+ (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ)) {
modinfo->type = ETH_MODULE_SFF_8079;
modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
- return 0;
- default:
- return -EOPNOTSUPP;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
}
+ return 0;
}
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
@@ -1269,9 +1549,8 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
int buf_size)
{
int rc;
- u32 cmd_flags;
- u32 align_offset;
- __be32 val;
+ u32 cmd_flags, align_offset, val;
+ __be32 val_be;
if (offset + buf_size > bp->common.flash_size) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
@@ -1290,15 +1569,18 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
align_offset = (offset & ~0x03);
- rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
+ rc = bnx2x_nvram_read_dword(bp, align_offset, &val_be, cmd_flags);
if (rc == 0) {
- val &= ~(0xff << BYTE_OFFSET(offset));
- val |= (*data_buf << BYTE_OFFSET(offset));
-
/* nvram data is returned as an array of bytes
- * convert it back to cpu order */
- val = be32_to_cpu(val);
+ * convert it back to cpu order
+ */
+ val = be32_to_cpu(val_be);
+
+ val &= ~le32_to_cpu((__force __le32)
+ (0xff << BYTE_OFFSET(offset)));
+ val |= le32_to_cpu((__force __le32)
+ (*data_buf << BYTE_OFFSET(offset)));
rc = bnx2x_nvram_write_dword(bp, align_offset, val,
cmd_flags);
@@ -1356,6 +1638,12 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
memcpy(&val, data_buf, 4);
+	/* Note that unlike bnx2x_nvram_read_dword(), this function does
+	 * not convert val with be32_to_cpu(), so data flips if the
+	 * eeprom is read and then written back. This is kept as-is
+	 * because existing tools rely on that behaviour and would break
+	 * if it were changed.
+	 */
rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
/* advance to the next dword */
@@ -1379,7 +1667,8 @@ static int bnx2x_set_eeprom(struct net_device *dev,
int port = BP_PORT(bp);
int rc = 0;
u32 ext_phy_config;
- if (!netif_running(dev)) {
+
+ if (!bnx2x_is_nvm_accessible(bp)) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
"cannot access eeprom when the interface is down\n");
return -EAGAIN;
@@ -1509,6 +1798,10 @@ static int bnx2x_set_ringparam(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
+ DP(BNX2X_MSG_ETHTOOL,
+ "set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
+ ering->rx_pending, ering->tx_pending);
+
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
DP(BNX2X_MSG_ETHTOOL,
"Handling parity error recovery. Try again later\n");
@@ -1588,12 +1881,15 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
bp->link_params.req_flow_ctrl[cfg_idx] =
BNX2X_FLOW_CTRL_AUTO;
}
- bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_NONE;
+ bp->link_params.req_fc_auto_adv = 0;
if (epause->rx_pause)
bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_RX;
if (epause->tx_pause)
bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_TX;
+
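+		/* Record the explicit NONE value only when neither
+		 * direction was requested, so it is never OR'ed
+		 * together with RX/TX.
+		 */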
+ if (!bp->link_params.req_fc_auto_adv)
+ bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_NONE;
}
DP(BNX2X_MSG_ETHTOOL,
@@ -1617,6 +1913,19 @@ static const char bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF][ETH_GSTRING_LEN] = {
"link_test (online) "
};
+enum {
+ BNX2X_PRI_FLAG_ISCSI,
+ BNX2X_PRI_FLAG_FCOE,
+ BNX2X_PRI_FLAG_STORAGE,
+ BNX2X_PRI_FLAG_LEN,
+};
+
+static const char bnx2x_private_arr[BNX2X_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
+ "iSCSI offload support",
+ "FCoE offload support",
+ "Storage only interface"
+};
+
static u32 bnx2x_eee_to_adv(u32 eee_adv)
{
u32 modes = 0;
@@ -1737,7 +2046,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
EEE_MODE_OVERRIDE_NVRAM |
EEE_MODE_OUTPUT_TIME;
- /* Restart link to propogate changes */
+ /* Restart link to propagate changes */
if (netif_running(dev)) {
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
bnx2x_force_link_reset(bp);
@@ -1747,7 +2056,6 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return 0;
}
-
enum {
BNX2X_CHIP_E1_OFST = 0,
BNX2X_CHIP_E1H_OFST,
@@ -1857,7 +2165,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
{ BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 }
};
- if (!netif_running(bp->dev)) {
+ if (!bnx2x_is_nvm_accessible(bp)) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
"cannot access eeprom when the interface is down\n");
return rc;
@@ -1875,7 +2183,8 @@ static int bnx2x_test_registers(struct bnx2x *bp)
hw = BNX2X_CHIP_MASK_E3;
/* Repeat the test twice:
- First by writing 0x00000000, second by writing 0xffffffff */
+ * First by writing 0x00000000, second by writing 0xffffffff
+ */
for (idx = 0; idx < 2; idx++) {
switch (idx) {
@@ -1960,7 +2269,7 @@ static int bnx2x_test_memory(struct bnx2x *bp)
{ NULL, 0xffffffff, {0, 0, 0, 0} }
};
- if (!netif_running(bp->dev)) {
+ if (!bnx2x_is_nvm_accessible(bp)) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
"cannot access eeprom when the interface is down\n");
return rc;
@@ -2294,14 +2603,168 @@ static int bnx2x_test_ext_loopback(struct bnx2x *bp)
return rc;
}
+struct code_entry {
+ u32 sram_start_addr;
+ u32 code_attribute;
+#define CODE_IMAGE_TYPE_MASK 0xf0800003
+#define CODE_IMAGE_VNTAG_PROFILES_DATA 0xd0000003
+#define CODE_IMAGE_LENGTH_MASK 0x007ffffc
+#define CODE_IMAGE_TYPE_EXTENDED_DIR 0xe0000000
+ u32 nvm_start_addr;
+};
+
+#define CODE_ENTRY_MAX 16
+#define CODE_ENTRY_EXTENDED_DIR_IDX 15
+#define MAX_IMAGES_IN_EXTENDED_DIR 64
+#define NVRAM_DIR_OFFSET 0x14
+
+#define EXTENDED_DIR_EXISTS(code) \
+ ((code & CODE_IMAGE_TYPE_MASK) == CODE_IMAGE_TYPE_EXTENDED_DIR && \
+ (code & CODE_IMAGE_LENGTH_MASK) != 0)
+
#define CRC32_RESIDUAL 0xdebb20e3
+#define CRC_BUFF_SIZE 256
+
+static int bnx2x_nvram_crc(struct bnx2x *bp,
+ int offset,
+ int size,
+ u8 *buff)
+{
+ u32 crc = ~0;
+ int rc = 0, done = 0;
+
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "NVRAM CRC from 0x%08x to 0x%08x\n", offset, offset + size);
+
+ while (done < size) {
+ int count = min_t(int, size - done, CRC_BUFF_SIZE);
+
+ rc = bnx2x_nvram_read(bp, offset + done, buff, count);
+
+ if (rc)
+ return rc;
+
+ crc = crc32_le(crc, buff, count);
+ done += count;
+ }
+
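+	/* A region that carries its own (complemented, little-endian)
+	 * CRC-32 at its end always reduces to this constant residual,
+	 * so no per-region expected value is needed.
+	 */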
+ if (crc != CRC32_RESIDUAL)
+ rc = -EINVAL;
+
+ return rc;
+}
+
+static int bnx2x_test_nvram_dir(struct bnx2x *bp,
+ struct code_entry *entry,
+ u8 *buff)
+{
+ size_t size = entry->code_attribute & CODE_IMAGE_LENGTH_MASK;
+ u32 type = entry->code_attribute & CODE_IMAGE_TYPE_MASK;
+ int rc;
+
+ /* Zero-length images and AFEX profiles do not have CRC */
+ if (size == 0 || type == CODE_IMAGE_VNTAG_PROFILES_DATA)
+ return 0;
+
+ rc = bnx2x_nvram_crc(bp, entry->nvm_start_addr, size, buff);
+ if (rc)
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "image %x has failed crc test (rc %d)\n", type, rc);
+
+ return rc;
+}
+
+static int bnx2x_test_dir_entry(struct bnx2x *bp, u32 addr, u8 *buff)
+{
+ int rc;
+ struct code_entry entry;
+
+ rc = bnx2x_nvram_read32(bp, addr, (u32 *)&entry, sizeof(entry));
+ if (rc)
+ return rc;
+
+ return bnx2x_test_nvram_dir(bp, &entry, buff);
+}
+
+static int bnx2x_test_nvram_ext_dirs(struct bnx2x *bp, u8 *buff)
+{
+ u32 rc, cnt, dir_offset = NVRAM_DIR_OFFSET;
+ struct code_entry entry;
+ int i;
+
+ rc = bnx2x_nvram_read32(bp,
+ dir_offset +
+ sizeof(entry) * CODE_ENTRY_EXTENDED_DIR_IDX,
+ (u32 *)&entry, sizeof(entry));
+ if (rc)
+ return rc;
+
+ if (!EXTENDED_DIR_EXISTS(entry.code_attribute))
+ return 0;
+
+ rc = bnx2x_nvram_read32(bp, entry.nvm_start_addr,
+ &cnt, sizeof(u32));
+ if (rc)
+ return rc;
+
+ dir_offset = entry.nvm_start_addr + 8;
+
+ for (i = 0; i < cnt && i < MAX_IMAGES_IN_EXTENDED_DIR; i++) {
+ rc = bnx2x_test_dir_entry(bp, dir_offset +
+ sizeof(struct code_entry) * i,
+ buff);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int bnx2x_test_nvram_dirs(struct bnx2x *bp, u8 *buff)
+{
+ u32 rc, dir_offset = NVRAM_DIR_OFFSET;
+ int i;
+
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "NVRAM DIRS CRC test-set\n");
+
+ for (i = 0; i < CODE_ENTRY_EXTENDED_DIR_IDX; i++) {
+ rc = bnx2x_test_dir_entry(bp, dir_offset +
+ sizeof(struct code_entry) * i,
+ buff);
+ if (rc)
+ return rc;
+ }
+
+ return bnx2x_test_nvram_ext_dirs(bp, buff);
+}
+
+struct crc_pair {
+ int offset;
+ int size;
+};
+
+static int bnx2x_test_nvram_tbl(struct bnx2x *bp,
+ const struct crc_pair *nvram_tbl, u8 *buf)
+{
+ int i;
+
+ for (i = 0; nvram_tbl[i].size; i++) {
+ int rc = bnx2x_nvram_crc(bp, nvram_tbl[i].offset,
+ nvram_tbl[i].size, buf);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "nvram_tbl[%d] has failed crc test (rc %d)\n",
+ i, rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
static int bnx2x_test_nvram(struct bnx2x *bp)
{
- static const struct {
- int offset;
- int size;
- } nvram_tbl[] = {
+ const struct crc_pair nvram_tbl[] = {
{ 0, 0x14 }, /* bootstrap */
{ 0x14, 0xec }, /* dir */
{ 0x100, 0x350 }, /* manuf_info */
@@ -2310,30 +2773,33 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
{ 0x708, 0x70 }, /* manuf_key_info */
{ 0, 0 }
};
- __be32 *buf;
- u8 *data;
- int i, rc;
- u32 magic, crc;
+ const struct crc_pair nvram_tbl2[] = {
+ { 0x7e8, 0x350 }, /* manuf_info2 */
+ { 0xb38, 0xf0 }, /* feature_info */
+ { 0, 0 }
+ };
+
+ u8 *buf;
+ int rc;
+ u32 magic;
if (BP_NOMCP(bp))
return 0;
- buf = kmalloc(0x350, GFP_KERNEL);
+ buf = kmalloc(CRC_BUFF_SIZE, GFP_KERNEL);
if (!buf) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "kmalloc failed\n");
rc = -ENOMEM;
goto test_nvram_exit;
}
- data = (u8 *)buf;
- rc = bnx2x_nvram_read(bp, 0, data, 4);
+ rc = bnx2x_nvram_read32(bp, 0, &magic, sizeof(magic));
if (rc) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
"magic value read (rc %d)\n", rc);
goto test_nvram_exit;
}
- magic = be32_to_cpu(buf[0]);
if (magic != 0x669955aa) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
"wrong magic value (0x%08x)\n", magic);
@@ -2341,25 +2807,26 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
goto test_nvram_exit;
}
- for (i = 0; nvram_tbl[i].size; i++) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "Port 0 CRC test-set\n");
+ rc = bnx2x_test_nvram_tbl(bp, nvram_tbl, buf);
+ if (rc)
+ goto test_nvram_exit;
- rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
- nvram_tbl[i].size);
- if (rc) {
- DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
- "nvram_tbl[%d] read data (rc %d)\n", i, rc);
- goto test_nvram_exit;
- }
+ if (!CHIP_IS_E1x(bp) && !CHIP_IS_57811xx(bp)) {
+ u32 hide = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
+ SHARED_HW_CFG_HIDE_PORT1;
- crc = ether_crc_le(nvram_tbl[i].size, data);
- if (crc != CRC32_RESIDUAL) {
+ if (!hide) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
- "nvram_tbl[%d] wrong crc value (0x%08x)\n", i, crc);
- rc = -ENODEV;
- goto test_nvram_exit;
+ "Port 1 CRC test-set\n");
+ rc = bnx2x_test_nvram_tbl(bp, nvram_tbl2, buf);
+ if (rc)
+ goto test_nvram_exit;
}
}
+ rc = bnx2x_test_nvram_dirs(bp, buf);
+
test_nvram_exit:
kfree(buf);
return rc;
@@ -2388,8 +2855,8 @@ static void bnx2x_self_test(struct net_device *dev,
struct ethtool_test *etest, u64 *buf)
{
struct bnx2x *bp = netdev_priv(dev);
- u8 is_serdes;
- int rc;
+ u8 is_serdes, link_up;
+ int rc, cnt = 0;
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
netdev_err(bp->dev,
@@ -2397,6 +2864,7 @@ static void bnx2x_self_test(struct net_device *dev,
etest->flags |= ETH_TEST_FL_FAILED;
return;
}
+
DP(BNX2X_MSG_ETHTOOL,
"Self-test command parameters: offline = %d, external_lb = %d\n",
(etest->flags & ETH_TEST_FL_OFFLINE),
@@ -2404,27 +2872,31 @@ static void bnx2x_self_test(struct net_device *dev,
memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));
+ if (bnx2x_test_nvram(bp) != 0) {
+ if (!IS_MF(bp))
+ buf[4] = 1;
+ else
+ buf[0] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
if (!netif_running(dev)) {
- DP(BNX2X_MSG_ETHTOOL,
- "Can't perform self-test when interface is down\n");
+ DP(BNX2X_MSG_ETHTOOL, "Interface is down\n");
return;
}
is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
-
+ link_up = bp->link_vars.link_up;
/* offline tests are not supported in MF mode */
if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) {
int port = BP_PORT(bp);
u32 val;
- u8 link_up;
/* save current value of input enable for TX port IF */
val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
/* disable input for TX port IF */
REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
- link_up = bp->link_vars.link_up;
-
bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
rc = bnx2x_nic_load(bp, LOAD_DIAG);
if (rc) {
@@ -2471,13 +2943,7 @@ static void bnx2x_self_test(struct net_device *dev,
/* wait until link state is restored */
bnx2x_wait_for_link(bp, link_up, is_serdes);
}
- if (bnx2x_test_nvram(bp) != 0) {
- if (!IS_MF(bp))
- buf[4] = 1;
- else
- buf[0] = 1;
- etest->flags |= ETH_TEST_FL_FAILED;
- }
+
if (bnx2x_test_intr(bp) != 0) {
if (!IS_MF(bp))
buf[5] = 1;
@@ -2486,24 +2952,27 @@ static void bnx2x_self_test(struct net_device *dev,
etest->flags |= ETH_TEST_FL_FAILED;
}
- if (bnx2x_link_test(bp, is_serdes) != 0) {
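+	/* Give a previously-up link up to ~2s (100 x 20ms) to recover
+	 * after the loopback tests; cnt stays 0 when the link was never
+	 * up, so the link test is then reported as failed below.
+	 */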
+ if (link_up) {
+ cnt = 100;
+ while (bnx2x_link_test(bp, is_serdes) && --cnt)
+ msleep(20);
+ }
+
+ if (!cnt) {
if (!IS_MF(bp))
buf[6] = 1;
else
buf[2] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
-
-#ifdef BNX2X_EXTRA_DEBUG
- bnx2x_panic_dump(bp);
-#endif
}
#define IS_PORT_STAT(i) \
((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
-#define IS_MF_MODE_STAT(bp) \
- (IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
+#define HIDE_PORT_STAT(bp) \
+ ((IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) || \
+ IS_VF(bp))
/* ethtool statistics are displayed for all regular ethernet queues and the
* fcoe L2 queue if not disabled
@@ -2516,32 +2985,47 @@ static int bnx2x_num_stat_queues(struct bnx2x *bp)
static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
{
struct bnx2x *bp = netdev_priv(dev);
- int i, num_stats;
+ int i, num_strings = 0;
switch (stringset) {
case ETH_SS_STATS:
if (is_multi(bp)) {
- num_stats = bnx2x_num_stat_queues(bp) *
- BNX2X_NUM_Q_STATS;
+ num_strings = bnx2x_num_stat_queues(bp) *
+ BNX2X_NUM_Q_STATS;
} else
- num_stats = 0;
- if (IS_MF_MODE_STAT(bp)) {
+ num_strings = 0;
+ if (HIDE_PORT_STAT(bp)) {
for (i = 0; i < BNX2X_NUM_STATS; i++)
if (IS_FUNC_STAT(i))
- num_stats++;
+ num_strings++;
} else
- num_stats += BNX2X_NUM_STATS;
+ num_strings += BNX2X_NUM_STATS;
- return num_stats;
+ return num_strings;
case ETH_SS_TEST:
return BNX2X_NUM_TESTS(bp);
+ case ETH_SS_PRIV_FLAGS:
+ return BNX2X_PRI_FLAG_LEN;
+
default:
return -EINVAL;
}
}
+static u32 bnx2x_get_private_flags(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 flags = 0;
+
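+	/* Bit positions follow bnx2x_private_arr above: bit 0 iSCSI,
+	 * bit 1 FCoE, bit 2 storage-only (shown by ethtool
+	 * --show-priv-flags).
+	 */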
+ flags |= (!(bp->flags & NO_ISCSI_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_ISCSI;
+ flags |= (!(bp->flags & NO_FCOE_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_FCOE;
+ flags |= (!!IS_MF_STORAGE_ONLY(bp)) << BNX2X_PRI_FLAG_STORAGE;
+
+ return flags;
+}
+
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -2564,9 +3048,8 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
}
}
-
for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
- if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+ if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
continue;
strcpy(buf + (k + j)*ETH_GSTRING_LEN,
bnx2x_stats_arr[i].string);
@@ -2583,6 +3066,12 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
start = 4;
memcpy(buf, bnx2x_tests_str_arr + start,
ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp));
+ break;
+
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(buf, bnx2x_private_arr,
+ ETH_GSTRING_LEN * BNX2X_PRI_FLAG_LEN);
+ break;
}
}
@@ -2618,7 +3107,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
hw_stats = (u32 *)&bp->eth_stats;
for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
- if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+ if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
continue;
if (bnx2x_stats_arr[i].size == 0) {
/* skip this counter */
@@ -2644,17 +3133,12 @@ static int bnx2x_set_phys_id(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
- if (!netif_running(dev)) {
+ if (!bnx2x_is_nvm_accessible(bp)) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
"cannot access eeprom when the interface is down\n");
return -EAGAIN;
}
- if (!bp->port.pmf) {
- DP(BNX2X_MSG_ETHTOOL, "Interface is not pmf\n");
- return -EOPNOTSUPP;
- }
-
switch (state) {
case ETHTOOL_ID_ACTIVE:
return 1; /* cycle on/off once per second */
@@ -2686,7 +3170,6 @@ static int bnx2x_set_phys_id(struct net_device *dev,
static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
{
-
switch (info->flow_type) {
case TCP_V4_FLOW:
case TCP_V6_FLOW:
@@ -2753,15 +3236,14 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
DP(BNX2X_MSG_ETHTOOL,
"Command parameters not supported\n");
return -EINVAL;
- } else {
- return 0;
}
+ return 0;
case UDP_V4_FLOW:
case UDP_V6_FLOW:
		/* For UDP either 2-tuple hash or 4-tuple hash is supported */
if (info->data == (RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
udp_rss_requested = 1;
else if (info->data == (RXH_IP_SRC | RXH_IP_DST))
udp_rss_requested = 0;
@@ -2773,17 +3255,17 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
- return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
+ return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
} else if ((info->flow_type == UDP_V6_FLOW) &&
(bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
- return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
- } else {
- return 0;
+ return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
}
+ return 0;
+
case IPV4_FLOW:
case IPV6_FLOW:
		/* For IP only 2-tuple hash is supported */
@@ -2791,9 +3273,9 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
DP(BNX2X_MSG_ETHTOOL,
"Command parameters not supported\n");
return -EINVAL;
- } else {
- return 0;
}
+ return 0;
+
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -2809,9 +3291,9 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
DP(BNX2X_MSG_ETHTOOL,
"Command parameters not supported\n");
return -EINVAL;
- } else {
- return 0;
}
+ return 0;
+
default:
return -EINVAL;
}
@@ -2835,7 +3317,7 @@ static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
return T_ETH_INDIRECTION_TABLE_SIZE;
}
-static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
+static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
{
struct bnx2x *bp = netdev_priv(dev);
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
@@ -2859,14 +3341,15 @@ static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
return 0;
}
-static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
+static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key)
{
struct bnx2x *bp = netdev_priv(dev);
size_t i;
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
/*
- * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy()
+ * The same as in bnx2x_get_rxfh: we can't use a memcpy()
		 * as the internal storage of the indirection table is a u8 array
* while indir->ring_index points to an array of u32.
*
@@ -2923,7 +3406,6 @@ static int bnx2x_set_channels(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
-
DP(BNX2X_MSG_ETHTOOL,
"set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
channels->rx_count, channels->tx_count, channels->other_count,
@@ -2964,6 +3446,9 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_drvinfo = bnx2x_get_drvinfo,
.get_regs_len = bnx2x_get_regs_len,
.get_regs = bnx2x_get_regs,
+ .get_dump_flag = bnx2x_get_dump_flag,
+ .get_dump_data = bnx2x_get_dump_data,
+ .set_dump = bnx2x_set_dump,
.get_wol = bnx2x_get_wol,
.set_wol = bnx2x_set_wol,
.get_msglevel = bnx2x_get_msglevel,
@@ -2981,14 +3466,15 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.set_pauseparam = bnx2x_set_pauseparam,
.self_test = bnx2x_self_test,
.get_sset_count = bnx2x_get_sset_count,
+ .get_priv_flags = bnx2x_get_private_flags,
.get_strings = bnx2x_get_strings,
.set_phys_id = bnx2x_set_phys_id,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
.get_rxnfc = bnx2x_get_rxnfc,
.set_rxnfc = bnx2x_set_rxnfc,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
- .get_rxfh_indir = bnx2x_get_rxfh_indir,
- .set_rxfh_indir = bnx2x_set_rxfh_indir,
+ .get_rxfh = bnx2x_get_rxfh,
+ .set_rxfh = bnx2x_set_rxfh,
.get_channels = bnx2x_get_channels,
.set_channels = bnx2x_set_channels,
.get_module_info = bnx2x_get_module_info,
@@ -2998,7 +3484,30 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
};
-void bnx2x_set_ethtool_ops(struct net_device *netdev)
+static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
+ .get_settings = bnx2x_get_settings,
+ .set_settings = bnx2x_set_settings,
+ .get_drvinfo = bnx2x_get_drvinfo,
+ .get_msglevel = bnx2x_get_msglevel,
+ .set_msglevel = bnx2x_set_msglevel,
+ .get_link = bnx2x_get_link,
+ .get_coalesce = bnx2x_get_coalesce,
+ .get_ringparam = bnx2x_get_ringparam,
+ .set_ringparam = bnx2x_set_ringparam,
+ .get_sset_count = bnx2x_get_sset_count,
+ .get_strings = bnx2x_get_strings,
+ .get_ethtool_stats = bnx2x_get_ethtool_stats,
+ .get_rxnfc = bnx2x_get_rxnfc,
+ .set_rxnfc = bnx2x_set_rxnfc,
+ .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
+ .get_rxfh = bnx2x_get_rxfh,
+ .set_rxfh = bnx2x_set_rxfh,
+ .get_channels = bnx2x_get_channels,
+ .set_channels = bnx2x_set_channels,
+};
+
+void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
+ netdev->ethtool_ops = (IS_PF(bp)) ?
+ &bnx2x_ethtool_ops : &bnx2x_vf_ethtool_ops;
}
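A note on the CRC scheme used by bnx2x_nvram_crc() above: each checked NVRAM region ends with its own complemented, little-endian CRC-32, so running crc32_le over the whole region always yields the fixed residual 0xdebb20e3 and no per-region expected value is needed. A standalone sketch of the property (the bitwise crc32_le_sw helper and the buffer contents are illustrative only; a little-endian host is assumed for the memcpy):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Bitwise little-endian CRC-32, same polynomial as the kernel's crc32_le() */
    static uint32_t crc32_le_sw(uint32_t crc, const uint8_t *p, size_t len)
    {
    	while (len--) {
    		crc ^= *p++;
    		for (int i = 0; i < 8; i++)
    			crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1));
    	}
    	return crc;
    }

    int main(void)
    {
    	uint8_t buf[20] = "nvram image payload";	/* first 16 bytes are the payload */
    	uint32_t crc = ~crc32_le_sw(~0u, buf, 16);	/* complemented CRC of payload */

    	memcpy(buf + 16, &crc, 4);			/* append CRC in little-endian */
    	printf("%08x\n", (unsigned)crc32_le_sw(~0u, buf, 20)); /* prints debb20e3 */
    	return 0;
    }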
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 60a83ad1037..95dc3654354 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -1,6 +1,6 @@
/* bnx2x_fw_defs.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -30,31 +30,31 @@
* IRO[138].m2) + ((sbId) * IRO[138].m3))
#define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
- (IRO[316].base + ((pfId) * IRO[316].m1))
-#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[317].base + ((pfId) * IRO[317].m1))
+#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[318].base + ((pfId) * IRO[318].m1))
#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
- (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
+ (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
- (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
+ (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
- (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
+ (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
- (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
+ (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
- (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2))
+ (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
- (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
+ (IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2))
#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
- (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
+ (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
- (IRO[315].base + ((pfId) * IRO[315].m1))
+ (IRO[316].base + ((pfId) * IRO[316].m1))
#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[307].base + ((pfId) * IRO[307].m1))
+ (IRO[308].base + ((pfId) * IRO[308].m1))
#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[306].base + ((pfId) * IRO[306].m1))
+ (IRO[307].base + ((pfId) * IRO[307].m1))
#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[305].base + ((pfId) * IRO[305].m1))
+ (IRO[306].base + ((pfId) * IRO[306].m1))
#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[151].base + ((funcId) * IRO[151].m1))
#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
@@ -87,7 +87,6 @@
(IRO[156].base + ((vfId) * IRO[156].m1))
#define CSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[150].base + ((funcId) * IRO[150].m1))
-#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base)
#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
(IRO[203].base + ((pfId) * IRO[203].m1))
#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
@@ -114,7 +113,7 @@
#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[268].base + ((pfId) * IRO[268].m1))
#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
- (IRO[277].base + ((pfId) * IRO[277].m1))
+ (IRO[278].base + ((pfId) * IRO[278].m1))
#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
(IRO[264].base + ((pfId) * IRO[264].m1))
#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
@@ -136,35 +135,32 @@
#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base)
#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[176].base + ((assertListEntry) * IRO[176].m1))
-#define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \
- (IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * \
- IRO[205].m2))
#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
(IRO[183].base + ((portId) * IRO[183].m1))
#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
- (IRO[318].base + ((pfId) * IRO[318].m1))
+ (IRO[319].base + ((pfId) * IRO[319].m1))
#define USTORM_FUNC_EN_OFFSET(funcId) \
(IRO[178].base + ((funcId) * IRO[178].m1))
#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
- (IRO[282].base + ((pfId) * IRO[282].m1))
-#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[283].base + ((pfId) * IRO[283].m1))
+#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[284].base + ((pfId) * IRO[284].m1))
#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
- (IRO[287].base + ((pfId) * IRO[287].m1))
+ (IRO[288].base + ((pfId) * IRO[288].m1))
#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
- (IRO[284].base + ((pfId) * IRO[284].m1))
+ (IRO[285].base + ((pfId) * IRO[285].m1))
#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[280].base + ((pfId) * IRO[280].m1))
+ (IRO[281].base + ((pfId) * IRO[281].m1))
#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[279].base + ((pfId) * IRO[279].m1))
+ (IRO[280].base + ((pfId) * IRO[280].m1))
#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[278].base + ((pfId) * IRO[278].m1))
+ (IRO[279].base + ((pfId) * IRO[279].m1))
#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
- (IRO[281].base + ((pfId) * IRO[281].m1))
+ (IRO[282].base + ((pfId) * IRO[282].m1))
#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
- (IRO[285].base + ((pfId) * IRO[285].m1))
-#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[286].base + ((pfId) * IRO[286].m1))
+#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+ (IRO[287].base + ((pfId) * IRO[287].m1))
#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
(IRO[182].base + ((pfId) * IRO[182].m1))
#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
@@ -190,39 +186,39 @@
#define XSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[47].base + ((funcId) * IRO[47].m1))
#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
- (IRO[295].base + ((pfId) * IRO[295].m1))
+ (IRO[296].base + ((pfId) * IRO[296].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
- (IRO[298].base + ((pfId) * IRO[298].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[299].base + ((pfId) * IRO[299].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[300].base + ((pfId) * IRO[300].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
(IRO[301].base + ((pfId) * IRO[301].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
(IRO[302].base + ((pfId) * IRO[302].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
(IRO[303].base + ((pfId) * IRO[303].m1))
-#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
(IRO[304].base + ((pfId) * IRO[304].m1))
+#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+ (IRO[305].base + ((pfId) * IRO[305].m1))
#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[294].base + ((pfId) * IRO[294].m1))
+ (IRO[295].base + ((pfId) * IRO[295].m1))
#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[293].base + ((pfId) * IRO[293].m1))
+ (IRO[294].base + ((pfId) * IRO[294].m1))
#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[292].base + ((pfId) * IRO[292].m1))
+ (IRO[293].base + ((pfId) * IRO[293].m1))
#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
- (IRO[297].base + ((pfId) * IRO[297].m1))
+ (IRO[298].base + ((pfId) * IRO[298].m1))
#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
- (IRO[296].base + ((pfId) * IRO[296].m1))
+ (IRO[297].base + ((pfId) * IRO[297].m1))
#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
- (IRO[291].base + ((pfId) * IRO[291].m1))
+ (IRO[292].base + ((pfId) * IRO[292].m1))
#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
- (IRO[290].base + ((pfId) * IRO[290].m1))
+ (IRO[291].base + ((pfId) * IRO[291].m1))
#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
- (IRO[289].base + ((pfId) * IRO[289].m1))
+ (IRO[290].base + ((pfId) * IRO[290].m1))
#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
- (IRO[288].base + ((pfId) * IRO[288].m1))
+ (IRO[289].base + ((pfId) * IRO[289].m1))
#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
(IRO[44].base + ((pfId) * IRO[44].m1))
#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
@@ -305,12 +301,10 @@
#define MAX_VLAN_CREDIT_E1H 0 /* Per Chip */
#define MAX_VLAN_CREDIT_E2 272 /* Per Path */
-
/* Maximal aggregation queues supported */
#define ETH_MAX_AGGREGATION_QUEUES_E1 32
#define ETH_MAX_AGGREGATION_QUEUES_E1H_E2 64
-
#define ETH_NUM_OF_MCAST_BINS 256
#define ETH_NUM_OF_MCAST_ENGINES_E2 72
@@ -353,7 +347,6 @@
/* max number of slow path commands per port */
#define MAX_RAMRODS_PER_PORT 8
-
/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
#define TIMERS_TICK_SIZE_CHIP (1e-3)
@@ -380,7 +373,6 @@
that is not mapped to priority*/
#define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
-
#define C_ERES_PER_PAGE \
(PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
@@ -391,8 +383,10 @@
#define INVALID_VNIC_ID 0xFF
-
#define UNDEF_IRO 0x80000000
+/* used for defining the number of FCoE tasks supported per PF */
+#define MAX_FCOE_FUNCS_PER_ENGINE 2
+#define MAX_NUM_FCOE_TASKS_PER_ENGINE 4096
#endif /* BNX2X_FW_DEFS_H */
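For readers following the renumbered IRO macros above: each firmware IRO entry supplies a base offset plus up to three per-index strides, so e.g. CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) expands to IRO[310].base + pfId * m1 + iscsiEqId * m2. A sketch of the pattern (struct and helper names are illustrative, with fields assumed from the macro bodies):

    #include <stdint.h>

    struct iro_sketch {
    	uint32_t base;		/* fixed base offset */
    	uint16_t m1, m2, m3;	/* per-index strides */
    };

    static uint32_t iro_offset(const struct iro_sketch *e,
    			   uint32_t i1, uint32_t i2, uint32_t i3)
    {
    	return e->base + i1 * e->m1 + i2 * e->m2 + i3 * e->m3;
    }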
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
index 4bed52ba300..8aafd9b5d6a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
@@ -1,13 +1,13 @@
/* bnx2x_fw_file_hdr.h: FW binary file header structure.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Vladislav Zolotarov
* Based on the original idea of John Wright <john.wright@hp.com>.
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 3369a50ac6b..5ba8af50c84 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1,6 +1,6 @@
/* bnx2x_hsi.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -114,6 +114,10 @@ struct license_key {
#define EPIO_CFG_EPIO30 0x0000001f
#define EPIO_CFG_EPIO31 0x00000020
+struct mac_addr {
+ u32 upper;
+ u32 lower;
+};
struct shared_hw_cfg { /* NVRAM Offset */
/* Up to 16 bytes of NULL-terminated string */
@@ -168,6 +172,7 @@ struct shared_hw_cfg { /* NVRAM Offset */
#define SHARED_HW_CFG_LED_MAC4 0x000c0000
#define SHARED_HW_CFG_LED_PHY8 0x000d0000
#define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000
+ #define SHARED_HW_CFG_LED_EXTPHY2 0x000f0000
#define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000
@@ -508,7 +513,22 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_PAUSE_ON_HOST_RING_DISABLED 0x00000000
#define PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED 0x00000001
- u32 reserved0[6]; /* 0x178 */
+	/* SFP+ Tx Equalization: NIC recommended and tested value is 0xBEB2;
+	 * LOM recommended and tested value is 0xBEB2. Using a different
+	 * value means using a value not tested by BRCM.
+	 */
+ u32 sfi_tap_values; /* 0x178 */
+ #define PORT_HW_CFG_TX_EQUALIZATION_MASK 0x0000FFFF
+ #define PORT_HW_CFG_TX_EQUALIZATION_SHIFT 0
+
+ /* SFP+ Tx driver broadcast IDRIVER: NIC recommended and tested
+ * value is 0x2. LOM recommended and tested value is 0x2. Using a
+ * different value means using a value not tested by BRCM
+ */
+ #define PORT_HW_CFG_TX_DRV_BROADCAST_MASK 0x000F0000
+ #define PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT 16
+
+ u32 reserved0[5]; /* 0x17c */
u32 aeu_int_mask; /* 0x190 */
@@ -899,6 +919,10 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
#define PORT_FEAT_CFG_DCBX_DISABLED 0x00000000
#define PORT_FEAT_CFG_DCBX_ENABLED 0x00000100
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK 0x00000C00
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE 0x00000400
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI 0x00000800
+
#define PORT_FEATURE_EN_SIZE_MASK 0x0f000000
#define PORT_FEATURE_EN_SIZE_SHIFT 24
#define PORT_FEATURE_WOL_ENABLED 0x01000000
@@ -1277,6 +1301,9 @@ struct drv_func_mb {
#define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000
+ #define DRV_MSG_CODE_RMMOD 0xdb000000
+ #define REQ_BC_VER_4_RMMOD_CMD 0x0007080f
+
#define DRV_MSG_CODE_SET_MF_BW 0xe0000000
#define REQ_BC_VER_4_SET_MF_BW 0x00060202
#define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
@@ -1300,6 +1327,8 @@ struct drv_func_mb {
#define DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET 0x00000002
#define DRV_MSG_CODE_LOAD_REQ_WITH_LFA 0x0000100a
+ #define DRV_MSG_CODE_LOAD_REQ_FORCE_LFA 0x00002000
+
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
#define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000
@@ -1347,6 +1376,8 @@ struct drv_func_mb {
#define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000
+ #define FW_MSG_CODE_RMMOD_ACK 0xdb100000
+
#define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000
#define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
@@ -1972,6 +2003,23 @@ struct shmem_lfa {
#define SHMEM_LFA_DONT_CLEAR_STAT (1<<24)
};
+/* Used to support NCSI get OS driver version.
+ * On driver load the version value will be set;
+ * on driver unload a value of 0x0 will be set.
+ */
+struct os_drv_ver {
+#define DRV_VER_NOT_LOADED 0
+
+	/* the order of personalities is important */
+#define DRV_PERS_ETHERNET 0
+#define DRV_PERS_ISCSI 1
+#define DRV_PERS_FCOE 2
+
+	/* shmem2 struct is constant; can't add more personalities here */
+#define MAX_DRV_PERS 3
+ u32 versions[MAX_DRV_PERS];
+};
+
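/* Editor's sketch (not part of this patch): how a PF might publish its OS
 * driver version per personality via the struct above. The shmem2_base
 * field and REG_WR() accessor follow the driver's usual conventions; the
 * helper name and explicit address arithmetic are illustrative assumptions.
 */
static void sketch_report_drv_ver(struct bnx2x *bp, int fn, u32 ether_ver)
{
	u32 addr = bp->common.shmem2_base +
		   offsetof(struct shmem2_region, func_os_drv_ver) +
		   fn * sizeof(struct os_drv_ver) +
		   DRV_PERS_ETHERNET * sizeof(u32);

	REG_WR(bp, addr, ether_ver);	/* 0 (DRV_VER_NOT_LOADED) on unload */
}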
struct ncsi_oem_fcoe_features {
u32 fcoe_features1;
#define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF
@@ -2186,6 +2234,18 @@ struct shmem2_region {
u32 reserved4; /* Offset 0x150 */
u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */
#define LINK_ATTR_SYNC_KR2_ENABLE (1<<0)
+
+ u32 reserved5[2];
+ u32 reserved6[PORT_MAX];
+
+ /* driver version for each personality */
+ struct os_drv_ver func_os_drv_ver[E2_FUNC_MAX]; /* Offset 0x16c */
+
+ /* Flag to the driver that PF's drv_info_host_addr buffer was read */
+ u32 mfw_drv_indication;
+
+ /* We use indication for each PF (0..3) */
+#define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_))
};
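/* Editor's sketch (assumption, not in this patch): testing whether the MFW
 * has consumed this PF's drv_info buffer via the per-PF indication bit
 * defined above; SHMEM2_RD() is the driver's usual shmem2 read accessor.
 */
static bool sketch_drv_info_read_done(struct bnx2x *bp, u8 pf)
{
	u32 ind = SHMEM2_RD(bp, mfw_drv_indication);

	return !!(ind & MFW_DRV_IND_READ_DONE_OFFSET(pf));
}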
@@ -2817,8 +2877,8 @@ struct afex_stats {
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 8
-#define BCM_5710_FW_REVISION_VERSION 2
-#define BCM_5710_FW_ENGINEERING_VERSION 0
+#define BCM_5710_FW_REVISION_VERSION 19
+#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
@@ -3374,6 +3434,10 @@ struct regpair {
__le32 hi;
};
+struct regpair_native {
+ u32 lo;
+ u32 hi;
+};
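/* Editor's sketch: the split between regpair (__le32, device-endian) and
 * the new regpair_native (u32, host-endian) matters when filling host
 * addresses that are read back natively; a minimal helper, assuming the
 * driver's existing U64_LO/U64_HI macros.
 */
static inline void sketch_set_regpair_native(struct regpair_native *rp,
					     dma_addr_t mapping)
{
	rp->lo = U64_LO(mapping);	/* low 32 bits, host order */
	rp->hi = U64_HI(mapping);	/* high 32 bits, host order */
}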
/*
* Classify rule opcodes in E2/E3
@@ -3505,11 +3569,14 @@ struct client_init_tx_data {
#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2
#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3)
#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3
-#define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4)
-#define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4
+#define CLIENT_INIT_TX_DATA_RESERVED0 (0xFFF<<4)
+#define CLIENT_INIT_TX_DATA_RESERVED0_SHIFT 4
u8 default_vlan_flg;
u8 force_default_pri_flg;
- __le32 reserved3;
+ u8 tunnel_lso_inc_ip_id;
+ u8 refuse_outband_vlan_flg;
+ u8 tunnel_non_lso_pcsum_location;
+ u8 reserved1;
};
/*
@@ -3543,6 +3610,11 @@ struct client_update_ramrod_data {
__le16 silent_vlan_mask;
u8 silent_vlan_removal_flg;
u8 silent_vlan_change_flg;
+ u8 refuse_outband_vlan_flg;
+ u8 refuse_outband_vlan_change_flg;
+ u8 tx_switching_flg;
+ u8 tx_switching_change_flg;
+ __le32 reserved1;
__le32 echo;
};
@@ -3612,7 +3684,8 @@ struct eth_classify_header {
*/
struct eth_classify_mac_cmd {
struct eth_classify_cmd_header header;
- __le32 reserved0;
+ __le16 reserved0;
+ __le16 inner_mac;
__le16 mac_lsb;
__le16 mac_mid;
__le16 mac_msb;
@@ -3625,7 +3698,8 @@ struct eth_classify_mac_cmd {
*/
struct eth_classify_pair_cmd {
struct eth_classify_cmd_header header;
- __le32 reserved0;
+ __le16 reserved0;
+ __le16 inner_mac;
__le16 mac_lsb;
__le16 mac_mid;
__le16 mac_msb;
@@ -3779,7 +3853,8 @@ struct eth_fast_path_rx_cqe {
__le16 len_on_bd;
struct parsing_flags pars_flags;
union eth_sgl_or_raw_data sgl_or_raw_data;
- __le32 reserved1[8];
+ __le32 reserved1[7];
+ u32 marker;
};
@@ -3847,8 +3922,68 @@ struct eth_halt_ramrod_data {
/*
- * Command for setting multicast classification for a client
+ * Destination and source MAC addresses.
+ */
+struct eth_mac_addresses {
+#if defined(__BIG_ENDIAN)
+ __le16 dst_mid;
+ __le16 dst_lo;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_lo;
+ __le16 dst_mid;
+#endif
+#if defined(__BIG_ENDIAN)
+ __le16 src_lo;
+ __le16 dst_hi;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_hi;
+ __le16 src_lo;
+#endif
+#if defined(__BIG_ENDIAN)
+ __le16 src_hi;
+ __le16 src_mid;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 src_mid;
+ __le16 src_hi;
+#endif
+};
+
+/* tunneling related data */
+struct eth_tunnel_data {
+#if defined(__BIG_ENDIAN)
+ __le16 dst_mid;
+ __le16 dst_lo;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_lo;
+ __le16 dst_mid;
+#endif
+#if defined(__BIG_ENDIAN)
+ __le16 reserved0;
+ __le16 dst_hi;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_hi;
+ __le16 reserved0;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 reserved1;
+ u8 ip_hdr_start_inner_w;
+ __le16 pseudo_csum;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 pseudo_csum;
+ u8 ip_hdr_start_inner_w;
+ u8 reserved1;
+#endif
+};
+
+/* Union for MAC addresses and for tunneling data.
+ * Considered as tunneling data only if (tunnel_exist == 1).
+ */
+union eth_mac_addr_or_tunnel_data {
+ struct eth_mac_addresses mac_addr;
+ struct eth_tunnel_data tunnel_data;
+};
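/* Editor's sketch: how a TX path might fill the union above. Whether the
 * bytes are read as MACs or as tunnel data follows the start BD's
 * TUNNEL_EXIST bit (see eth_tx_start_bd below); the helper name and MAC
 * byte packing are assumptions for illustration.
 */
static void sketch_fill_parse_bd_union(union eth_mac_addr_or_tunnel_data *u,
				       bool tunnel, const u8 *dmac)
{
	if (!tunnel) {
		/* __le16 layout: low byte first, so dmac[1] lands first */
		u->mac_addr.dst_hi = cpu_to_le16((dmac[0] << 8) | dmac[1]);
		u->mac_addr.dst_mid = cpu_to_le16((dmac[2] << 8) | dmac[3]);
		u->mac_addr.dst_lo = cpu_to_le16((dmac[4] << 8) | dmac[5]);
	} else {
		/* tunnel case: pseudo_csum / ip_hdr_start_inner_w instead */
		u->tunnel_data.pseudo_csum = 0;
	}
}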
+
+/*Command for setting multicast classification for a client */
struct eth_multicast_rules_cmd {
u8 cmd_general_data;
#define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0)
@@ -3866,7 +4001,6 @@ struct eth_multicast_rules_cmd {
struct regpair reserved3;
};
-
/*
* parameters for multicast classification ramrod
*/
@@ -3875,7 +4009,6 @@ struct eth_multicast_rules_ramrod_data {
struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT];
};
-
/*
* Place holder for ramrods protocol specific data
*/
@@ -3939,11 +4072,14 @@ struct eth_rss_update_ramrod_data {
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5)
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5
+#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<6)
+#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 6
#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7)
#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7
u8 rss_result_mask;
u8 rss_mode;
- __le32 __reserved2;
+ __le16 udp_4tuple_dst_port_mask;
+ __le16 udp_4tuple_dst_port_value;
u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE];
__le32 rss_key[T_ETH_RSS_KEY];
__le32 echo;
@@ -4107,6 +4243,23 @@ enum eth_tpa_update_command {
MAX_ETH_TPA_UPDATE_COMMAND
};
+/* In case of LSO over an IPv4 tunnel, whether to increment
+ * the IP ID on the external or the internal IP header
+ */
+enum eth_tunnel_lso_inc_ip_id {
+ EXT_HEADER,
+ INT_HEADER,
+ MAX_ETH_TUNNEL_LSO_INC_IP_ID
+};
+
+/* In case a tunnel exists and L4 checksum is offloaded,
+ * the pseudo checksum location: on packet or on BD.
+ */
+enum eth_tunnel_non_lso_pcsum_location {
+ PCSUM_ON_PKT,
+ PCSUM_ON_BD,
+ MAX_ETH_TUNNEL_NON_LSO_PCSUM_LOCATION
+};
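/* Editor's sketch (hypothetical values): wiring the two tunnel enums above
 * into the client_init_tx_data fields added earlier in this patch.
 */
static void sketch_client_tx_tunnel_cfg(struct client_init_tx_data *tx)
{
	/* bump the inner IP ID for tunneled LSO */
	tx->tunnel_lso_inc_ip_id = INT_HEADER;
	/* place the pseudo checksum on the BD rather than in the packet */
	tx->tunnel_non_lso_pcsum_location = PCSUM_ON_BD;
}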
/*
* Tx regular BD structure
@@ -4158,8 +4311,8 @@ struct eth_tx_start_bd {
#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
#define ETH_TX_START_BD_PARSE_NBDS (0x3<<5)
#define ETH_TX_START_BD_PARSE_NBDS_SHIFT 5
-#define ETH_TX_START_BD_RESREVED (0x1<<7)
-#define ETH_TX_START_BD_RESREVED_SHIFT 7
+#define ETH_TX_START_BD_TUNNEL_EXIST (0x1<<7)
+#define ETH_TX_START_BD_TUNNEL_EXIST_SHIFT 7
};
/*
@@ -4208,15 +4361,10 @@ struct eth_tx_parse_bd_e1x {
* Tx parsing BD structure for ETH E2
*/
struct eth_tx_parse_bd_e2 {
- __le16 dst_mac_addr_lo;
- __le16 dst_mac_addr_mid;
- __le16 dst_mac_addr_hi;
- __le16 src_mac_addr_lo;
- __le16 src_mac_addr_mid;
- __le16 src_mac_addr_hi;
+ union eth_mac_addr_or_tunnel_data data;
__le32 parsing_data;
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x7FF<<0)
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W (0x7FF<<0)
+#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT 0
#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<11)
#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 11
#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<15)
@@ -4228,8 +4376,51 @@ struct eth_tx_parse_bd_e2 {
};
/*
- * The last BD in the BD memory will hold a pointer to the next BD memory
+ * Tx 2nd parsing BD structure for ETH packet
*/
+struct eth_tx_parse_2nd_bd {
+ __le16 global_data;
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W (0xF<<0)
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER (0x1<<4)
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT 4
+#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN (0x1<<5)
+#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT 5
+#define ETH_TX_PARSE_2ND_BD_NS_FLG (0x1<<6)
+#define ETH_TX_PARSE_2ND_BD_NS_FLG_SHIFT 6
+#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST (0x1<<7)
+#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST_SHIFT 7
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W (0x1F<<8)
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT 8
+#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x7<<13)
+#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 13
+ __le16 reserved1;
+ u8 tcp_flags;
+#define ETH_TX_PARSE_2ND_BD_FIN_FLG (0x1<<0)
+#define ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_SYN_FLG (0x1<<1)
+#define ETH_TX_PARSE_2ND_BD_SYN_FLG_SHIFT 1
+#define ETH_TX_PARSE_2ND_BD_RST_FLG (0x1<<2)
+#define ETH_TX_PARSE_2ND_BD_RST_FLG_SHIFT 2
+#define ETH_TX_PARSE_2ND_BD_PSH_FLG (0x1<<3)
+#define ETH_TX_PARSE_2ND_BD_PSH_FLG_SHIFT 3
+#define ETH_TX_PARSE_2ND_BD_ACK_FLG (0x1<<4)
+#define ETH_TX_PARSE_2ND_BD_ACK_FLG_SHIFT 4
+#define ETH_TX_PARSE_2ND_BD_URG_FLG (0x1<<5)
+#define ETH_TX_PARSE_2ND_BD_URG_FLG_SHIFT 5
+#define ETH_TX_PARSE_2ND_BD_ECE_FLG (0x1<<6)
+#define ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT 6
+#define ETH_TX_PARSE_2ND_BD_CWR_FLG (0x1<<7)
+#define ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT 7
+ u8 reserved2;
+ u8 tunnel_udp_hdr_start_w;
+ u8 fw_ip_hdr_to_payload_w;
+ __le16 fw_ip_csum_wo_len_flags_frag;
+ __le16 hw_ip_id;
+ __le32 tcp_send_seq;
+};
+
+/* The last BD in the BD memory will hold a pointer to the next BD memory */
struct eth_tx_next_bd {
__le32 addr_lo;
__le32 addr_hi;
@@ -4244,6 +4435,7 @@ union eth_tx_bd_types {
struct eth_tx_bd reg_bd;
struct eth_tx_parse_bd_e1x parse_bd_e1x;
struct eth_tx_parse_bd_e2 parse_bd_e2;
+ struct eth_tx_parse_2nd_bd parse_2nd_bd;
struct eth_tx_next_bd next_bd;
};
@@ -4400,13 +4592,13 @@ struct tstorm_eth_function_common_config {
* MAC filtering configuration parameters per port in Tstorm
*/
struct tstorm_eth_mac_filter_config {
- __le32 ucast_drop_all;
- __le32 ucast_accept_all;
- __le32 mcast_drop_all;
- __le32 mcast_accept_all;
- __le32 bcast_accept_all;
- __le32 vlan_filter[2];
- __le32 unmatched_unicast;
+ u32 ucast_drop_all;
+ u32 ucast_accept_all;
+ u32 mcast_drop_all;
+ u32 mcast_accept_all;
+ u32 bcast_accept_all;
+ u32 vlan_filter[2];
+ u32 unmatched_unicast;
};
@@ -4655,10 +4847,10 @@ enum common_spqe_cmd_id {
RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
RAMROD_CMD_ID_COMMON_START_TRAFFIC,
RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS,
+ RAMROD_CMD_ID_COMMON_SET_TIMESYNC,
MAX_COMMON_SPQE_CMD_ID
};
-
/*
* Per-protocol connection types
*/
@@ -4855,7 +5047,7 @@ struct vf_flr_event_data {
*/
struct malicious_vf_event_data {
u8 vf_id;
- u8 reserved0;
+ u8 err_id;
u16 reserved1;
u32 reserved2;
u32 reserved3;
@@ -4898,7 +5090,7 @@ union event_data {
* per PF event ring data
*/
struct event_ring_data {
- struct regpair base_addr;
+ struct regpair_native base_addr;
#if defined(__BIG_ENDIAN)
u8 index_id;
u8 sb_id;
@@ -4961,10 +5153,10 @@ enum event_ring_opcode {
EVENT_RING_OPCODE_CLASSIFICATION_RULES,
EVENT_RING_OPCODE_FILTERS_RULES,
EVENT_RING_OPCODE_MULTICAST_RULES,
+ EVENT_RING_OPCODE_SET_TIMESYNC,
MAX_EVENT_RING_OPCODE
};
-
/*
* Modes for fairness algorithm
*/
@@ -5002,14 +5194,18 @@ struct flow_control_configuration {
*/
struct function_start_data {
u8 function_mode;
- u8 reserved;
+ u8 allow_npar_tx_switching;
__le16 sd_vlan_tag;
__le16 vif_id;
u8 path_id;
u8 network_cos_mode;
+ u8 dmae_cmd_id;
+ u8 gre_tunnel_mode;
+ u8 gre_tunnel_rss;
+ u8 nvgre_clss_en;
+ __le16 reserved1[2];
};
-
struct function_update_data {
u8 vif_id_change_flg;
u8 afex_default_vlan_change_flg;
@@ -5019,14 +5215,19 @@ struct function_update_data {
__le16 afex_default_vlan;
u8 allowed_priorities;
u8 network_cos_mode;
+ u8 lb_mode_en_change_flg;
u8 lb_mode_en;
u8 tx_switch_suspend_change_flg;
u8 tx_switch_suspend;
u8 echo;
- __le16 reserved1;
+ u8 reserved1;
+ u8 update_gre_cfg_flg;
+ u8 gre_tunnel_mode;
+ u8 gre_tunnel_rss;
+ u8 nvgre_clss_en;
+ u32 reserved3;
};
-
/*
* FW version stored in the Xstorm RAM
*/
@@ -5053,6 +5254,22 @@ struct fw_version {
#define __FW_VERSION_RESERVED_SHIFT 4
};
+/* GRE RSS Mode */
+enum gre_rss_mode {
+ GRE_OUTER_HEADERS_RSS,
+ GRE_INNER_HEADERS_RSS,
+ NVGRE_KEY_ENTROPY_RSS,
+ MAX_GRE_RSS_MODE
+};
+
+/* GRE Tunnel Mode */
+enum gre_tunnel_type {
+ NO_GRE_TUNNEL,
+ NVGRE_TUNNEL,
+ L2GRE_TUNNEL,
+ IPGRE_TUNNEL,
+ MAX_GRE_TUNNEL_TYPE
+};
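/* Editor's sketch: a FUNCTION_START configuration using the GRE enums
 * above; the field values here are illustrative, not mandated by the patch.
 */
static void sketch_func_start_gre(struct function_start_data *fs)
{
	fs->gre_tunnel_mode = NVGRE_TUNNEL;	    /* enum gre_tunnel_type */
	fs->gre_tunnel_rss = GRE_INNER_HEADERS_RSS; /* enum gre_rss_mode */
	fs->nvgre_clss_en = 1;			    /* NVGRE classification */
}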
/*
* Dynamic Host-Coalescing - Driver(host) counters
@@ -5131,7 +5348,7 @@ struct pci_entity {
* The fast-path status block meta-data, common to all chips
*/
struct hc_sb_data {
- struct regpair host_sb_addr;
+ struct regpair_native host_sb_addr;
struct hc_status_block_sm state_machine[HC_SB_MAX_SM];
struct pci_entity p_func;
#if defined(__BIG_ENDIAN)
@@ -5145,7 +5362,7 @@ struct hc_sb_data {
u8 state;
u8 rsrv0;
#endif
- struct regpair rsrv1[2];
+ struct regpair_native rsrv1[2];
};
@@ -5163,7 +5380,7 @@ enum hc_segment {
* The fast-path status block meta-data
*/
struct hc_sp_status_block_data {
- struct regpair host_sb_addr;
+ struct regpair_native host_sb_addr;
#if defined(__BIG_ENDIAN)
u8 rsrv1;
u8 state;
@@ -5216,6 +5433,26 @@ enum ip_ver {
MAX_IP_VER
};
+/*
+ * Malicious VF error ID
+ */
+enum malicious_vf_error_id {
+ VF_PF_CHANNEL_NOT_READY,
+ ETH_ILLEGAL_BD_LENGTHS,
+ ETH_PACKET_TOO_SHORT,
+ ETH_PAYLOAD_TOO_BIG,
+ ETH_ILLEGAL_ETH_TYPE,
+ ETH_ILLEGAL_LSO_HDR_LEN,
+ ETH_TOO_MANY_BDS,
+ ETH_ZERO_HDR_NBDS,
+ ETH_START_BD_NOT_SET,
+ ETH_ILLEGAL_PARSE_NBDS,
+ ETH_IPV6_AND_CHECKSUM,
+ ETH_VLAN_FLG_INCORRECT,
+ ETH_ILLEGAL_LSO_MSS,
+ ETH_TUNNEL_NOT_SUPPORTED,
+ MAX_MALICIOUS_VF_ERROR_ID
+};
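/* Editor's sketch: the err_id byte added to malicious_vf_event_data above
 * indexes this enum; a hypothetical decoder for log messages.
 */
static const char *sketch_malicious_vf_err_str(u8 err_id)
{
	switch (err_id) {
	case VF_PF_CHANNEL_NOT_READY:
		return "vf-pf channel not ready";
	case ETH_TOO_MANY_BDS:
		return "too many BDs in packet";
	case ETH_ILLEGAL_LSO_MSS:
		return "illegal LSO MSS";
	default:
		return "unknown malicious VF error";
	}
}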
/*
* Multi-function modes
@@ -5360,7 +5597,6 @@ struct protocol_common_spe {
union protocol_common_specific_data data;
};
-
/*
* The send queue element
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index c8f10f0e8a0..bd90e50bd8e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -1,15 +1,15 @@
/* bnx2x_init.h: Broadcom Everest network driver.
 * Structures and macros needed during the initialization.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
- * Modified by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Modified by: Vladislav Zolotarov
*/
#ifndef BNX2X_INIT_H
@@ -640,23 +640,35 @@ static const struct {
* [30] MCP Latched ump_tx_parity
* [31] MCP Latched scpad_parity
*/
-#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
+#define MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS \
(AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
- AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY)
+
+#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
+ (MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS | \
AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
/* Below registers control the MCP parity attention output. When
* MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
* enabled, when cleared - disabled.
*/
-static const u32 mcp_attn_ctl_regs[] = {
- MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
- MISC_REG_AEU_ENABLE4_NIG_0,
- MISC_REG_AEU_ENABLE4_PXP_0,
- MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
- MISC_REG_AEU_ENABLE4_NIG_1,
- MISC_REG_AEU_ENABLE4_PXP_1
+static const struct {
+ u32 addr;
+ u32 bits;
+} mcp_attn_ctl_regs[] = {
+ { MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
+ MISC_AEU_ENABLE_MCP_PRTY_BITS },
+ { MISC_REG_AEU_ENABLE4_NIG_0,
+ MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+ { MISC_REG_AEU_ENABLE4_PXP_0,
+ MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+ { MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
+ MISC_AEU_ENABLE_MCP_PRTY_BITS },
+ { MISC_REG_AEU_ENABLE4_NIG_1,
+ MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+ { MISC_REG_AEU_ENABLE4_PXP_1,
+ MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }
};
static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
@@ -665,14 +677,14 @@ static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
u32 reg_val;
for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
- reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]);
+ reg_val = REG_RD(bp, mcp_attn_ctl_regs[i].addr);
if (enable)
- reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
+ reg_val |= mcp_attn_ctl_regs[i].bits;
else
- reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;
+ reg_val &= ~mcp_attn_ctl_regs[i].bits;
- REG_WR(bp, mcp_attn_ctl_regs[i], reg_val);
+ REG_WR(bp, mcp_attn_ctl_regs[i].addr, reg_val);
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index d755acfe7a4..5669ed2e87d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -2,14 +2,14 @@
* Static functions needed during the initialization.
* This file is "included" in bnx2x_main.c.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Vladislav Zolotarov
*/
#ifndef BNX2X_INIT_OPS_H
@@ -218,7 +218,7 @@ static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len,
/* gunzip_outlen is in dwords */
len = GUNZIP_OUTLEN(bp);
for (i = 0; i < len; i++)
- ((u32 *)GUNZIP_BUF(bp))[i] =
+ ((u32 *)GUNZIP_BUF(bp))[i] = (__force u32)
cpu_to_le32(((u32 *)GUNZIP_BUF(bp))[i]);
bnx2x_write_big_buf_wb(bp, addr, len);
@@ -232,7 +232,7 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
u16 op_end =
INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
STAGE_END)];
- union init_op *op;
+ const union init_op *op;
u32 op_idx, op_type, addr, len;
const u32 *data, *data_base;
@@ -244,7 +244,7 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
for (op_idx = op_start; op_idx < op_end; op_idx++) {
- op = (union init_op *)&(INIT_OPS(bp)[op_idx]);
+ op = (const union init_op *)&(INIT_OPS(bp)[op_idx]);
/* Get generic data */
op_type = op->raw.op;
addr = op->raw.offset;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 09096b43a6e..53fb4fa61b4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -1,4 +1,4 @@
-/* Copyright 2008-2012 Broadcom Corporation
+/* Copyright 2008-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -27,6 +27,10 @@
#include "bnx2x.h"
#include "bnx2x_cmn.h"
+typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u8 dev_addr, u16 addr, u8 byte_cnt,
+ u8 *o_buf, u8);
/********************************************************/
#define ETH_HLEN 14
/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
@@ -152,6 +156,7 @@
#define SFP_EEPROM_CON_TYPE_ADDR 0x2
#define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
#define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
+ #define SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22
#define SFP_EEPROM_COMP_CODE_ADDR 0x3
@@ -170,6 +175,7 @@
#define EDC_MODE_LINEAR 0x0022
#define EDC_MODE_LIMITING 0x0044
#define EDC_MODE_PASSIVE_DAC 0x0055
+#define EDC_MODE_ACTIVE_DAC 0x0066
/* ETS defines */
#define DCBX_INVALID_COS (0xFF)
@@ -199,6 +205,11 @@
(_bank + (_addr & 0xf)), \
_val)
+static int bnx2x_check_half_open_conn(struct link_params *params,
+ struct link_vars *vars, u8 notify);
+static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+ struct link_params *params);
+
static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
u32 val = REG_RD(bp, reg);
@@ -1393,57 +1404,6 @@ static void bnx2x_update_pfc_xmac(struct link_params *params,
udelay(30);
}
-
-static void bnx2x_emac_get_pfc_stat(struct link_params *params,
- u32 pfc_frames_sent[2],
- u32 pfc_frames_received[2])
-{
- /* Read pfc statistic */
- struct bnx2x *bp = params->bp;
- u32 emac_base = params->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
- u32 val_xon = 0;
- u32 val_xoff = 0;
-
- DP(NETIF_MSG_LINK, "pfc statistic read from EMAC\n");
-
- /* PFC received frames */
- val_xoff = REG_RD(bp, emac_base +
- EMAC_REG_RX_PFC_STATS_XOFF_RCVD);
- val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT;
- val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD);
- val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT;
-
- pfc_frames_received[0] = val_xon + val_xoff;
-
- /* PFC received sent */
- val_xoff = REG_RD(bp, emac_base +
- EMAC_REG_RX_PFC_STATS_XOFF_SENT);
- val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT;
- val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT);
- val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT;
-
- pfc_frames_sent[0] = val_xon + val_xoff;
-}
-
-/* Read pfc statistic*/
-void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
- u32 pfc_frames_sent[2],
- u32 pfc_frames_received[2])
-{
- /* Read pfc statistic */
- struct bnx2x *bp = params->bp;
-
- DP(NETIF_MSG_LINK, "pfc statistic\n");
-
- if (!vars->link_up)
- return;
-
- if (vars->mac_type == MAC_TYPE_EMAC) {
- DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n");
- bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
- pfc_frames_received);
- }
-}
/******************************************************************/
/* MAC/PBF section */
/******************************************************************/
@@ -2258,7 +2218,6 @@ int bnx2x_update_pfc(struct link_params *params,
*/
u32 val;
struct bnx2x *bp = params->bp;
- int bnx2x_status = 0;
u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC);
if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
@@ -2272,7 +2231,7 @@ int bnx2x_update_pfc(struct link_params *params,
bnx2x_update_pfc_nig(params, vars, pfc_params);
if (!vars->link_up)
- return bnx2x_status;
+ return 0;
DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n");
@@ -2286,7 +2245,7 @@ int bnx2x_update_pfc(struct link_params *params,
== 0) {
DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
bnx2x_emac_enable(params, vars, 0);
- return bnx2x_status;
+ return 0;
}
if (CHIP_IS_E2(bp))
bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
@@ -2300,7 +2259,7 @@ int bnx2x_update_pfc(struct link_params *params,
val = 1;
REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
}
- return bnx2x_status;
+ return 0;
}
static int bnx2x_bmac1_enable(struct link_params *params,
@@ -3116,7 +3075,7 @@ static void bnx2x_bsc_module_sel(struct link_params *params)
}
static int bnx2x_bsc_read(struct link_params *params,
- struct bnx2x_phy *phy,
+ struct bnx2x *bp,
u8 sl_devid,
u16 sl_addr,
u8 lc_addr,
@@ -3125,12 +3084,6 @@ static int bnx2x_bsc_read(struct link_params *params,
{
u32 val, i;
int rc = 0;
- struct bnx2x *bp = params->bp;
-
- if ((sl_devid != 0xa0) && (sl_devid != 0xa2)) {
- DP(NETIF_MSG_LINK, "invalid sl_devid 0x%x\n", sl_devid);
- return -EINVAL;
- }
if (xfer_cnt > 16) {
DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
@@ -3426,13 +3379,19 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
switch (phy->req_flow_ctrl) {
case BNX2X_FLOW_CTRL_AUTO:
- if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH)
+ switch (params->req_fc_auto_adv) {
+ case BNX2X_FLOW_CTRL_BOTH:
*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
- else
+ break;
+ case BNX2X_FLOW_CTRL_RX:
+ case BNX2X_FLOW_CTRL_TX:
*ieee_fc |=
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ break;
+ default:
+ break;
+ }
break;
-
case BNX2X_FLOW_CTRL_TX:
*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
break;
@@ -3629,6 +3588,16 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
* init configuration, and set/clear SGMII flag. Internal
* phy init is done purely in phy_init stage.
*/
+#define WC_TX_DRIVER(post2, idriver, ipre) \
+ ((post2 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | \
+ (idriver << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | \
+ (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))
+
+#define WC_TX_FIR(post, main, pre) \
+ ((post << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | \
+ (main << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | \
+ (pre << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET))
+
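/* Editor's sketch: the helpers above replace open-coded shift-and-OR
 * sequences at every call site; e.g. the default KR TX driver programming,
 * mirroring the call sites updated later in this patch.
 */
static void sketch_wc_default_tx_driver(struct bnx2x *bp,
					struct bnx2x_phy *phy, u16 lane)
{
	/* post2=0x02, idriver=0x06, ipre=0x09 - the KR AN defaults */
	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
			 MDIO_WC_REG_TX0_TX_DRIVER + 0x10 * lane,
			 WC_TX_DRIVER(0x02, 0x06, 0x09));
}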
static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -3659,7 +3628,7 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL49_USERB0_CTRL, (3<<6));
- for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
@@ -3668,6 +3637,41 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
bnx2x_update_link_attr(params, vars->link_attr_sync);
}
+static void bnx2x_disable_kr2(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_phy *phy)
+{
+ struct bnx2x *bp = params->bp;
+ int i;
+ static struct bnx2x_reg_set reg_set[] = {
+ /* Step 1 - Program the TX/RX alignment markers */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
+ };
+ DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
+
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+ bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
+ vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
+ bnx2x_update_link_attr(params, vars->link_attr_sync);
+
+ vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
+}
+
static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
struct link_params *params)
{
@@ -3698,8 +3702,8 @@ static void bnx2x_warpcore_restart_AN_KR(struct bnx2x_phy *phy,
static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars) {
- u16 lane, i, cl72_ctrl, an_adv = 0;
- u16 ucode_ver;
+ u16 lane, i, cl72_ctrl, an_adv = 0, val;
+ u32 wc_lane_config;
struct bnx2x *bp = params->bp;
static struct bnx2x_reg_set reg_set[] = {
{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
@@ -3713,7 +3717,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
};
DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
	/* Set to default registers that may be overridden by 10G force */
- for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
@@ -3728,7 +3732,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
if (((vars->line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
(vars->line_speed == SPEED_1000)) {
- u32 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
+ u16 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
an_adv |= (1<<5);
/* Enable CL37 1G Parallel Detect */
@@ -3753,20 +3757,13 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
/* Set Transmit PMD settings */
lane = bnx2x_get_warpcore_lane(phy, params);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
- ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+ WC_TX_DRIVER(0x02, 0x06, 0x09));
/* Configure the next lane if dual mode */
if (phy->flags & FLAGS_WC_DUAL_MODE)
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX0_TX_DRIVER + 0x10*(lane+1),
- ((0x02 <<
- MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x06 <<
- MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x09 <<
- MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+ WC_TX_DRIVER(0x02, 0x06, 0x09));
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL,
0x03f0);
@@ -3797,15 +3794,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
/* Advertise pause */
bnx2x_ext_phy_set_pause(params, phy, vars);
- /* Set KR Autoneg Work-Around flag for Warpcore version older than D108
- */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_UC_INFO_B1_VERSION, &ucode_ver);
- if (ucode_ver < 0xd108) {
- DP(NETIF_MSG_LINK, "Enable AN KR work-around. WC ver:0x%x\n",
- ucode_ver);
- vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
- }
+ vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL5_MISC7, 0x100);
@@ -3829,6 +3818,33 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
bnx2x_set_aer_mmd(params, phy);
bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
+ } else {
+ /* Enable Auto-Detect to support 1G over CL37 as well */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10);
+ wc_lane_config = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ shared_hw_config.wc_lane_config));
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4), &val);
+ /* Force cl48 sync_status LOW to avoid getting stuck in CL73
+ * parallel-detect loop when CL73 and CL37 are enabled.
+ */
+ val |= 1 << 11;
+
+ /* Restore Polarity settings in case it was run over by
+ * previous link owner
+ */
+ if (wc_lane_config &
+ (SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED << lane))
+ val |= 3 << 2;
+ else
+ val &= ~(3 << 2);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4),
+ val);
+
+ bnx2x_disable_kr2(params, vars, phy);
}
/* Enable Autoneg: only on the main lane */
@@ -3854,7 +3870,7 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
{MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2}
};
- for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
@@ -3909,6 +3925,8 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 misc1_val, tap_val, tx_driver_val, lane, val;
+ u32 cfg_tap_val, tx_drv_brdct, tx_equal;
+
/* Hold rxSeqStart */
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000);
@@ -3952,23 +3970,33 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
if (is_xfi) {
misc1_val |= 0x5;
- tap_val = ((0x08 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
- (0x37 << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
- (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
- tx_driver_val =
- ((0x00 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x03 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
-
+ tap_val = WC_TX_FIR(0x08, 0x37, 0x00);
+ tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03);
} else {
+ cfg_tap_val = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].
+ sfi_tap_values));
+
+ tx_equal = cfg_tap_val & PORT_HW_CFG_TX_EQUALIZATION_MASK;
+
+ tx_drv_brdct = (cfg_tap_val &
+ PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >>
+ PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT;
+
misc1_val |= 0x9;
- tap_val = ((0x0f << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
- (0x2b << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
- (0x02 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
- tx_driver_val =
- ((0x03 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
+
+		/* TAP values are controlled by nvram if the value there isn't 0 */
+ if (tx_equal)
+ tap_val = (u16)tx_equal;
+ else
+ tap_val = WC_TX_FIR(0x0f, 0x2b, 0x02);
+
+ if (tx_drv_brdct)
+ tx_driver_val = WC_TX_DRIVER(0x03, (u16)tx_drv_brdct,
+ 0x06);
+ else
+ tx_driver_val = WC_TX_DRIVER(0x03, 0x02, 0x06);
}
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
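/* Editor's note (worked example, hedged): with the nvram layout added to
 * port_hw_config earlier in this patch, a sfi_tap_values word of 0x0002BEB2
 * decodes as tx_equal = 0x0002BEB2 & 0x0000FFFF = 0xBEB2 (the BRCM-tested
 * equalization) and tx_drv_brdct = (0x0002BEB2 & 0x000F0000) >> 16 = 0x2
 * (the BRCM-tested IDRIVER), so both nvram overrides take effect above.
 */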
@@ -4105,15 +4133,11 @@ static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
/* Set Transmit PMD settings */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX_FIR_TAP,
- ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
- (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
- (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET) |
- MDIO_WC_REG_TX_FIR_TAP_ENABLE));
+ (WC_TX_FIR(0x12, 0x2d, 0x00) |
+ MDIO_WC_REG_TX_FIR_TAP_ENABLE));
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
- ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+ WC_TX_DRIVER(0x02, 0x02, 0x02));
}
static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
@@ -4242,7 +4266,7 @@ static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_CONTROL, (3<<13));
- for (i = 0; i < sizeof(wc_regs)/sizeof(struct bnx2x_reg_set); i++)
+ for (i = 0; i < ARRAY_SIZE(wc_regs); i++)
bnx2x_cl45_write(bp, phy, wc_regs[i].devad, wc_regs[i].reg,
wc_regs[i].val);
@@ -4330,20 +4354,14 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u32 serdes_net_if;
u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0;
- u16 lane = bnx2x_get_warpcore_lane(phy, params);
vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1;
if (!vars->turn_to_run_wc_rt)
return;
- /* Return if there is no link partner */
- if (!(bnx2x_warpcore_get_sigdet(phy, params))) {
- DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n");
- return;
- }
-
if (vars->rx_tx_asic_rst) {
+ u16 lane = bnx2x_get_warpcore_lane(phy, params);
serdes_net_if = (REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
port_hw_config[params->port].default_cfg)) &
@@ -4358,14 +4376,8 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
/*10G KR*/
lnkup_kr = (gp_status1 >> (12+lane)) & 0x1;
- DP(NETIF_MSG_LINK,
- "gp_status1 0x%x\n", gp_status1);
-
if (lnkup_kr || lnkup) {
- vars->rx_tx_asic_rst = 0;
- DP(NETIF_MSG_LINK,
- "link up, rx_tx_asic_rst 0x%x\n",
- vars->rx_tx_asic_rst);
+ vars->rx_tx_asic_rst = 0;
} else {
/* Reset the lane to see if link comes up.*/
bnx2x_warpcore_reset_lane(bp, phy, 1);
@@ -4490,10 +4502,14 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
* enabled transmitter to avoid current leakage in case
* no module is connected
*/
- if (bnx2x_is_sfp_module_plugged(phy, params))
- bnx2x_sfp_module_detection(phy, params);
- else
- bnx2x_sfp_e3_set_transmitter(params, phy, 1);
+ if ((params->loopback_mode == LOOPBACK_NONE) ||
+ (params->loopback_mode == LOOPBACK_EXT)) {
+ if (bnx2x_is_sfp_module_plugged(phy, params))
+ bnx2x_sfp_module_detection(phy, params);
+ else
+ bnx2x_sfp_e3_set_transmitter(params,
+ phy, 1);
+ }
bnx2x_warpcore_config_sfi(phy, params);
break;
@@ -4748,6 +4764,12 @@ void bnx2x_link_status_update(struct link_params *params,
vars->link_status = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region,
port_mb[port].link_status));
+
+ /* Force link UP in non LOOPBACK_EXT loopback mode(s) */
+ if (params->loopback_mode != LOOPBACK_NONE &&
+ params->loopback_mode != LOOPBACK_EXT)
+ vars->link_status |= LINK_STATUS_LINK_UP;
+
if (bnx2x_eee_has_cap(params))
vars->eee_status = REG_RD(bp, params->shmem2_base +
offsetof(struct shmem2_region,
@@ -5734,6 +5756,11 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
duplex);
+ /* In case of KR link down, start up the recovering procedure */
+ if ((!link_up) && (phy->media_type == ETH_PHY_KR) &&
+ (!(phy->flags & FLAGS_WC_DUAL_MODE)))
+ vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
+
DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
vars->duplex, vars->flow_ctrl, vars->link_status);
return rc;
@@ -6322,9 +6349,15 @@ int bnx2x_set_led(struct link_params *params,
* intended override.
*/
break;
- } else
+ } else {
+ u32 nig_led_mode = ((params->hw_led_mode <<
+ SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY2) ?
+ (SHARED_HW_CFG_LED_PHY1 >>
+ SHARED_HW_CFG_LED_MODE_SHIFT) : hw_led_mode;
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
- hw_led_mode);
+ nig_led_mode);
+ }
REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
/* Set blinking rate to ~15.9Hz */
@@ -6452,7 +6485,6 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
static int bnx2x_link_initialize(struct link_params *params,
struct link_vars *vars)
{
- int rc = 0;
u8 phy_index, non_ext_phy;
struct bnx2x *bp = params->bp;
/* In case of external phy existence, the line speed would be the
@@ -6480,12 +6512,15 @@ static int bnx2x_link_initialize(struct link_params *params,
(CHIP_IS_E1x(bp) ||
CHIP_IS_E2(bp)))
bnx2x_set_parallel_detection(phy, params);
- if (params->phy[INT_PHY].config_init)
- params->phy[INT_PHY].config_init(phy,
- params,
- vars);
+ if (params->phy[INT_PHY].config_init)
+ params->phy[INT_PHY].config_init(phy, params, vars);
}
+	/* Re-read this value in case it was changed inside config_init due to
+	 * limitations of the optic module
+	 */
+ vars->line_speed = params->phy[INT_PHY].req_line_speed;
+
/* Init external phy*/
if (non_ext_phy) {
if (params->phy[INT_PHY].supported &
@@ -6522,7 +6557,7 @@ static int bnx2x_link_initialize(struct link_params *params,
NIG_STATUS_XGXS0_LINK_STATUS |
NIG_STATUS_SERDES0_LINK_STATUS |
NIG_MASK_MI_INT));
- return rc;
+ return 0;
}
static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
@@ -7752,7 +7787,8 @@ static void bnx2x_sfp_set_transmitter(struct link_params *params,
static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
- u16 addr, u8 byte_cnt, u8 *o_buf)
+ u8 dev_addr, u16 addr, u8 byte_cnt,
+ u8 *o_buf, u8 is_init)
{
struct bnx2x *bp = params->bp;
u16 val = 0;
@@ -7765,7 +7801,7 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* Set the read command byte count */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
- (byte_cnt | 0xa000));
+ (byte_cnt | (dev_addr << 8)));
/* Set the read command address */
bnx2x_cl45_write(bp, phy,
@@ -7839,6 +7875,7 @@ static void bnx2x_warpcore_power_module(struct link_params *params,
}
static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
+ u8 dev_addr,
u16 addr, u8 byte_cnt,
u8 *o_buf, u8 is_init)
{
@@ -7863,7 +7900,7 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
usleep_range(1000, 2000);
bnx2x_warpcore_power_module(params, 1);
}
- rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
+ rc = bnx2x_bsc_read(params, bp, dev_addr, addr32, 0, byte_cnt,
data_array);
} while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
@@ -7879,7 +7916,8 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
- u16 addr, u8 byte_cnt, u8 *o_buf)
+ u8 dev_addr, u16 addr, u8 byte_cnt,
+ u8 *o_buf, u8 is_init)
{
struct bnx2x *bp = params->bp;
u16 val, i;
@@ -7890,6 +7928,15 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
return -EINVAL;
}
+	/* Set 2-wire transfer rate of SFP+ module EEPROM
+	 * to 100kHz since some DACs (direct attach cables) do
+	 * not work at 400kHz.
+	 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
+ ((dev_addr << 8) | 1));
+
/* Need to read from 1.8000 to clear it */
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
@@ -7962,26 +8009,44 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
return -EINVAL;
}
-
int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params, u16 addr,
- u8 byte_cnt, u8 *o_buf)
+ struct link_params *params, u8 dev_addr,
+ u16 addr, u16 byte_cnt, u8 *o_buf)
{
- int rc = -EOPNOTSUPP;
+ int rc = 0;
+ struct bnx2x *bp = params->bp;
+ u8 xfer_size;
+ u8 *user_data = o_buf;
+ read_sfp_module_eeprom_func_p read_func;
+
+ if ((dev_addr != 0xa0) && (dev_addr != 0xa2)) {
+ DP(NETIF_MSG_LINK, "invalid dev_addr 0x%x\n", dev_addr);
+ return -EINVAL;
+ }
+
switch (phy->type) {
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf);
- break;
+ read_func = bnx2x_8726_read_sfp_module_eeprom;
+ break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
- rc = bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf);
- break;
+ read_func = bnx2x_8727_read_sfp_module_eeprom;
+ break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
- rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf, 0);
- break;
+ read_func = bnx2x_warpcore_read_sfp_module_eeprom;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ while (!rc && (byte_cnt > 0)) {
+ xfer_size = (byte_cnt > SFP_EEPROM_PAGE_SIZE) ?
+ SFP_EEPROM_PAGE_SIZE : byte_cnt;
+ rc = read_func(phy, params, dev_addr, addr, xfer_size,
+ user_data, 0);
+ byte_cnt -= xfer_size;
+ user_data += xfer_size;
+ addr += xfer_size;
}
return rc;
}
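/* Editor's sketch: a caller of the reworked EEPROM reader above. The
 * function now takes the I2C device address explicitly and loops internally
 * in SFP_EEPROM_PAGE_SIZE chunks; this mirrors the verify path further
 * below, with only the wrapper name assumed.
 */
static int sketch_read_sfp_vendor_name(struct bnx2x_phy *phy,
				       struct link_params *params,
				       u8 *name_buf)
{
	return bnx2x_read_sfp_module_eeprom(phy, params, I2C_DEV_ADDR_A0,
					    SFP_EEPROM_VENDOR_NAME_ADDR,
					    SFP_EEPROM_VENDOR_NAME_SIZE,
					    name_buf);
}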
@@ -7998,6 +8063,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
/* First check for copper cable */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
+ I2C_DEV_ADDR_A0,
SFP_EEPROM_CON_TYPE_ADDR,
2,
(u8 *)val) != 0) {
@@ -8015,6 +8081,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
*/
if (bnx2x_read_sfp_module_eeprom(phy,
params,
+ I2C_DEV_ADDR_A0,
SFP_EEPROM_FC_TX_TECH_ADDR,
1,
&copper_module_type) != 0) {
@@ -8027,36 +8094,46 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
if (copper_module_type &
SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
DP(NETIF_MSG_LINK, "Active Copper cable detected\n");
- check_limiting_mode = 1;
- } else if (copper_module_type &
- SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+ *edc_mode = EDC_MODE_ACTIVE_DAC;
+ else
+ check_limiting_mode = 1;
+ } else {
+ *edc_mode = EDC_MODE_PASSIVE_DAC;
+		/* Even if the PASSIVE_DAC indication is not set,
+ * treat it as a passive DAC cable, since some cables
+ * don't have this indication.
+ */
+ if (copper_module_type &
+ SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
DP(NETIF_MSG_LINK,
"Passive Copper cable detected\n");
- *edc_mode =
- EDC_MODE_PASSIVE_DAC;
- } else {
- DP(NETIF_MSG_LINK,
- "Unknown copper-cable-type 0x%x !!!\n",
- copper_module_type);
- return -EINVAL;
+ } else {
+ DP(NETIF_MSG_LINK,
+ "Unknown copper-cable-type\n");
+ }
}
break;
}
case SFP_EEPROM_CON_TYPE_VAL_LC:
+ case SFP_EEPROM_CON_TYPE_VAL_RJ45:
check_limiting_mode = 1;
if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK |
SFP_EEPROM_COMP_CODE_LR_MASK |
SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
- DP(NETIF_MSG_LINK, "1G Optic module detected\n");
+ DP(NETIF_MSG_LINK, "1G SFP module detected\n");
gport = params->port;
phy->media_type = ETH_PHY_SFP_1G_FIBER;
- phy->req_line_speed = SPEED_1000;
- if (!CHIP_IS_E1x(bp))
- gport = BP_PATH(bp) + (params->port << 1);
- netdev_err(bp->dev, "Warning: Link speed was forced to 1000Mbps."
- " Current SFP module in port %d is not"
- " compliant with 10G Ethernet\n",
- gport);
+ if (phy->req_line_speed != SPEED_1000) {
+ phy->req_line_speed = SPEED_1000;
+ if (!CHIP_IS_E1x(bp)) {
+ gport = BP_PATH(bp) +
+ (params->port << 1);
+ }
+ netdev_err(bp->dev,
+ "Warning: Link speed was forced to 1000Mbps. Current SFP module in port %d is not compliant with 10G Ethernet\n",
+ gport);
+ }
} else {
int idx, cfg_idx = 0;
DP(NETIF_MSG_LINK, "10G Optic module detected\n");
@@ -8095,6 +8172,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
u8 options[SFP_EEPROM_OPTIONS_SIZE];
if (bnx2x_read_sfp_module_eeprom(phy,
params,
+ I2C_DEV_ADDR_A0,
SFP_EEPROM_OPTIONS_ADDR,
SFP_EEPROM_OPTIONS_SIZE,
options) != 0) {
@@ -8161,6 +8239,7 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
/* Format the warning message */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
+ I2C_DEV_ADDR_A0,
SFP_EEPROM_VENDOR_NAME_ADDR,
SFP_EEPROM_VENDOR_NAME_SIZE,
(u8 *)vendor_name))
@@ -8169,6 +8248,7 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
if (bnx2x_read_sfp_module_eeprom(phy,
params,
+ I2C_DEV_ADDR_A0,
SFP_EEPROM_PART_NO_ADDR,
SFP_EEPROM_PART_NO_SIZE,
(u8 *)vendor_pn))
@@ -8199,12 +8279,13 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
for (timeout = 0; timeout < 60; timeout++) {
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
- rc = bnx2x_warpcore_read_sfp_module_eeprom(phy,
- params, 1,
- 1, &val, 1);
+ rc = bnx2x_warpcore_read_sfp_module_eeprom(
+ phy, params, I2C_DEV_ADDR_A0, 1, 1, &val,
+ 1);
else
- rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1,
- &val);
+ rc = bnx2x_read_sfp_module_eeprom(phy, params,
+ I2C_DEV_ADDR_A0,
+ 1, 1, &val);
if (rc == 0) {
DP(NETIF_MSG_LINK,
"SFP+ module initialization took %d ms\n",
@@ -8213,7 +8294,8 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
}
usleep_range(5000, 10000);
}
- rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val);
+ rc = bnx2x_read_sfp_module_eeprom(phy, params, I2C_DEV_ADDR_A0,
+ 1, 1, &val);
return rc;
}
@@ -8370,15 +8452,6 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
val);
-
- /* Set 2-wire transfer rate of SFP+ module EEPROM
- * to 100Khz since some DACs(direct attached cables) do
- * not work at 400Khz.
- */
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
- 0xa001);
break;
default:
DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
@@ -8502,6 +8575,7 @@ static void bnx2x_warpcore_set_limiting_mode(struct link_params *params,
mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
break;
case EDC_MODE_PASSIVE_DAC:
+ case EDC_MODE_ACTIVE_DAC:
mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC;
break;
default:
@@ -8539,8 +8613,8 @@ static void bnx2x_set_limiting_mode(struct link_params *params,
}
}
-int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
- struct link_params *params)
+static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 edc_mode;
@@ -8641,7 +8715,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL5_MISC6,
&rx_tx_in_reset);
- if (!rx_tx_in_reset) {
+ if ((!rx_tx_in_reset) &&
+ (params->link_flags &
+ PHY_INITIALIZED)) {
bnx2x_warpcore_reset_lane(bp, phy, 1);
bnx2x_warpcore_config_sfi(phy, params);
bnx2x_warpcore_reset_lane(bp, phy, 0);
@@ -9520,8 +9596,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
} else {
/* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
/* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
- for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set);
- i++)
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad,
reg_set[i].reg, reg_set[i].val);
@@ -9592,7 +9667,7 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LINK_SIGNAL, val);
- for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
@@ -9676,32 +9751,41 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
an_1000_val);
- /* set 100 speed advertisement */
- if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))) {
- an_10_100_val |= (1<<7);
- /* Enable autoneg and restart autoneg for legacy speeds */
- autoneg_val |= (1<<9 | 1<<12);
-
- if (phy->req_duplex == DUPLEX_FULL)
+ /* Set 10/100 speed advertisement */
+ if (phy->req_line_speed == SPEED_AUTO_NEG) {
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
+ /* Enable autoneg and restart autoneg for legacy speeds
+ */
+ autoneg_val |= (1<<9 | 1<<12);
an_10_100_val |= (1<<8);
- DP(NETIF_MSG_LINK, "Advertising 100M\n");
- }
- /* set 10 speed advertisement */
- if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) &&
- (phy->supported &
- (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full)))) {
- an_10_100_val |= (1<<5);
- autoneg_val |= (1<<9 | 1<<12);
- if (phy->req_duplex == DUPLEX_FULL)
+ DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
+ }
+
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
+ /* Enable autoneg and restart autoneg for legacy speeds
+ */
+ autoneg_val |= (1<<9 | 1<<12);
+ an_10_100_val |= (1<<7);
+ DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
+ }
+
+ if ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
+ (phy->supported & SUPPORTED_10baseT_Full)) {
an_10_100_val |= (1<<6);
- DP(NETIF_MSG_LINK, "Advertising 10M\n");
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
+ }
+
+ if ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) &&
+ (phy->supported & SUPPORTED_10baseT_Half)) {
+ an_10_100_val |= (1<<5);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
+ }
}
/* Only 10/100 are allowed to work in FORCE mode */
@@ -10273,7 +10357,8 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
/* Determine if EEE was negotiated */
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
bnx2x_eee_an_resolve(phy, params, vars);
}
@@ -10416,6 +10501,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LED1_MASK,
0x0);
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+ /* Disable MI_INT interrupt before setting LED4
+ * source to constant off.
+ */
+ if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+ params->port*4) &
+ NIG_MASK_MI_INT) {
+ params->link_flags |=
+ LINK_FLAGS_INT_DISABLED;
+
+ bnx2x_bits_dis(
+ bp,
+ NIG_REG_MASK_INTERRUPT_PORT0 +
+ params->port*4,
+ NIG_MASK_MI_INT);
+ }
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_SIGNAL_MASK,
+ 0x0);
+ }
}
break;
case LED_MODE_ON:
@@ -10462,6 +10569,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LED1_MASK,
0x20);
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+ /* Disable MI_INT interrupt before setting LED4
+ * source to constant on.
+ */
+ if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+ params->port*4) &
+ NIG_MASK_MI_INT) {
+ params->link_flags |=
+ LINK_FLAGS_INT_DISABLED;
+
+ bnx2x_bits_dis(
+ bp,
+ NIG_REG_MASK_INTERRUPT_PORT0 +
+ params->port*4,
+ NIG_MASK_MI_INT);
+ }
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_SIGNAL_MASK,
+ 0x20);
+ }
}
break;
@@ -10510,10 +10639,18 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
0x40);
} else {
+ /* EXTPHY2 LED mode indicate that the 100M/1G/10G LED
+ * sources are all wired through LED1, rather than only
+ * 10G in other modes.
+ */
+ val = ((params->hw_led_mode <<
+ SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY2) ? 0x98 : 0x80;
+
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LED1_MASK,
- 0x80);
+ val);
/* Tell LED3 to blink on source */
bnx2x_cl45_read(bp, phy,
@@ -10526,6 +10663,22 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LINK_SIGNAL,
val);
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+ /* Restore LED4 source to external link,
+ * and re-enable interrupts.
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_SIGNAL_MASK,
+ 0x40);
+ if (params->link_flags &
+ LINK_FLAGS_INT_DISABLED) {
+ bnx2x_link_int_enable(params);
+ params->link_flags &=
+ ~LINK_FLAGS_INT_DISABLED;
+ }
+ }
}
break;
}
@@ -10653,9 +10806,9 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
(1<<11));
if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
- (phy->req_line_speed == SPEED_1000)) {
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (phy->req_line_speed == SPEED_1000)) {
an_1000_val |= (1<<8);
autoneg_val |= (1<<9 | 1<<12);
if (phy->req_duplex == DUPLEX_FULL)
@@ -10671,30 +10824,32 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
0x09,
&an_1000_val);
- /* Set 100 speed advertisement */
- if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) {
- an_10_100_val |= (1<<7);
- /* Enable autoneg and restart autoneg for legacy speeds */
- autoneg_val |= (1<<9 | 1<<12);
-
- if (phy->req_duplex == DUPLEX_FULL)
- an_10_100_val |= (1<<8);
- DP(NETIF_MSG_LINK, "Advertising 100M\n");
- }
-
- /* Set 10 speed advertisement */
- if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
- an_10_100_val |= (1<<5);
- autoneg_val |= (1<<9 | 1<<12);
- if (phy->req_duplex == DUPLEX_FULL)
+ /* Advertise 10/100 link speed */
+ if (phy->req_line_speed == SPEED_AUTO_NEG) {
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
+ an_10_100_val |= (1<<5);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
+ }
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {
an_10_100_val |= (1<<6);
- DP(NETIF_MSG_LINK, "Advertising 10M\n");
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
+ }
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
+ an_10_100_val |= (1<<7);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
+ }
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
+ an_10_100_val |= (1<<8);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
+ }
}
/* Only 10/100 are allowed to work in FORCE mode */
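The rewritten block above replaces the two combined 10/100 checks with four independent ones, one per speed/duplex capability, each setting its ANAR-style advertisement bit (5 = 10M-HD, 6 = 10M-FD, 7 = 100M-HD, 8 = 100M-FD, matching the values in the diff) and the autoneg enable/restart control bits (12 and 9). A hedged sketch of the same mapping, with made-up capability names:

	/* Illustrative sketch (not driver code): build an advertisement word
	 * from a capability mask, mirroring the per-speed checks above. */
	#include <stdint.h>
	#include <stdio.h>

	#define CAP_10M_HALF   (1u << 0)
	#define CAP_10M_FULL   (1u << 1)
	#define CAP_100M_HALF  (1u << 2)
	#define CAP_100M_FULL  (1u << 3)

	static uint16_t build_adv(uint32_t cap_mask, uint16_t *autoneg_val)
	{
		uint16_t adv = 0;

		if (cap_mask & CAP_10M_HALF)
			adv |= 1 << 5;          /* 10BASE-T half duplex */
		if (cap_mask & CAP_10M_FULL)
			adv |= 1 << 6;          /* 10BASE-T full duplex */
		if (cap_mask & CAP_100M_HALF)
			adv |= 1 << 7;          /* 100BASE-TX half duplex */
		if (cap_mask & CAP_100M_FULL)
			adv |= 1 << 8;          /* 100BASE-TX full duplex */
		if (adv)                        /* enable + restart autoneg */
			*autoneg_val |= (1 << 9) | (1 << 12);
		return adv;
	}

	int main(void)
	{
		uint16_t autoneg = 0;
		printf("adv=0x%04x autoneg=0x%04x\n",
		       build_adv(CAP_100M_FULL | CAP_10M_FULL, &autoneg), autoneg);
		return 0;
	}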
@@ -11785,6 +11940,8 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
phy->media_type = ETH_PHY_KR;
phy->flags |= FLAGS_WC_DUAL_MODE;
phy->supported &= (SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
SUPPORTED_Autoneg |
SUPPORTED_FIBRE |
SUPPORTED_Pause |
@@ -12172,7 +12329,7 @@ static void bnx2x_init_bmac_loopback(struct link_params *params,
bnx2x_xgxs_deassert(params);
- /* set bmac loopback */
+ /* Set bmac loopback */
bnx2x_bmac_enable(params, vars, 1, 1);
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
@@ -12191,7 +12348,7 @@ static void bnx2x_init_emac_loopback(struct link_params *params,
vars->phy_flags = PHY_XGXS_FLAG;
bnx2x_xgxs_deassert(params);
- /* set bmac loopback */
+ /* Set bmac loopback */
bnx2x_emac_enable(params, vars, 1);
bnx2x_emac_program(params, vars);
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
@@ -12315,6 +12472,7 @@ static int bnx2x_avoid_link_flap(struct link_params *params,
u32 dont_clear_stat, lfa_sts;
struct bnx2x *bp = params->bp;
+ bnx2x_set_mdio_emac_per_phy(bp, params);
/* Sync the link parameters */
bnx2x_link_status_update(params, vars);
@@ -12451,6 +12609,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
params->req_line_speed[0], params->req_flow_ctrl[0]);
DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n",
params->req_line_speed[1], params->req_flow_ctrl[1]);
+ DP(NETIF_MSG_LINK, "req_adv_flow_ctrl 0x%x\n", params->req_fc_auto_adv);
vars->link_status = 0;
vars->phy_link_up = 0;
vars->link_up = 0;
@@ -12459,6 +12618,8 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
vars->mac_type = MAC_TYPE_NONE;
vars->phy_flags = 0;
+ vars->check_kr2_recovery_cnt = 0;
+ params->link_flags = PHY_INITIALIZED;
/* Driver opens NIG-BRB filters */
bnx2x_set_rx_filter(params, 1);
/* Check if link flap can be avoided */
@@ -12623,6 +12784,7 @@ int bnx2x_lfa_reset(struct link_params *params,
struct bnx2x *bp = params->bp;
vars->link_up = 0;
vars->phy_flags = 0;
+ params->link_flags &= ~PHY_INITIALIZED;
if (!params->lfa_base)
return bnx2x_link_reset(params, vars, 1);
/*
@@ -13007,64 +13169,6 @@ static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
return 0;
}
-static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
- struct bnx2x_phy *phy,
- u8 port)
-{
- u16 val, cnt;
- /* Wait for FW completing its initialization. */
- for (cnt = 0; cnt < 1500; cnt++) {
- bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL, &val);
- if (!(val & (1<<15)))
- break;
- usleep_range(1000, 2000);
- }
- if (cnt >= 1500) {
- DP(NETIF_MSG_LINK, "84833 reset timeout\n");
- return -EINVAL;
- }
-
- /* Put the port in super isolate mode. */
- bnx2x_cl45_read(bp, phy,
- MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
- val |= MDIO_84833_SUPER_ISOLATE;
- bnx2x_cl45_write(bp, phy,
- MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
-
- /* Save spirom version */
- bnx2x_save_848xx_spirom_version(phy, bp, port);
- return 0;
-}
-
-int bnx2x_pre_init_phy(struct bnx2x *bp,
- u32 shmem_base,
- u32 shmem2_base,
- u32 chip_id,
- u8 port)
-{
- int rc = 0;
- struct bnx2x_phy phy;
- if (bnx2x_populate_phy(bp, EXT_PHY1, shmem_base, shmem2_base,
- port, &phy) != 0) {
- DP(NETIF_MSG_LINK, "populate_phy failed\n");
- return -EINVAL;
- }
- bnx2x_set_mdio_clk(bp, chip_id, phy.mdio_ctrl);
- switch (phy.type) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
- rc = bnx2x_84833_pre_init_phy(bp, &phy, port);
- break;
- default:
- break;
- }
- return rc;
-}
-
static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
u32 shmem2_base_path[], u8 phy_index,
u32 ext_phy_type, u32 chip_id)
@@ -13222,6 +13326,10 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
old_status, status);
+ /* Do not touch the link in case the physical link is down */
+ if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
+ return 1;
+
/* a. Update shmem->link_status accordingly
* b. Update link_vars->link_up
*/
@@ -13271,9 +13379,9 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
* a fault, for example, due to break in the TX side of fiber.
*
******************************************************************************/
-int bnx2x_check_half_open_conn(struct link_params *params,
- struct link_vars *vars,
- u8 notify)
+static int bnx2x_check_half_open_conn(struct link_params *params,
+ struct link_vars *vars,
+ u8 notify)
{
struct bnx2x *bp = params->bp;
u32 lss_status = 0;
@@ -13369,42 +13477,6 @@ static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy,
}
}
}
-static void bnx2x_disable_kr2(struct link_params *params,
- struct link_vars *vars,
- struct bnx2x_phy *phy)
-{
- struct bnx2x *bp = params->bp;
- int i;
- static struct bnx2x_reg_set reg_set[] = {
- /* Step 1 - Program the TX/RX alignment markers */
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
- {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
- {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
- {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
- {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
- {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
- };
- DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
-
- for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
- bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
- reg_set[i].val);
- vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
- bnx2x_update_link_attr(params, vars->link_attr_sync);
-
- /* Restart AN on leading lane */
- bnx2x_warpcore_restart_AN_KR(phy, params);
-}
-
static void bnx2x_kr2_recovery(struct link_params *params,
struct link_vars *vars,
struct bnx2x_phy *phy)
@@ -13421,11 +13493,24 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
{
struct bnx2x *bp = params->bp;
u16 base_page, next_page, not_kr2_device, lane;
- int sigdet = bnx2x_warpcore_get_sigdet(phy, params);
+ int sigdet;
+ /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery:
+ * some switches tend to reinit the AN process and clear the
+ * advertised BP/NP after ~2 seconds, causing KR2 to be disabled
+ * and recovered many times.
+ */
+ if (vars->check_kr2_recovery_cnt > 0) {
+ vars->check_kr2_recovery_cnt--;
+ return;
+ }
+
+ sigdet = bnx2x_warpcore_get_sigdet(phy, params);
if (!sigdet) {
- if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
+ if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
bnx2x_kr2_recovery(params, vars, phy);
+ DP(NETIF_MSG_LINK, "No sigdet\n");
+ }
return;
}
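check_kr2_recovery_cnt debounces recovery: each periodic pass decrements the counter and bails out until it reaches zero (the counter is presumably armed when KR2 is disabled; the arming site is outside this hunk), so a partner that clears its advertised BP/NP for ~2 seconds no longer causes endless disable/recover cycles. A minimal sketch of the countdown pattern, assuming a once-per-second handler and illustrative names:

	#include <stdio.h>

	struct state { int recovery_holdoff; int kr2_enabled; };

	static void periodic_check(struct state *s, int signal_ok)
	{
		if (s->recovery_holdoff > 0) {      /* still in the quiet period */
			s->recovery_holdoff--;
			return;
		}
		if (!s->kr2_enabled && signal_ok)
			s->kr2_enabled = 1;         /* attempt recovery */
	}

	int main(void)
	{
		struct state s = { .recovery_holdoff = 5, .kr2_enabled = 0 };
		for (int t = 0; t < 7; t++) {
			periodic_check(&s, 1);
			printf("t=%d kr2=%d\n", t, s.kr2_enabled);
		}
		return 0;
	}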
@@ -13440,8 +13525,10 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
/* CL73 has not begun yet */
if (base_page == 0) {
- if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
+ if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
bnx2x_kr2_recovery(params, vars, phy);
+ DP(NETIF_MSG_LINK, "No BP\n");
+ }
return;
}
@@ -13451,13 +13538,13 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
*/
not_kr2_device = (((base_page & 0x8000) == 0) ||
(((base_page & 0x8000) &&
- ((next_page & 0xe0) == 0x2))));
+ ((next_page & 0xe0) == 0x20))));
/* In case KR2 is already disabled, check if we need to re-enable it */
if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
if (!not_kr2_device) {
DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page,
- next_page);
+ next_page);
bnx2x_kr2_recovery(params, vars, phy);
}
return;
@@ -13467,6 +13554,8 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
/* Disable KR2 on both lanes */
DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page);
bnx2x_disable_kr2(params, vars, phy);
+ /* Restart AN on leading lane */
+ bnx2x_warpcore_restart_AN_KR(phy, params);
return;
}
}
@@ -13489,7 +13578,7 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
struct bnx2x_phy *phy = &params->phy[INT_PHY];
bnx2x_set_aer_mmd(params, phy);
if ((phy->supported & SUPPORTED_20000baseKR2_Full) &&
- (phy->speed_cap_mask & SPEED_20000))
+ (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
bnx2x_check_kr2_wa(params, vars, phy);
bnx2x_check_over_curr(params, vars);
if (vars->rx_tx_asic_rst)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index ee6e7ec8545..389f5f8cb0a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -1,4 +1,4 @@
-/* Copyright 2008-2012 Broadcom Corporation
+/* Copyright 2008-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -41,6 +41,9 @@
#define SPEED_AUTO_NEG 0
#define SPEED_20000 20000
+#define I2C_DEV_ADDR_A0 0xa0
+#define I2C_DEV_ADDR_A2 0xa2
+
#define SFP_EEPROM_PAGE_SIZE 16
#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14
#define SFP_EEPROM_VENDOR_NAME_SIZE 16
@@ -54,6 +57,15 @@
#define SFP_EEPROM_SERIAL_SIZE 16
#define SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */
#define SFP_EEPROM_DATE_SIZE 6
+#define SFP_EEPROM_DIAG_TYPE_ADDR 0x5c
+#define SFP_EEPROM_DIAG_TYPE_SIZE 1
+#define SFP_EEPROM_DIAG_ADDR_CHANGE_REQ (1<<2)
+#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
+#define SFP_EEPROM_SFF_8472_COMP_SIZE 1
+
+#define SFP_EEPROM_A2_CHECKSUM_RANGE 0x5e
+#define SFP_EEPROM_A2_CC_DMI_ADDR 0x5f
+
#define PWR_FLT_ERR_MSG_LEN 250
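The new defines split SFP EEPROM access by I2C device address: A0h holds the identification page and A2h the SFF-8472 diagnostics, with byte 0x5c (the diagnostic-monitoring-type byte) indicating, among other things, whether an address-change sequence is required (bit 2, per SFP_EEPROM_DIAG_ADDR_CHANGE_REQ above). A small decoder sketch; the helper name is illustrative, and bit 6 ("DDM implemented") comes from SFF-8472 rather than from this patch:

	#include <stdint.h>
	#include <stdio.h>

	#define DIAG_ADDR_CHANGE_REQ (1u << 2)  /* matches the define above */

	static void decode_diag_type(uint8_t diag)
	{
		printf("DDM implemented: %s\n", (diag & (1u << 6)) ? "yes" : "no");
		printf("address-change sequence required: %s\n",
		       (diag & DIAG_ADDR_CHANGE_REQ) ? "yes" : "no");
	}

	int main(void) { decode_diag_type(0x68); return 0; }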
#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
@@ -307,7 +319,9 @@ struct link_params {
struct bnx2x *bp;
u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
req_flow_ctrl is set to AUTO */
- u16 rsrv1;
+ u16 link_flags;
+#define LINK_FLAGS_INT_DISABLED (1<<0)
+#define PHY_INITIALIZED (1<<1)
u32 lfa_base;
};
@@ -341,7 +355,8 @@ struct link_vars {
u32 link_status;
u32 eee_status;
u8 fault_detected;
- u8 rsrv1;
+ u8 check_kr2_recovery_cnt;
+#define CHECK_KR2_RECOVERY_CNT 5
u16 periodic_flags;
#define PERIODIC_FLAGS_LINK_EVENT 0x0001
@@ -417,8 +432,8 @@ void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params, u16 addr,
- u8 byte_cnt, u8 *o_buf);
+ struct link_params *params, u8 dev_addr,
+ u16 addr, u16 byte_cnt, u8 *o_buf);
void bnx2x_hw_reset_phy(struct link_params *params);
@@ -518,19 +533,11 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
int bnx2x_ets_e3b0_config(const struct link_params *params,
const struct link_vars *vars,
struct bnx2x_ets_params *ets_params);
-/* Read pfc statistic*/
-void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
- u32 pfc_frames_sent[2],
- u32 pfc_frames_received[2]);
+
void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
u32 chip_id, u32 shmem_base, u32 shmem2_base,
u8 port);
-int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
- struct link_params *params);
-
void bnx2x_period_func(struct link_params *params, struct link_vars *vars);
-int bnx2x_check_half_open_conn(struct link_params *params,
- struct link_vars *vars, u8 notify);
#endif /* BNX2X_LINK_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 940ef859dc6..6a8b1453a1b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1,12 +1,12 @@
/* bnx2x_main.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <linux/aer.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -59,6 +60,7 @@
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
+#include "bnx2x_vfpf.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"
@@ -74,8 +76,6 @@
#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
-#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
-
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (5*HZ)
@@ -94,38 +94,45 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);
-
-int num_queues;
-module_param(num_queues, int, 0);
+int bnx2x_num_queues;
+module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO);
MODULE_PARM_DESC(num_queues,
" Set number of queues (default is as a number of CPUs)");
static int disable_tpa;
-module_param(disable_tpa, int, 0);
+module_param(disable_tpa, int, S_IRUGO);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
-#define INT_MODE_INTx 1
-#define INT_MODE_MSI 2
-int int_mode;
-module_param(int_mode, int, 0);
+static int int_mode;
+module_param(int_mode, int, S_IRUGO);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
"(1 INT#x; 2 MSI)");
static int dropless_fc;
-module_param(dropless_fc, int, 0);
+module_param(dropless_fc, int, S_IRUGO);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
static int mrrs = -1;
-module_param(mrrs, int, 0);
+module_param(mrrs, int, S_IRUGO);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
static int debug;
-module_param(debug, int, 0);
+module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, " Default debug msglevel");
-
-
-struct workqueue_struct *bnx2x_wq;
+static struct workqueue_struct *bnx2x_wq;
+struct workqueue_struct *bnx2x_iov_wq;
+
+struct bnx2x_mac_vals {
+ u32 xmac_addr;
+ u32 xmac_val;
+ u32 emac_addr;
+ u32 emac_val;
+ u32 umac_addr;
+ u32 umac_val;
+ u32 bmac_addr;
+ u32 bmac_val[2];
+};
enum bnx2x_board_type {
BCM57710 = 0,
@@ -133,39 +140,49 @@ enum bnx2x_board_type {
BCM57711E,
BCM57712,
BCM57712_MF,
+ BCM57712_VF,
BCM57800,
BCM57800_MF,
+ BCM57800_VF,
BCM57810,
BCM57810_MF,
- BCM57840_O,
+ BCM57810_VF,
BCM57840_4_10,
BCM57840_2_20,
- BCM57840_MFO,
BCM57840_MF,
+ BCM57840_VF,
BCM57811,
- BCM57811_MF
+ BCM57811_MF,
+ BCM57840_O,
+ BCM57840_MFO,
+ BCM57811_VF
};
/* indexed by board_type, above */
static struct {
char *name;
} board_info[] = {
- { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
- { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
- { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
- { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
- { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
- { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
- { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
- { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
- { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet"},
- { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function"},
+ [BCM57710] = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
+ [BCM57711] = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
+ [BCM57711E] = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
+ [BCM57712] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
+ [BCM57712_MF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
+ [BCM57712_VF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" },
+ [BCM57800] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
+ [BCM57800_MF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
+ [BCM57800_VF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" },
+ [BCM57810] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
+ [BCM57810_MF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
+ [BCM57810_VF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" },
+ [BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
+ [BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
+ [BCM57840_MF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
+ [BCM57840_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" },
+ [BCM57811] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" },
+ [BCM57811_MF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" },
+ [BCM57840_O] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
+ [BCM57840_MFO] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
+ [BCM57811_VF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Virtual Function" }
};
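board_info now uses designated initializers, so each description is tied to its enum constant, and inserting the new *_VF entries (or moving BCM57840_O/BCM57840_MFO to the tail) can no longer silently shift every following name the way a positional initializer would. A tiny sketch of the idiom:

	/* Sketch of why designated array initializers are safer here. */
	#include <stdio.h>

	enum board { B_A, B_B, B_NEW, B_C, BOARD_MAX };

	static const char *const names[BOARD_MAX] = {
		[B_A]   = "board A",
		[B_C]   = "board C",   /* initializer order no longer matters */
		[B_B]   = "board B",
		[B_NEW] = "board NEW", /* inserting B_NEW didn't shift B_C */
	};

	int main(void) { printf("%s\n", names[B_C]); return 0; }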
#ifndef PCI_DEVICE_ID_NX2_57710
@@ -183,12 +200,18 @@ static struct {
#ifndef PCI_DEVICE_ID_NX2_57712_MF
#define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF
#endif
+#ifndef PCI_DEVICE_ID_NX2_57712_VF
+#define PCI_DEVICE_ID_NX2_57712_VF CHIP_NUM_57712_VF
+#endif
#ifndef PCI_DEVICE_ID_NX2_57800
#define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_MF
#define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF
#endif
+#ifndef PCI_DEVICE_ID_NX2_57800_VF
+#define PCI_DEVICE_ID_NX2_57800_VF CHIP_NUM_57800_VF
+#endif
#ifndef PCI_DEVICE_ID_NX2_57810
#define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810
#endif
@@ -198,6 +221,9 @@ static struct {
#ifndef PCI_DEVICE_ID_NX2_57840_O
#define PCI_DEVICE_ID_NX2_57840_O CHIP_NUM_57840_OBSOLETE
#endif
+#ifndef PCI_DEVICE_ID_NX2_57810_VF
+#define PCI_DEVICE_ID_NX2_57810_VF CHIP_NUM_57810_VF
+#endif
#ifndef PCI_DEVICE_ID_NX2_57840_4_10
#define PCI_DEVICE_ID_NX2_57840_4_10 CHIP_NUM_57840_4_10
#endif
@@ -210,29 +236,41 @@ static struct {
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_VF
+#define PCI_DEVICE_ID_NX2_57840_VF CHIP_NUM_57840_VF
+#endif
#ifndef PCI_DEVICE_ID_NX2_57811
#define PCI_DEVICE_ID_NX2_57811 CHIP_NUM_57811
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_MF
#define PCI_DEVICE_ID_NX2_57811_MF CHIP_NUM_57811_MF
#endif
+#ifndef PCI_DEVICE_ID_NX2_57811_VF
+#define PCI_DEVICE_ID_NX2_57811_VF CHIP_NUM_57811_VF
+#endif
+
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
{ 0 }
};
@@ -242,6 +280,12 @@ MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);
+
+/* Forward declarations */
+static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
+static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
+static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
+
/****************************************************************************
* General service functions
****************************************************************************/
@@ -335,6 +379,71 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE "dst_addr [none]"
+static void bnx2x_dp_dmae(struct bnx2x *bp,
+ struct dmae_command *dmae, int msglvl)
+{
+ u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
+ int i;
+
+ switch (dmae->opcode & DMAE_COMMAND_DST) {
+ case DMAE_CMD_DST_PCI:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%08x], len [%d*4], dst [%x:%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ case DMAE_CMD_DST_GRC:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->dst_addr_lo >> 2,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%08x], len [%d*4], dst [%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->dst_addr_lo >> 2,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ default:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
+ "comp_addr [%x:%08x] comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
+ "comp_addr [%x:%08x] comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ }
+
+ for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
+ DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
+ i, *(((u32 *)dmae) + i));
+}
/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
@@ -385,7 +494,7 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
return opcode;
}
-static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
+void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
struct dmae_command *dmae,
u8 src_type, u8 dst_type)
{
@@ -401,30 +510,30 @@ static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
dmae->comp_val = DMAE_COMP_VAL;
}
-/* issue a dmae command over the init-channel and wailt for completion */
-static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
- struct dmae_command *dmae)
+/* issue a dmae command over the init-channel and wait for completion */
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+ u32 *comp)
{
- u32 *wb_comp = bnx2x_sp(bp, wb_comp);
int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
int rc = 0;
- /*
- * Lock the dmae channel. Disable BHs to prevent a dead-lock
+ bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);
+
+ /* Lock the dmae channel. Disable BHs to prevent a dead-lock
* as long as this code is called both from syscall context and
* from ndo_set_rx_mode() flow that may be called from BH.
*/
spin_lock_bh(&bp->dmae_lock);
/* reset completion */
- *wb_comp = 0;
+ *comp = 0;
/* post the command on the channel used for initializations */
bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
/* wait for completion */
udelay(5);
- while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
+ while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
if (!cnt ||
(bp->recovery_state != BNX2X_RECOVERY_DONE &&
@@ -436,7 +545,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
cnt--;
udelay(50);
}
- if (*wb_comp & DMAE_PCI_ERR_FLAG) {
+ if (*comp & DMAE_PCI_ERR_FLAG) {
BNX2X_ERR("DMAE PCI error!\n");
rc = DMAE_PCI_ERROR;
}
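bnx2x_issue_dmae_with_comp now takes the completion word as a parameter instead of hard-wiring the PF's wb_comp, so other callers (such as the SR-IOV code this series adds) can run the same flow against their own buffer. The shape of the flow is: zero the completion, post the command, then poll until the device writes DMAE_COMP_VAL or the retry budget runs out. A userspace analogue under those assumptions, with a thread standing in for the DMA engine:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <unistd.h>

	#define COMP_VAL 0x1u

	static atomic_uint comp;

	static void *device(void *arg)
	{
		(void)arg;
		usleep(2000);                    /* pretend DMA is in flight */
		atomic_store(&comp, COMP_VAL);   /* device writes completion */
		return NULL;
	}

	int main(void)
	{
		pthread_t t;
		int budget = 4000;

		atomic_store(&comp, 0);                  /* reset completion */
		pthread_create(&t, NULL, device, NULL);  /* "post" the command */
		while (atomic_load(&comp) != COMP_VAL && budget-- > 0)
			usleep(50);                      /* bounded poll */
		printf(budget > 0 ? "done\n" : "timed out\n");
		pthread_join(t, NULL);
		return 0;
	}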
@@ -449,6 +558,7 @@ unlock:
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
u32 len32)
{
+ int rc;
struct dmae_command dmae;
if (!bp->dmae_ready) {
@@ -472,11 +582,18 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
dmae.len = len32;
/* issue the command and wait for completion */
- bnx2x_issue_dmae_with_comp(bp, &dmae);
+ rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
+ if (rc) {
+ BNX2X_ERR("DMAE returned failure %d\n", rc);
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#endif
+ }
}
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
+ int rc;
struct dmae_command dmae;
if (!bp->dmae_ready) {
@@ -504,7 +621,13 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
dmae.len = len32;
/* issue the command and wait for completion */
- bnx2x_issue_dmae_with_comp(bp, &dmae);
+ rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
+ if (rc) {
+ BNX2X_ERR("DMAE returned failure %d\n", rc);
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#endif
+ }
}
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
@@ -640,6 +763,10 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
return rc;
}
+#define MCPR_TRACE_BUFFER_SIZE (0x800)
+#define SCRATCH_BUFFER_SIZE(bp) \
+ (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
+
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
u32 addr, val;
@@ -664,7 +791,17 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
trace_shmem_base = bp->common.shmem_base;
else
trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
- addr = trace_shmem_base - 0x800;
+
+ /* sanity */
+ if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
+ trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
+ SCRATCH_BUFFER_SIZE(bp)) {
+ BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
+ trace_shmem_base);
+ return;
+ }
+
+ addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;
/* validate TRCB signature */
mark = REG_RD(bp, addr);
@@ -676,17 +813,24 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
/* read cyclic buffer pointer */
addr += 4;
mark = REG_RD(bp, addr);
- mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
- + ((mark + 0x3) & ~0x3) - 0x08000000;
+ mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
+ if (mark >= trace_shmem_base || mark < addr + 4) {
+ BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
+ return;
+ }
printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
printk("%s", lvl);
- for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
+
+ /* dump buffer after the mark */
+ for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
for (word = 0; word < 8; word++)
data[word] = htonl(REG_RD(bp, offset + 4*word));
data[8] = 0x0;
pr_cont("%s", (char *)data);
}
+
+ /* dump buffer before the mark */
for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
for (word = 0; word < 8; word++)
data[word] = htonl(REG_RD(bp, offset + 4*word));
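With the new sanity checks in place, the dump walks the cyclic trace buffer in two passes — from the mark to the end of the buffer, then from its start up to the mark — which yields the log in chronological order. A self-contained sketch of that two-pass ring dump (names and the toy buffer are illustrative):

	#include <stdio.h>

	static void dump_ring(const char *buf, int size, int mark)
	{
		if (mark < 0 || mark >= size) {  /* sanity: mark must be inside */
			fprintf(stderr, "mark outside buffer\n");
			return;
		}
		for (int i = mark; i < size; i++)  /* oldest data: after the mark */
			putchar(buf[i]);
		for (int i = 0; i < mark; i++)     /* newest data: before the mark */
			putchar(buf[i]);
		putchar('\n');
	}

	int main(void)
	{
		char ring[8] = { 'F', 'G', 'H', 'A', 'B', 'C', 'D', 'E' };
		dump_ring(ring, 8, 3);   /* prints ABCDEFGH */
		return 0;
	}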
@@ -701,7 +845,71 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
bnx2x_fw_dump_lvl(bp, KERN_ERR);
}
-void bnx2x_panic_dump(struct bnx2x *bp)
+static void bnx2x_hc_int_disable(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+ u32 val = REG_RD(bp, addr);
+
+ /* In E1 we must use only PCI configuration space to disable the
+ * MSI/MSIX capability; it's forbidden to disable
+ * IGU_PF_CONF_MSI_MSIX_EN in the HC block.
+ */
+ if (CHIP_IS_E1(bp)) {
+ /* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
+ * use the mask register to prevent the HC from sending
+ * interrupts after we exit the function.
+ */
+ REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
+
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+ } else
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+
+ DP(NETIF_MSG_IFDOWN,
+ "write %x to HC %d (addr 0x%x)\n",
+ val, port, addr);
+
+ /* flush all outstanding writes */
+ mmiowb();
+
+ REG_WR(bp, addr, val);
+ if (REG_RD(bp, addr) != val)
+ BNX2X_ERR("BUG! Proper val not read from IGU!\n");
+}
+
+static void bnx2x_igu_int_disable(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+
+ val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
+ IGU_PF_CONF_INT_LINE_EN |
+ IGU_PF_CONF_ATTN_BIT_EN);
+
+ DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
+
+ /* flush all outstanding writes */
+ mmiowb();
+
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+ if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
+ BNX2X_ERR("BUG! Proper val not read from IGU!\n");
+}
+
+static void bnx2x_int_disable(struct bnx2x *bp)
+{
+ if (bp->common.int_block == INT_BLOCK_HC)
+ bnx2x_hc_int_disable(bp);
+ else
+ bnx2x_igu_int_disable(bp);
+}
+
+void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
{
int i;
u16 j;
@@ -711,6 +919,8 @@ void bnx2x_panic_dump(struct bnx2x *bp)
u16 start = 0, end = 0;
u8 cos;
#endif
+ if (IS_PF(bp) && disable_int)
+ bnx2x_int_disable(bp);
bp->stats_state = STATS_STATE_DISABLED;
bp->eth_stats.unrecoverable_error++;
@@ -720,34 +930,41 @@ void bnx2x_panic_dump(struct bnx2x *bp)
/* Indices */
/* Common */
- BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
- bp->def_idx, bp->def_att_idx, bp->attn_state,
- bp->spq_prod_idx, bp->stats_counter);
- BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
- bp->def_status_blk->atten_status_block.attn_bits,
- bp->def_status_blk->atten_status_block.attn_bits_ack,
- bp->def_status_blk->atten_status_block.status_block_id,
- bp->def_status_blk->atten_status_block.attn_bits_index);
- BNX2X_ERR(" def (");
- for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
- pr_cont("0x%x%s",
- bp->def_status_blk->sp_sb.index_values[i],
- (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
-
- for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
- *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
- CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
- i*sizeof(u32));
-
- pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
- sp_sb_data.igu_sb_id,
- sp_sb_data.igu_seg_id,
- sp_sb_data.p_func.pf_id,
- sp_sb_data.p_func.vnic_id,
- sp_sb_data.p_func.vf_id,
- sp_sb_data.p_func.vf_valid,
- sp_sb_data.state);
-
+ if (IS_PF(bp)) {
+ struct host_sp_status_block *def_sb = bp->def_status_blk;
+ int data_size, cstorm_offset;
+
+ BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
+ bp->def_idx, bp->def_att_idx, bp->attn_state,
+ bp->spq_prod_idx, bp->stats_counter);
+ BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
+ def_sb->atten_status_block.attn_bits,
+ def_sb->atten_status_block.attn_bits_ack,
+ def_sb->atten_status_block.status_block_id,
+ def_sb->atten_status_block.attn_bits_index);
+ BNX2X_ERR(" def (");
+ for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
+ pr_cont("0x%x%s",
+ def_sb->sp_sb.index_values[i],
+ (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
+
+ data_size = sizeof(struct hc_sp_status_block_data) /
+ sizeof(u32);
+ cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
+ for (i = 0; i < data_size; i++)
+ *((u32 *)&sp_sb_data + i) =
+ REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
+ i * sizeof(u32));
+
+ pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
+ sp_sb_data.igu_sb_id,
+ sp_sb_data.igu_seg_id,
+ sp_sb_data.p_func.pf_id,
+ sp_sb_data.p_func.vnic_id,
+ sp_sb_data.p_func.vf_id,
+ sp_sb_data.p_func.vf_valid,
+ sp_sb_data.state);
+ }
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -805,6 +1022,11 @@ void bnx2x_panic_dump(struct bnx2x *bp)
pr_cont("0x%x%s",
fp->sb_index_values[j],
(j == loop - 1) ? ")" : " ");
+
+ /* VF cannot access FW reflection for status block */
+ if (IS_VF(bp))
+ continue;
+
/* fw sb data */
data_size = CHIP_IS_E1x(bp) ?
sizeof(struct hc_status_block_data_e1x) :
@@ -847,7 +1069,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
hc_sm_p[j].timer_value);
}
- /* Indecies data */
+ /* Indices data */
for (j = 0; j < loop; j++) {
pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
hc_index_p[j].flags,
@@ -856,6 +1078,20 @@ void bnx2x_panic_dump(struct bnx2x *bp)
}
#ifdef BNX2X_STOP_ON_ERROR
+ if (IS_PF(bp)) {
+ /* event queue */
+ BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
+ for (i = 0; i < NUM_EQ_DESC; i++) {
+ u32 *data = (u32 *)&bp->eq_ring[i].message.data;
+
+ BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
+ i, bp->eq_ring[i].message.opcode,
+ bp->eq_ring[i].message.error);
+ BNX2X_ERR("data: %x %x %x\n",
+ data[0], data[1], data[2]);
+ }
+ }
+
/* Rings */
/* Rx */
for_each_valid_rx_queue(bp, i) {
@@ -920,8 +1156,10 @@ void bnx2x_panic_dump(struct bnx2x *bp)
}
}
#endif
- bnx2x_fw_dump(bp);
- bnx2x_mc_assert(bp);
+ if (IS_PF(bp)) {
+ bnx2x_fw_dump(bp);
+ bnx2x_mc_assert(bp);
+ }
BNX2X_ERR("end crash dump -----------------\n");
}
@@ -931,7 +1169,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
* bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
* initialization.
*/
-#define FLR_WAIT_USEC 10000 /* 10 miliseconds */
+#define FLR_WAIT_USEC 10000 /* 10 milliseconds */
#define FLR_WAIT_INTERVAL 50 /* usec */
#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */
@@ -1027,8 +1265,8 @@ static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
return val;
}
-static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
- char *msg, u32 poll_cnt)
+int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
+ char *msg, u32 poll_cnt)
{
u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
if (val != 0) {
@@ -1038,7 +1276,8 @@ static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
return 0;
}
-static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
+/* Common routines with VF FLR cleanup */
+u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
{
/* adjust polling timeout */
if (CHIP_REV_IS_EMUL(bp))
@@ -1050,7 +1289,7 @@ static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
return FLR_POLL_CNT;
}
-static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
+void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
{
struct pbf_pN_cmd_regs cmd_regs[] = {
{0, (CHIP_IS_E3B0(bp)) ?
@@ -1109,7 +1348,6 @@ static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
-
/* Verify the transmission buffers are flushed P0, P1, P4 */
for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
@@ -1124,12 +1362,9 @@ static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
#define OP_GEN_AGG_VECT(index) \
(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
-
-static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
- u32 poll_cnt)
+int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
{
- struct sdm_op_gen op_gen = {0};
-
+ u32 op_gen_command = 0;
u32 comp_addr = BAR_CSTRORM_INTMEM +
CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
int ret = 0;
@@ -1139,27 +1374,28 @@ static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
return 1;
}
- op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
- op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
- op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
- op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
+ op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
+ op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
+ op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
+ op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
- REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);
+ REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);
if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
BNX2X_ERR("FW final cleanup did not succeed\n");
DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
(REG_RD(bp, comp_addr)));
- ret = 1;
+ bnx2x_panic();
+ return 1;
}
- /* Zero completion for nxt FLR */
+ /* Zero completion for next FLR */
REG_WR(bp, comp_addr, 0);
return ret;
}
-static u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
+u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
{
u16 status;
@@ -1171,7 +1407,6 @@ static u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
*/
static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
{
-
/* wait for CFC PF usage-counter to zero (includes all the VFs) */
if (bnx2x_flr_clnup_poll_hw_counter(bp,
CFC_REG_NUM_LCIDS_INSIDE_PF,
@@ -1179,7 +1414,6 @@ static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
poll_cnt))
return 1;
-
/* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
if (bnx2x_flr_clnup_poll_hw_counter(bp,
DORQ_REG_PF_USAGE_CNT,
@@ -1209,7 +1443,7 @@ static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
/* Wait DMAE PF usage counter to zero */
if (bnx2x_flr_clnup_poll_hw_counter(bp,
dmae_reg_go_c[INIT_DMAE_C(bp)],
- "DMAE dommand register timed out",
+ "DMAE command register timed out",
poll_cnt))
return 1;
@@ -1371,26 +1605,31 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
if (msix) {
val &= ~(IGU_PF_CONF_INT_LINE_EN |
IGU_PF_CONF_SINGLE_ISR_EN);
- val |= (IGU_PF_CONF_FUNC_EN |
- IGU_PF_CONF_MSI_MSIX_EN |
+ val |= (IGU_PF_CONF_MSI_MSIX_EN |
IGU_PF_CONF_ATTN_BIT_EN);
if (single_msix)
val |= IGU_PF_CONF_SINGLE_ISR_EN;
} else if (msi) {
val &= ~IGU_PF_CONF_INT_LINE_EN;
- val |= (IGU_PF_CONF_FUNC_EN |
- IGU_PF_CONF_MSI_MSIX_EN |
+ val |= (IGU_PF_CONF_MSI_MSIX_EN |
IGU_PF_CONF_ATTN_BIT_EN |
IGU_PF_CONF_SINGLE_ISR_EN);
} else {
val &= ~IGU_PF_CONF_MSI_MSIX_EN;
- val |= (IGU_PF_CONF_FUNC_EN |
- IGU_PF_CONF_INT_LINE_EN |
+ val |= (IGU_PF_CONF_INT_LINE_EN |
IGU_PF_CONF_ATTN_BIT_EN |
IGU_PF_CONF_SINGLE_ISR_EN);
}
+ /* Clean previous status - need to configure IGU prior to ack */
+ if ((!msix) || single_msix) {
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+ bnx2x_ack_int(bp);
+ }
+
+ val |= IGU_PF_CONF_FUNC_EN;
+
DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
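The reordering above writes the IGU configuration and acks any stale assertion before OR-ing in IGU_PF_CONF_FUNC_EN, so a leftover interrupt cannot fire the instant the function is enabled. A compressed sketch of that configure/drain/enable ordering (the register helpers are pretend state, not driver calls):

	#include <stdint.h>
	#include <stdio.h>

	#define CONF_FUNC_EN (1u << 0)   /* stands in for IGU_PF_CONF_FUNC_EN */

	static uint32_t igu_conf;
	static int stale_irq = 1;        /* pretend an interrupt was latched */

	static void igu_enable(uint32_t val)
	{
		igu_conf = val;                 /* 1. program all but FUNC_EN */
		stale_irq = 0;                  /* 2. ack anything already latched */
		igu_conf = val | CONF_FUNC_EN;  /* 3. only now enable the function */
	}

	int main(void)
	{
		igu_enable(0x6);
		printf("conf=0x%x stale=%d\n", igu_conf, stale_irq);
		return 0;
	}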
@@ -1425,71 +1664,6 @@ void bnx2x_int_enable(struct bnx2x *bp)
bnx2x_igu_int_enable(bp);
}
-static void bnx2x_hc_int_disable(struct bnx2x *bp)
-{
- int port = BP_PORT(bp);
- u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
- u32 val = REG_RD(bp, addr);
-
- /*
- * in E1 we must use only PCI configuration space to disable
- * MSI/MSIX capablility
- * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
- */
- if (CHIP_IS_E1(bp)) {
- /* Since IGU_PF_CONF_MSI_MSIX_EN still always on
- * Use mask register to prevent from HC sending interrupts
- * after we exit the function
- */
- REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
-
- val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
- HC_CONFIG_0_REG_INT_LINE_EN_0 |
- HC_CONFIG_0_REG_ATTN_BIT_EN_0);
- } else
- val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
- HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
- HC_CONFIG_0_REG_INT_LINE_EN_0 |
- HC_CONFIG_0_REG_ATTN_BIT_EN_0);
-
- DP(NETIF_MSG_IFDOWN,
- "write %x to HC %d (addr 0x%x)\n",
- val, port, addr);
-
- /* flush all outstanding writes */
- mmiowb();
-
- REG_WR(bp, addr, val);
- if (REG_RD(bp, addr) != val)
- BNX2X_ERR("BUG! proper val not read from IGU!\n");
-}
-
-static void bnx2x_igu_int_disable(struct bnx2x *bp)
-{
- u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
-
- val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
- IGU_PF_CONF_INT_LINE_EN |
- IGU_PF_CONF_ATTN_BIT_EN);
-
- DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
-
- /* flush all outstanding writes */
- mmiowb();
-
- REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
- if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
- BNX2X_ERR("BUG! proper val not read from IGU!\n");
-}
-
-static void bnx2x_int_disable(struct bnx2x *bp)
-{
- if (bp->common.int_block == INT_BLOCK_HC)
- bnx2x_hc_int_disable(bp);
- else
- bnx2x_igu_int_disable(bp);
-}
-
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
@@ -1575,11 +1749,11 @@ static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
}
/**
- * bnx2x_trylock_leader_lock- try to aquire a leader lock.
+ * bnx2x_trylock_leader_lock- try to acquire a leader lock.
*
* @bp: driver handle
*
- * Tries to aquire a leader lock for current engine.
+ * Tries to acquire a leader lock for current engine.
*/
static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
{
@@ -1588,6 +1762,24 @@ static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
+/* schedule the sp task and mark that interrupt occurred (runs from ISR) */
+static int bnx2x_schedule_sp_task(struct bnx2x *bp)
+{
+ /* Set the interrupt occurred bit for the sp-task to recognize it
+ * must ack the interrupt and transition according to the IGU
+ * state machine.
+ */
+ atomic_set(&bp->interrupt_occurred, 1);
+
+ /* The sp_task must execute only after this bit
+ * is set, otherwise we will get out of sync and miss all
+ * further interrupts. Hence, the barrier.
+ */
+ smp_wmb();
+
+ /* schedule sp_task to workqueue */
+ return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+}
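The helper pairs atomic_set() with smp_wmb() so that, by the time the queued work runs, it is guaranteed to observe interrupt_occurred == 1 and take the ack path. A C11 userspace analogue of that publish-then-schedule pattern (a release store paired with an acquire load; pthread stands in for the workqueue):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int interrupt_occurred;

	static void *sp_task(void *arg)
	{
		(void)arg;
		if (atomic_load_explicit(&interrupt_occurred, memory_order_acquire))
			printf("worker saw the interrupt flag\n");
		return NULL;
	}

	int main(void)
	{
		pthread_t worker;

		/* "ISR": publish the flag before scheduling the work item */
		atomic_store_explicit(&interrupt_occurred, 1, memory_order_release);
		pthread_create(&worker, NULL, sp_task, NULL); /* ~queue_delayed_work */
		pthread_join(worker, NULL);
		return 0;
	}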
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
{
@@ -1602,6 +1794,13 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
fp->index, cid, command, bp->state,
rr_cqe->ramrod_cqe.ramrod_type);
+ /* If cid is within VF range, replace the slowpath object with the
+ * one corresponding to this VF
+ */
+ if (cid >= BNX2X_FIRST_VF_CID &&
+ cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
+ bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);
+
switch (command) {
case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
@@ -1624,7 +1823,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
break;
case (RAMROD_CMD_ID_ETH_TERMINATE):
- DP(BNX2X_MSG_SP, "got MULTI[%d] teminate ramrod\n", cid);
+ DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
drv_cmd = BNX2X_Q_CMD_TERMINATE;
break;
@@ -1633,6 +1832,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
drv_cmd = BNX2X_Q_CMD_EMPTY;
break;
+ case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
+ DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
+ drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
+ break;
+
default:
BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
command, fp->index);
@@ -1654,10 +1858,10 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
return;
#endif
- smp_mb__before_atomic_inc();
+ smp_mb__before_atomic();
atomic_inc(&bp->cq_spq_left);
/* push the change in bp->spq_left and towards the memory */
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
@@ -1669,31 +1873,22 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
* mark pending ACK to MCP bit.
* prevent case that both bits are cleared.
* At the end of load/unload driver checks that
- * sp_state is cleaerd, and this order prevents
+ * sp_state is cleared, and this order prevents
* races
*/
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
wmb();
clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
- /* schedule workqueue to send ack to MCP */
- queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+ /* schedule the sp task as MCP ack is required */
+ bnx2x_schedule_sp_task(bp);
}
return;
}
-void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
- u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod)
-{
- u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset;
-
- bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
- start);
-}
-
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
struct bnx2x *bp = netdev_priv(dev_instance);
@@ -1720,7 +1915,6 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
if (status & mask) {
/* Handle Rx or Tx according to SB id */
- prefetch(fp->rx_cons_sb);
for_each_cos_in_tx_queue(fp, cos)
prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
prefetch(&fp->sb_running_index[SM_RX_ID]);
@@ -1734,21 +1928,23 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
if (status & (mask | 0x1)) {
struct cnic_ops *c_ops = NULL;
- if (likely(bp->state == BNX2X_STATE_OPEN)) {
- rcu_read_lock();
- c_ops = rcu_dereference(bp->cnic_ops);
- if (c_ops)
- c_ops->cnic_handler(bp->cnic_data,
- NULL);
- rcu_read_unlock();
- }
+ rcu_read_lock();
+ c_ops = rcu_dereference(bp->cnic_ops);
+ if (c_ops && (bp->cnic_eth_dev.drv_state &
+ CNIC_DRV_STATE_HANDLES_IRQ))
+ c_ops->cnic_handler(bp->cnic_data, NULL);
+ rcu_read_unlock();
status &= ~mask;
}
}
if (unlikely(status & 0x1)) {
- queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+
+ /* schedule sp task to perform default status block work, ack
+ * attentions and enable interrupts.
+ */
+ bnx2x_schedule_sp_task(bp);
status &= ~0x1;
if (!status)
@@ -1806,7 +2002,7 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
if (lock_status & resource_bit)
return 0;
- msleep(5);
+ usleep_range(5000, 10000);
}
BNX2X_ERR("Timeout\n");
return -EAGAIN;
@@ -1841,8 +2037,8 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
/* Validating that the resource is currently taken */
lock_status = REG_RD(bp, hw_lock_control_reg);
if (!(lock_status & resource_bit)) {
- BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. unlock was called but lock wasn't taken!\n",
- lock_status, resource_bit);
+ BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
+ lock_status, resource_bit);
return -EFAULT;
}
@@ -1850,7 +2046,6 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
return 0;
}
-
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
/* The GPIO should be swapped if swap register is set and active */
@@ -2115,6 +2310,23 @@ static void bnx2x_set_requested_fc(struct bnx2x *bp)
bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
}
+static void bnx2x_init_dropless_fc(struct bnx2x *bp)
+{
+ u32 pause_enabled = 0;
+
+ if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
+ if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ pause_enabled = 1;
+
+ REG_WR(bp, BAR_USTRORM_INTMEM +
+ USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
+ pause_enabled);
+ }
+
+ DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
+ pause_enabled ? "enabled" : "disabled");
+}
+
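bnx2x_init_dropless_fc gathers the conditions that used to be open-coded in bnx2x_link_attn: the pause-enabled word is reported to the ustorm only on non-E1 chips, when the feature was requested, the link is up and TX flow control was negotiated. A hedged predicate sketch with illustrative field names:

	#include <stdbool.h>
	#include <stdio.h>

	#define FLOW_CTRL_TX (1u << 0)

	struct link {
		bool is_e1;          /* old chip: feature unsupported */
		bool dropless_fc;    /* requested by the user */
		bool link_up;
		unsigned int flow_ctrl;
	};

	static bool pause_enabled(const struct link *l)
	{
		return !l->is_e1 && l->dropless_fc && l->link_up &&
		       (l->flow_ctrl & FLOW_CTRL_TX);
	}

	int main(void)
	{
		struct link l = { false, true, true, FLOW_CTRL_TX };
		printf("dropless_fc is %s\n",
		       pause_enabled(&l) ? "enabled" : "disabled");
		return 0;
	}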
int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
@@ -2148,6 +2360,8 @@ int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
bnx2x_release_phy_lock(bp);
+ bnx2x_init_dropless_fc(bp);
+
bnx2x_calc_fc_adv(bp);
if (bp->link_vars.link_up) {
@@ -2169,6 +2383,8 @@ void bnx2x_link_set(struct bnx2x *bp)
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
bnx2x_release_phy_lock(bp);
+ bnx2x_init_dropless_fc(bp);
+
bnx2x_calc_fc_adv(bp);
} else
BNX2X_ERR("Bootcode is missing - can not set link\n");
@@ -2206,14 +2422,13 @@ u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
return rc;
}
-
/* Calculates the sum of vn_min_rates.
It's needed for further normalizing of the min_rates.
Returns:
sum of vn_min_rates.
or
0 - if all the min_rates are 0.
- In the later case fainess algorithm should be deactivated.
+ In the latter case the fairness algorithm should be deactivated.
If not all min_rates are zero then those that are zeroes will be set to 1.
*/
static void bnx2x_calc_vn_min(struct bnx2x *bp,
@@ -2278,7 +2493,6 @@ static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
input->vnic_max_rate[vn] = vn_max_rate;
}
-
static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
{
if (CHIP_REV_IS_SLOW(bp))
@@ -2294,7 +2508,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp)
int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
if (BP_NOMCP(bp))
- return; /* what should be the default bvalue in this case */
+ return; /* what should be the default value in this case */
/* For 2 port configuration the absolute function number formula
* is:
@@ -2332,7 +2546,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
input.port_rate = bp->link_vars.line_speed;
- if (cmng_type == CMNG_FNS_MINMAX) {
+ if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
int vn;
/* read mf conf from shmem */
@@ -2389,6 +2603,21 @@ static void storm_memset_cmng(struct bnx2x *bp,
}
}
+/* init cmng mode in HW according to local configuration */
+void bnx2x_set_local_cmng(struct bnx2x *bp)
+{
+ int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
+
+ if (cmng_fns != CMNG_FNS_NONE) {
+ bnx2x_cmng_fns_init(bp, false, cmng_fns);
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+ } else {
+ /* rate shaping and fairness are disabled */
+ DP(NETIF_MSG_IFUP,
+ "single function mode without fairness\n");
+ }
+}
+
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
@@ -2397,20 +2626,9 @@ static void bnx2x_link_attn(struct bnx2x *bp)
bnx2x_link_update(&bp->link_params, &bp->link_vars);
- if (bp->link_vars.link_up) {
-
- /* dropless flow control */
- if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
- int port = BP_PORT(bp);
- u32 pause_enabled = 0;
-
- if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
- pause_enabled = 1;
+ bnx2x_init_dropless_fc(bp);
- REG_WR(bp, BAR_USTRORM_INTMEM +
- USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
- pause_enabled);
- }
+ if (bp->link_vars.link_up) {
if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
struct host_port_stats *pstats;
@@ -2424,17 +2642,8 @@ static void bnx2x_link_attn(struct bnx2x *bp)
bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
}
- if (bp->link_vars.link_up && bp->link_vars.line_speed) {
- int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
-
- if (cmng_fns != CMNG_FNS_NONE) {
- bnx2x_cmng_fns_init(bp, false, cmng_fns);
- storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
- } else
- /* rate shaping and fairness are disabled */
- DP(NETIF_MSG_IFUP,
- "single function mode without fairness\n");
- }
+ if (bp->link_vars.link_up && bp->link_vars.line_speed)
+ bnx2x_set_local_cmng(bp);
__bnx2x_link_report(bp);
@@ -2448,23 +2657,55 @@ void bnx2x__link_status_update(struct bnx2x *bp)
return;
/* read updated dcb configuration */
- bnx2x_dcbx_pmf_update(bp);
-
- bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
+ if (IS_PF(bp)) {
+ bnx2x_dcbx_pmf_update(bp);
+ bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
+ if (bp->link_vars.link_up)
+ bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+ else
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ /* indicate link status */
+ bnx2x_link_report(bp);
- if (bp->link_vars.link_up)
+ } else { /* VF */
+ bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_2500baseX_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ bp->port.advertising[0] = bp->port.supported[0];
+
+ bp->link_params.bp = bp;
+ bp->link_params.port = BP_PORT(bp);
+ bp->link_params.req_duplex[0] = DUPLEX_FULL;
+ bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
+ bp->link_params.req_line_speed[0] = SPEED_10000;
+ bp->link_params.speed_cap_mask[0] = 0x7f0000;
+ bp->link_params.switch_cfg = SWITCH_CFG_10G;
+ bp->link_vars.mac_type = MAC_TYPE_BMAC;
+ bp->link_vars.line_speed = SPEED_10000;
+ bp->link_vars.link_status =
+ (LINK_STATUS_LINK_UP |
+ LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
+ bp->link_vars.link_up = 1;
+ bp->link_vars.duplex = DUPLEX_FULL;
+ bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ __bnx2x_link_report(bp);
bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
- else
- bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-
- /* indicate link status */
- bnx2x_link_report(bp);
+ }
}
static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
u16 vlan_val, u8 allowed_prio)
{
- struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_state_params func_params = {NULL};
struct bnx2x_func_afex_update_params *f_update_params =
&func_params.params.afex_update;
@@ -2489,7 +2730,7 @@ static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
u16 vif_index, u8 func_bit_map)
{
- struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_state_params func_params = {NULL};
struct bnx2x_func_afex_viflists_params *update_params =
&func_params.params.afex_viflists;
int rc;
@@ -2505,7 +2746,7 @@ static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
/* set parameters according to cmd_type */
update_params->afex_vif_list_command = cmd_type;
- update_params->vif_list_index = cpu_to_le16(vif_index);
+ update_params->vif_list_index = vif_index;
update_params->func_bit_map =
(cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
update_params->func_to_clear = 0;
@@ -2728,7 +2969,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
return rc;
}
-
static void storm_memset_func_cfg(struct bnx2x *bp,
struct tstorm_eth_function_common_config *tcfg,
u16 abs_fid)
@@ -2762,7 +3002,7 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
}
/**
- * bnx2x_get_tx_only_flags - Return common flags
+ * bnx2x_get_common_flags - Return common flags
*
* @bp device handle
* @fp queue handle
@@ -2780,14 +3020,23 @@ static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
__set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
/* tx only connections collect statistics (on the same index as the
- * parent connection). The statistics are zeroed when the parent
- * connection is initialized.
+ * parent connection). The statistics are zeroed when the parent
+ * connection is initialized.
*/
__set_bit(BNX2X_Q_FLG_STATS, &flags);
if (zero_stats)
__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+ if (bp->flags & TX_SWITCHING)
+ __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);
+
+ __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
+ __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
+#endif
return flags;
}
@@ -2827,7 +3076,6 @@ static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
if (IS_MF_AFEX(bp))
__set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
-
return flags | bnx2x_get_common_flags(bp, fp, true);
}
@@ -2864,15 +3112,12 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
pause->sge_th_hi + FW_PREFETCH_CNT >
MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
- tpa_agg_size = min_t(u32,
- (min_t(u32, 8, MAX_SKB_FRAGS) *
- SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
+ tpa_agg_size = TPA_AGG_SIZE;
max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
SGE_PAGE_SHIFT;
max_sge = ((max_sge + PAGES_PER_SGE - 1) &
(~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
- sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
- 0xffff);
+ sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
}
/* pause - not for e1 */
@@ -2906,7 +3151,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
* placed on the BD (not including paddings).
*/
rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
- BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
+ BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
rxq_init->cl_qzone_id = fp->cl_qzone_id;
rxq_init->tpa_agg_sz = tpa_agg_size;
@@ -2917,7 +3162,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
/* Maximum number or simultaneous TPA aggregation for this Queue.
*
- * For PF Clients it should be the maximum avaliable number.
+ * For PF Clients it should be the maximum available number.
* VF driver(s) may want to define it to a smaller value.
*/
rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
@@ -2948,7 +3193,7 @@ static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
txq_init->fw_sb_id = fp->fw_sb_id;
/*
- * set the tss leading client id for TX classfication ==
+ * set the tss leading client id for TX classification ==
* leading RSS client id
*/
txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
@@ -3011,7 +3256,7 @@ static void bnx2x_pf_init(struct bnx2x *bp)
if (bp->port.pmf)
storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
- /* init Event Queue */
+ /* init Event Queue - PCI bus guarantees correct endianness */
eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
eq_data.producer = bp->eq_prod;
@@ -3020,7 +3265,6 @@ static void bnx2x_pf_init(struct bnx2x *bp)
storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
}
-
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
int port = BP_PORT(bp);
@@ -3036,7 +3280,7 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
- /* Tx queue should be only reenabled */
+ /* Tx queue should be only re-enabled */
netif_tx_wake_all_queues(bp->dev);
/*
@@ -3051,16 +3295,29 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
{
struct eth_stats_info *ether_stat =
&bp->slowpath->drv_info_to_mcp.ether_stat;
+ struct bnx2x_vlan_mac_obj *mac_obj =
+ &bp->sp_objs->mac_obj;
+ int i;
strlcpy(ether_stat->version, DRV_MODULE_VERSION,
ETH_STAT_INFO_VERSION_LEN);
- bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj,
- DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
- ether_stat->mac_local);
-
+ /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
+ * mac_local field of the ether_stat struct. The base address is offset
+ * by 2 bytes because each field is 8 bytes wide while a mac address is
+ * only 6. Likewise, the stride for the get_n_elements function is 2
+ * bytes, padding each 6-byte mac out to the 8 bytes allocated by the
+ * ether_stat struct, so the macs land in their proper positions.
+ */
+ for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
+ memset(ether_stat->mac_local + i, 0,
+ sizeof(ether_stat->mac_local[0]));
+ mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
+ DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+ ether_stat->mac_local + MAC_PAD, MAC_PAD,
+ ETH_ALEN);
ether_stat->mtu_size = bp->dev->mtu;
-
if (bp->dev->features & NETIF_F_RXCSUM)
ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
if (bp->dev->features & NETIF_F_TSO)
@@ -3071,6 +3328,10 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
ether_stat->txq_size = bp->tx_ring_size;
ether_stat->rxq_size = bp->rx_ring_size;
+
+#ifdef CONFIG_BNX2X_SRIOV
+ ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
+#endif
}
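
The padding comment in this hunk is easier to see in isolation. Below is a minimal userspace sketch of the offset-and-stride layout, assuming MAC_PAD is the 2-byte gap between a 6-byte mac and its 8-byte slot:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6
#define MAC_PAD  2	/* assumption: 8-byte slot minus 6-byte mac */

int main(void)
{
	unsigned char mac_local[2][ETH_ALEN + MAC_PAD];
	const unsigned char mac[ETH_ALEN] = { 0x00, 0x10, 0x18, 0x0a, 0x0b, 0x0c };
	int i;

	memset(mac_local, 0, sizeof(mac_local));	/* the memset loop above */

	/* base offset MAC_PAD inside an 8-byte slot; the slot-to-slot
	 * stride is ETH_ALEN + MAC_PAD, so every mac lands right-aligned
	 */
	memcpy(mac_local[0] + MAC_PAD, mac, ETH_ALEN);

	for (i = 0; i < ETH_ALEN + MAC_PAD; i++)
		printf("%02x ", mac_local[0][i]);	/* 00 00 00 10 18 0a 0b 0c */
	printf("\n");
	return 0;
}
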
static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
@@ -3082,8 +3343,7 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
if (!CNIC_LOADED(bp))
return;
- memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
- bp->fip_mac, ETH_ALEN);
+ memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
fcoe_stat->qos_priority =
app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
@@ -3101,65 +3361,75 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
struct fcoe_statistics_params *fw_fcoe_stat =
&bp->fw_stats_data->fcoe;
- ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo,
- fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
+ ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
+ fcoe_stat->rx_bytes_lo,
+ fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
- ADD_64(fcoe_stat->rx_bytes_hi,
- fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
- fcoe_stat->rx_bytes_lo,
- fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
+ ADD_64_LE(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
- ADD_64(fcoe_stat->rx_bytes_hi,
- fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
- fcoe_stat->rx_bytes_lo,
- fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
+ ADD_64_LE(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
- ADD_64(fcoe_stat->rx_bytes_hi,
- fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
- fcoe_stat->rx_bytes_lo,
- fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
+ ADD_64_LE(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
- ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
- fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
+ ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+ fcoe_stat->rx_frames_lo,
+ fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
- ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
- fcoe_q_tstorm_stats->rcv_ucast_pkts);
+ ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+ fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_ucast_pkts);
- ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
- fcoe_q_tstorm_stats->rcv_bcast_pkts);
+ ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+ fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_bcast_pkts);
- ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
- fcoe_q_tstorm_stats->rcv_mcast_pkts);
+ ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+ fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_mcast_pkts);
- ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo,
- fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
+ ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
+ fcoe_stat->tx_bytes_lo,
+ fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
- ADD_64(fcoe_stat->tx_bytes_hi,
- fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
- fcoe_stat->tx_bytes_lo,
- fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
+ ADD_64_LE(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
- ADD_64(fcoe_stat->tx_bytes_hi,
- fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
- fcoe_stat->tx_bytes_lo,
- fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
+ ADD_64_LE(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
- ADD_64(fcoe_stat->tx_bytes_hi,
- fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
- fcoe_stat->tx_bytes_lo,
- fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
+ ADD_64_LE(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
- ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
- fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
+ ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+ fcoe_stat->tx_frames_lo,
+ fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
- ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
- fcoe_q_xstorm_stats->ucast_pkts_sent);
+ ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+ fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->ucast_pkts_sent);
- ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
- fcoe_q_xstorm_stats->bcast_pkts_sent);
+ ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+ fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->bcast_pkts_sent);
- ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
- fcoe_q_xstorm_stats->mcast_pkts_sent);
+ ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+ fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->mcast_pkts_sent);
}
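
The ADD_64 to ADD_64_LE conversion above byte-swaps the little-endian firmware halves before accumulating (LE32_0 standing in for a pre-swapped zero); the carry arithmetic itself is unchanged. A minimal sketch of that split hi/lo accumulation, assuming the usual ADD_64 semantics of a 64-bit counter kept as two 32-bit words:

#include <stdio.h>
#include <stdint.h>

/* Firmware keeps 64-bit counters as two 32-bit halves; the low words
 * are added with an explicit carry into the high words. ADD_64_LE does
 * the same after le32_to_cpu on the addends, which this sketch omits.
 */
static void add_64(uint32_t *s_hi, uint32_t a_hi,
		   uint32_t *s_lo, uint32_t a_lo)
{
	*s_lo += a_lo;
	if (*s_lo < a_lo)		/* unsigned wrap == carry out */
		(*s_hi)++;
	*s_hi += a_hi;
}

int main(void)
{
	uint32_t hi = 0, lo = 0xffffffff;

	add_64(&hi, 0, &lo, 1);		/* low word wraps, carry into hi */
	printf("%08x%08x\n", hi, lo);	/* 0000000100000000 */
	return 0;
}
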
/* ask L5 driver to add data to the struct */
@@ -3175,8 +3445,8 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
if (!CNIC_LOADED(bp))
return;
- memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
- bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
+ memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
+ ETH_ALEN);
iscsi_stat->qos_priority =
app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
@@ -3211,10 +3481,15 @@ static void bnx2x_handle_eee_event(struct bnx2x *bp)
bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
}
+#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH (20)
+#define BNX2X_UPDATE_DRV_INFO_IND_COUNT (25)
+
static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
{
enum drv_info_opcode op_code;
u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
+ bool release = false;
+ int wait;
/* if drv_info version supported by MFW doesn't match - send NACK */
if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
@@ -3225,6 +3500,9 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
DRV_INFO_CONTROL_OP_CODE_SHIFT;
+ /* Must prevent other flows from accessing drv_info_to_mcp */
+ mutex_lock(&bp->drv_info_mutex);
+
memset(&bp->slowpath->drv_info_to_mcp, 0,
sizeof(union drv_info_to_mcp));
@@ -3241,7 +3519,7 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
default:
/* if op code isn't supported - send NACK */
bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
- return;
+ goto out;
}
/* if we got drv_info attn from MFW then these fields are defined in
@@ -3253,6 +3531,106 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
+
+ /* Since management may want both this and get_driver_version, we
+ * need to wait until it notifies us that it has finished utilizing
+ * the buffer.
+ */
+ if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
+ DP(BNX2X_MSG_MCP, "Management does not support indication\n");
+ } else if (!bp->drv_info_mng_owner) {
+ u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
+
+ for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
+ u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
+
+ /* Management is done; need to clear indication */
+ if (indication & bit) {
+ SHMEM2_WR(bp, mfw_drv_indication,
+ indication & ~bit);
+ release = true;
+ break;
+ }
+
+ msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
+ }
+ }
+ if (!release) {
+ DP(BNX2X_MSG_MCP, "Management did not release indication\n");
+ bp->drv_info_mng_owner = true;
+ }
+
+out:
+ mutex_unlock(&bp->drv_info_mutex);
+}
+
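
The release handshake added above polls mfw_drv_indication for a per-function "read done" bit, clears it on success, and otherwise keeps ownership of the buffer. A hedged standalone sketch, where shmem_rd/shmem_wr are hypothetical stand-ins for SHMEM2_RD/SHMEM2_WR over a fake shared-memory word:

#include <stdio.h>

#define IND_COUNT 25	/* mirrors BNX2X_UPDATE_DRV_INFO_IND_COUNT */

static unsigned int fake_shmem = 1u << 3;	/* pretend MFW already set "read done" */

static unsigned int shmem_rd(void) { return fake_shmem; }
static void shmem_wr(unsigned int v) { fake_shmem = v; }

/* Poll for the per-function bit; clear it and report release on success.
 * On timeout the caller keeps ownership (drv_info_mng_owner above).
 */
static int wait_for_mfw_release(unsigned int bit)
{
	int wait;

	for (wait = 0; wait < IND_COUNT; wait++) {
		unsigned int indication = shmem_rd();

		if (indication & bit) {
			shmem_wr(indication & ~bit);
			return 1;
		}
		/* the driver sleeps BNX2X_UPDATE_DRV_INFO_IND_LENGTH ms here */
	}
	return 0;
}

int main(void)
{
	printf("released=%d\n", wait_for_mfw_release(1u << 3));	/* released=1 */
	return 0;
}
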
+static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
+{
+ u8 vals[4];
+ int i = 0;
+
+ if (bnx2x_format) {
+ i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
+ &vals[0], &vals[1], &vals[2], &vals[3]);
+ if (i > 0)
+ vals[0] -= '0';
+ } else {
+ i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
+ &vals[0], &vals[1], &vals[2], &vals[3]);
+ }
+
+ while (i < 4)
+ vals[i++] = 0;
+
+ return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
+}
+
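
A userspace sketch of what the bnx2x_format branch above computes for a DRV_MODULE_VERSION-style string; %hhu replaces the driver's %hhd on u8 only to keep a userspace compiler quiet, and behaves the same for these inputs:

#include <stdio.h>

/* "1." is matched literally, %c grabs the first digit of the major
 * field, the %hhu conversions take the rest, and the "-0" suffix simply
 * stops the scan early so the remaining bytes default to zero.
 */
static unsigned int pack_version(const char *version)
{
	unsigned char vals[4] = {0, 0, 0, 0};
	int i;

	i = sscanf(version, "1.%c%hhu.%hhu.%hhu",
		   (char *)&vals[0], &vals[1], &vals[2], &vals[3]);
	if (i > 0)
		vals[0] -= '0';		/* ASCII digit -> number */

	while (i < 4)
		vals[i++] = 0;

	return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
}

int main(void)
{
	/* "1.78.19-0" -> 0x07081300: major split as 7 and 8, minor 0x13 */
	printf("0x%08x\n", pack_version("1.78.19-0"));
	return 0;
}
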
+void bnx2x_update_mng_version(struct bnx2x *bp)
+{
+ u32 iscsiver = DRV_VER_NOT_LOADED;
+ u32 fcoever = DRV_VER_NOT_LOADED;
+ u32 ethver = DRV_VER_NOT_LOADED;
+ int idx = BP_FW_MB_IDX(bp);
+ u8 *version;
+
+ if (!SHMEM2_HAS(bp, func_os_drv_ver))
+ return;
+
+ mutex_lock(&bp->drv_info_mutex);
+ /* Must not proceed while `bnx2x_handle_drv_info_req' might still run */
+ if (bp->drv_info_mng_owner)
+ goto out;
+
+ if (bp->state != BNX2X_STATE_OPEN)
+ goto out;
+
+ /* Parse ethernet driver version */
+ ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
+ if (!CNIC_LOADED(bp))
+ goto out;
+
+ /* Try getting storage driver version via cnic */
+ memset(&bp->slowpath->drv_info_to_mcp, 0,
+ sizeof(union drv_info_to_mcp));
+ bnx2x_drv_info_iscsi_stat(bp);
+ version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
+ iscsiver = bnx2x_update_mng_version_utility(version, false);
+
+ memset(&bp->slowpath->drv_info_to_mcp, 0,
+ sizeof(union drv_info_to_mcp));
+ bnx2x_drv_info_fcoe_stat(bp);
+ version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
+ fcoever = bnx2x_update_mng_version_utility(version, false);
+
+out:
+ SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
+ SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
+ SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
+
+ mutex_unlock(&bp->drv_info_mutex);
+
+ DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
+ ethver, iscsiver, fcoever);
}
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
@@ -3342,10 +3720,8 @@ static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
return true;
else
return false;
-
}
-
/**
* bnx2x_sp_post - place a single command on an SP ring
*
@@ -3397,10 +3773,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
HW_CID(bp, cid));
- type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
-
- type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
- SPE_HDR_FUNCTION_ID);
+ /* In some cases, type may already contain the func-id
+ * mainly in SRIOV related use cases, so we add it here only
+ * if it's not already set.
+ */
+ if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
+ type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
+ SPE_HDR_CONN_TYPE;
+ type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
+ SPE_HDR_FUNCTION_ID);
+ } else {
+ type = cmd_type;
+ }
spe->hdr.type = cpu_to_le16(type);
@@ -3410,14 +3794,13 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
/*
* It's ok if the actual decrement is issued towards the memory
* somewhere between the spin_lock and spin_unlock. Thus no
- * more explict memory barrier is needed.
+ * more explicit memory barrier is needed.
*/
if (common)
atomic_dec(&bp->eq_spq_left);
else
atomic_dec(&bp->cq_spq_left);
-
DP(BNX2X_MSG_SP,
"SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
@@ -3439,15 +3822,14 @@ static int bnx2x_acquire_alr(struct bnx2x *bp)
might_sleep();
for (j = 0; j < 1000; j++) {
- val = (1UL << 31);
- REG_WR(bp, GRCBASE_MCP + 0x9c, val);
- val = REG_RD(bp, GRCBASE_MCP + 0x9c);
- if (val & (1L << 31))
+ REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
+ val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
+ if (val & MCPR_ACCESS_LOCK_LOCK)
break;
- msleep(5);
+ usleep_range(5000, 10000);
}
- if (!(val & (1L << 31))) {
+ if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
BNX2X_ERR("Cannot acquire MCP access lock register\n");
rc = -EBUSY;
}
@@ -3458,7 +3840,7 @@ static int bnx2x_acquire_alr(struct bnx2x *bp)
/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
- REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
+ REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
}
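
The rewritten acquire loop spells out the lock semantics: write the lock bit, read it back, and treat the lock as held only once the readback shows the bit. A minimal sketch over a fake register, assuming only that MCPR_ACCESS_LOCK_LOCK names the same bit 31 the old literal used:

#include <stdio.h>

#define MCPR_ACCESS_LOCK_LOCK (1u << 31)	/* same bit 31 as the old 1UL << 31 */

static unsigned int fake_lock_reg;		/* stand-in for the GRC register */

static int acquire_alr(void)
{
	unsigned int val = 0;
	int j;

	for (j = 0; j < 1000; j++) {
		fake_lock_reg = MCPR_ACCESS_LOCK_LOCK;	/* REG_WR */
		val = fake_lock_reg;			/* REG_RD */
		if (val & MCPR_ACCESS_LOCK_LOCK)
			return 0;	/* write stuck: lock is ours */
		/* usleep_range(5000, 10000) in the driver */
	}
	return -1;		/* -EBUSY: firmware held it throughout */
}

static void release_alr(void)
{
	fake_lock_reg = 0;	/* REG_WR(..., 0) */
}

int main(void)
{
	printf("rc=%d\n", acquire_alr());	/* rc=0 */
	release_alr();
	return 0;
}
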
#define BNX2X_DEF_SB_ATT_IDX 0x0001
@@ -3480,7 +3862,7 @@ static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
rc |= BNX2X_DEF_SB_IDX;
}
- /* Do not reorder: indecies reading should complete before handling */
+ /* Do not reorder: indices reading should complete before handling */
barrier();
return rc;
}
@@ -3629,16 +4011,11 @@ static void bnx2x_fan_failure(struct bnx2x *bp)
netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
"Please contact OEM Support for assistance\n");
- /*
- * Scheudle device reset (unload)
+ /* Schedule device reset (unload)
* This is due to some boards consuming sufficient power when driver is
* up to overheat if fan fails.
*/
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
-
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
}
static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -3780,6 +4157,11 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
if (val & DRV_STATUS_DRV_INFO_REQ)
bnx2x_handle_drv_info_req(bp);
+
+ if (val & DRV_STATUS_VF_DISABLED)
+ bnx2x_schedule_iov_task(bp,
+ BNX2X_IOV_HANDLE_FLR);
+
if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
bnx2x_pmf_update(bp);
@@ -3904,7 +4286,7 @@ static void bnx2x_clear_reset_global(struct bnx2x *bp)
*/
static bool bnx2x_reset_is_global(struct bnx2x *bp)
{
- u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
@@ -3955,7 +4337,7 @@ void bnx2x_set_reset_in_progress(struct bnx2x *bp)
*/
bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
{
- u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
u32 bit = engine ?
BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
@@ -4058,49 +4440,70 @@ static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
return val != 0;
}
+static void _print_parity(struct bnx2x *bp, u32 reg)
+{
+ pr_cont(" [0x%08x] ", REG_RD(bp, reg));
+}
+
static void _print_next_block(int idx, const char *blk)
{
pr_cont("%s%s", idx ? ", " : "", blk);
}
-static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
- bool print)
+static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
+ int *par_num, bool print)
{
- int i = 0;
- u32 cur_bit = 0;
+ u32 cur_bit;
+ bool res;
+ int i;
+
+ res = false;
+
for (i = 0; sig; i++) {
- cur_bit = ((u32)0x1 << i);
+ cur_bit = (0x1UL << i);
if (sig & cur_bit) {
- switch (cur_bit) {
- case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "BRB");
- break;
- case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "PARSER");
- break;
- case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "TSDM");
- break;
- case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++,
+ res |= true; /* Each bit is a real error! */
+
+ if (print) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
+ _print_next_block((*par_num)++, "BRB");
+ _print_parity(bp,
+ BRB1_REG_BRB1_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "PARSER");
+ _print_parity(bp, PRS_REG_PRS_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
+ _print_next_block((*par_num)++, "TSDM");
+ _print_parity(bp,
+ TSDM_REG_TSDM_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
+ _print_next_block((*par_num)++,
"SEARCHER");
- break;
- case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "TCM");
- break;
- case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "TSEMI");
- break;
- case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "XPB");
- break;
+ _print_parity(bp, SRC_REG_SRC_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
+ _print_next_block((*par_num)++, "TCM");
+ _print_parity(bp, TCM_REG_TCM_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "TSEMI");
+ _print_parity(bp,
+ TSEM_REG_TSEM_PRTY_STS_0);
+ _print_parity(bp,
+ TSEM_REG_TSEM_PRTY_STS_1);
+ break;
+ case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
+ _print_next_block((*par_num)++, "XPB");
+ _print_parity(bp, GRCBASE_XPB +
+ PB_REG_PB_PRTY_STS);
+ break;
+ }
}
/* Clear the bit */
@@ -4108,84 +4511,142 @@ static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
}
}
- return par_num;
+ return res;
}
-static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
- bool *global, bool print)
+static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
+ int *par_num, bool *global,
+ bool print)
{
- int i = 0;
- u32 cur_bit = 0;
+ u32 cur_bit;
+ bool res;
+ int i;
+
+ res = false;
+
for (i = 0; sig; i++) {
- cur_bit = ((u32)0x1 << i);
+ cur_bit = (0x1UL << i);
if (sig & cur_bit) {
+ res |= true; /* Each bit is a real error! */
switch (cur_bit) {
case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "PBF");
+ if (print) {
+ _print_next_block((*par_num)++, "PBF");
+ _print_parity(bp, PBF_REG_PBF_PRTY_STS);
+ }
break;
case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "QM");
+ if (print) {
+ _print_next_block((*par_num)++, "QM");
+ _print_parity(bp, QM_REG_QM_PRTY_STS);
+ }
break;
case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "TM");
+ if (print) {
+ _print_next_block((*par_num)++, "TM");
+ _print_parity(bp, TM_REG_TM_PRTY_STS);
+ }
break;
case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "XSDM");
+ if (print) {
+ _print_next_block((*par_num)++, "XSDM");
+ _print_parity(bp,
+ XSDM_REG_XSDM_PRTY_STS);
+ }
break;
case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "XCM");
+ if (print) {
+ _print_next_block((*par_num)++, "XCM");
+ _print_parity(bp, XCM_REG_XCM_PRTY_STS);
+ }
break;
case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "XSEMI");
+ if (print) {
+ _print_next_block((*par_num)++,
+ "XSEMI");
+ _print_parity(bp,
+ XSEM_REG_XSEM_PRTY_STS_0);
+ _print_parity(bp,
+ XSEM_REG_XSEM_PRTY_STS_1);
+ }
break;
case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++,
+ if (print) {
+ _print_next_block((*par_num)++,
"DOORBELLQ");
+ _print_parity(bp,
+ DORQ_REG_DORQ_PRTY_STS);
+ }
break;
case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "NIG");
+ if (print) {
+ _print_next_block((*par_num)++, "NIG");
+ if (CHIP_IS_E1x(bp)) {
+ _print_parity(bp,
+ NIG_REG_NIG_PRTY_STS);
+ } else {
+ _print_parity(bp,
+ NIG_REG_NIG_PRTY_STS_0);
+ _print_parity(bp,
+ NIG_REG_NIG_PRTY_STS_1);
+ }
+ }
break;
case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
if (print)
- _print_next_block(par_num++,
+ _print_next_block((*par_num)++,
"VAUX PCI CORE");
*global = true;
break;
case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "DEBUG");
+ if (print) {
+ _print_next_block((*par_num)++,
+ "DEBUG");
+ _print_parity(bp, DBG_REG_DBG_PRTY_STS);
+ }
break;
case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "USDM");
+ if (print) {
+ _print_next_block((*par_num)++, "USDM");
+ _print_parity(bp,
+ USDM_REG_USDM_PRTY_STS);
+ }
break;
case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "UCM");
+ if (print) {
+ _print_next_block((*par_num)++, "UCM");
+ _print_parity(bp, UCM_REG_UCM_PRTY_STS);
+ }
break;
case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "USEMI");
+ if (print) {
+ _print_next_block((*par_num)++,
+ "USEMI");
+ _print_parity(bp,
+ USEM_REG_USEM_PRTY_STS_0);
+ _print_parity(bp,
+ USEM_REG_USEM_PRTY_STS_1);
+ }
break;
case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "UPB");
+ if (print) {
+ _print_next_block((*par_num)++, "UPB");
+ _print_parity(bp, GRCBASE_UPB +
+ PB_REG_PB_PRTY_STS);
+ }
break;
case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "CSDM");
+ if (print) {
+ _print_next_block((*par_num)++, "CSDM");
+ _print_parity(bp,
+ CSDM_REG_CSDM_PRTY_STS);
+ }
break;
case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "CCM");
+ if (print) {
+ _print_next_block((*par_num)++, "CCM");
+ _print_parity(bp, CCM_REG_CCM_PRTY_STS);
+ }
break;
}
@@ -4194,51 +4655,73 @@ static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
}
}
- return par_num;
+ return res;
}
-static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
- bool print)
+static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
+ int *par_num, bool print)
{
- int i = 0;
- u32 cur_bit = 0;
+ u32 cur_bit;
+ bool res;
+ int i;
+
+ res = false;
+
for (i = 0; sig; i++) {
- cur_bit = ((u32)0x1 << i);
+ cur_bit = (0x1UL << i);
if (sig & cur_bit) {
- switch (cur_bit) {
- case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "CSEMI");
- break;
- case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "PXP");
- break;
- case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++,
- "PXPPCICLOCKCLIENT");
- break;
- case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "CFC");
- break;
- case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "CDU");
- break;
- case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "DMAE");
- break;
- case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "IGU");
- break;
- case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "MISC");
- break;
+ res |= true; /* Each bit is a real error! */
+ if (print) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "CSEMI");
+ _print_parity(bp,
+ CSEM_REG_CSEM_PRTY_STS_0);
+ _print_parity(bp,
+ CSEM_REG_CSEM_PRTY_STS_1);
+ break;
+ case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
+ _print_next_block((*par_num)++, "PXP");
+ _print_parity(bp, PXP_REG_PXP_PRTY_STS);
+ _print_parity(bp,
+ PXP2_REG_PXP2_PRTY_STS_0);
+ _print_parity(bp,
+ PXP2_REG_PXP2_PRTY_STS_1);
+ break;
+ case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "PXPPCICLOCKCLIENT");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
+ _print_next_block((*par_num)++, "CFC");
+ _print_parity(bp,
+ CFC_REG_CFC_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
+ _print_next_block((*par_num)++, "CDU");
+ _print_parity(bp, CDU_REG_CDU_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
+ _print_next_block((*par_num)++, "DMAE");
+ _print_parity(bp,
+ DMAE_REG_DMAE_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
+ _print_next_block((*par_num)++, "IGU");
+ if (CHIP_IS_E1x(bp))
+ _print_parity(bp,
+ HC_REG_HC_PRTY_STS);
+ else
+ _print_parity(bp,
+ IGU_REG_IGU_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
+ _print_next_block((*par_num)++, "MISC");
+ _print_parity(bp,
+ MISC_REG_MISC_PRTY_STS);
+ break;
+ }
}
/* Clear the bit */
@@ -4246,40 +4729,49 @@ static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
}
}
- return par_num;
+ return res;
}
-static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
- bool *global, bool print)
+static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
+ int *par_num, bool *global,
+ bool print)
{
- int i = 0;
- u32 cur_bit = 0;
+ bool res = false;
+ u32 cur_bit;
+ int i;
+
for (i = 0; sig; i++) {
- cur_bit = ((u32)0x1 << i);
+ cur_bit = (0x1UL << i);
if (sig & cur_bit) {
switch (cur_bit) {
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
if (print)
- _print_next_block(par_num++, "MCP ROM");
+ _print_next_block((*par_num)++,
+ "MCP ROM");
*global = true;
+ res |= true;
break;
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
if (print)
- _print_next_block(par_num++,
+ _print_next_block((*par_num)++,
"MCP UMP RX");
*global = true;
+ res |= true;
break;
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
if (print)
- _print_next_block(par_num++,
+ _print_next_block((*par_num)++,
"MCP UMP TX");
*global = true;
+ res |= true;
break;
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
if (print)
- _print_next_block(par_num++,
+ _print_next_block((*par_num)++,
"MCP SCPAD");
- *global = true;
+ /* clear latched SCPAD PARITY from MCP */
+ REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
+ 1UL << 10);
break;
}
@@ -4288,39 +4780,50 @@ static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
}
}
- return par_num;
+ return res;
}
-static int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
- bool print)
+static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
+ int *par_num, bool print)
{
- int i = 0;
- u32 cur_bit = 0;
+ u32 cur_bit;
+ bool res;
+ int i;
+
+ res = false;
+
for (i = 0; sig; i++) {
- cur_bit = ((u32)0x1 << i);
+ cur_bit = (0x1UL << i);
if (sig & cur_bit) {
- switch (cur_bit) {
- case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "PGLUE_B");
- break;
- case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++, "ATC");
- break;
+ res |= true; /* Each bit is a real error! */
+ if (print) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "PGLUE_B");
+ _print_parity(bp,
+ PGLUE_B_REG_PGLUE_B_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
+ _print_next_block((*par_num)++, "ATC");
+ _print_parity(bp,
+ ATC_REG_ATC_PRTY_STS);
+ break;
+ }
}
-
/* Clear the bit */
sig &= ~cur_bit;
}
}
- return par_num;
+ return res;
}
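
All five checkers now share one shape: scan the signature bit by bit, report, clear the bit so the loop terminates, and return whether anything was set, rather than threading par_num back as the return value. The idiom in isolation:

#include <stdio.h>

/* Walk a signature word bit by bit; report and clear each set bit so
 * the `for (i = 0; sig; i++)` termination condition eventually holds,
 * and return whether anything was set at all.
 */
static int scan_parity_sig(unsigned int sig)
{
	unsigned int cur_bit;
	int res = 0, i;

	for (i = 0; sig; i++) {
		cur_bit = 0x1u << i;
		if (sig & cur_bit) {
			res = 1;		/* each bit is a real error */
			printf("bit %d set\n", i);
			sig &= ~cur_bit;	/* clear the bit */
		}
	}
	return res;
}

int main(void)
{
	printf("res=%d\n", scan_parity_sig(0x12));	/* bits 1 and 4 */
	return 0;
}
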
static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
u32 *sig)
{
+ bool res = false;
+
if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
(sig[1] & HW_PRTY_ASSERT_SET_1) ||
(sig[2] & HW_PRTY_ASSERT_SET_2) ||
@@ -4337,23 +4840,22 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
if (print)
netdev_err(bp->dev,
"Parity errors detected in blocks: ");
- par_num = bnx2x_check_blocks_with_parity0(
- sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
- par_num = bnx2x_check_blocks_with_parity1(
- sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
- par_num = bnx2x_check_blocks_with_parity2(
- sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
- par_num = bnx2x_check_blocks_with_parity3(
- sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
- par_num = bnx2x_check_blocks_with_parity4(
- sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
+ res |= bnx2x_check_blocks_with_parity0(bp,
+ sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
+ res |= bnx2x_check_blocks_with_parity1(bp,
+ sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
+ res |= bnx2x_check_blocks_with_parity2(bp,
+ sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
+ res |= bnx2x_check_blocks_with_parity3(bp,
+ sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
+ res |= bnx2x_check_blocks_with_parity4(bp,
+ sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
if (print)
pr_cont("\n");
+ }
- return true;
- } else
- return false;
+ return res;
}
/**
@@ -4380,6 +4882,14 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
attn.sig[3] = REG_RD(bp,
MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
port*4);
+ /* Since MCP attentions can't be disabled inside the block, we need to
+ * read AEU registers to see whether they're currently disabled
+ */
+ attn.sig[3] &= ((REG_RD(bp,
+ !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
+ : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
+ MISC_AEU_ENABLE_MCP_PRTY_BITS) |
+ ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
if (!CHIP_IS_E1x(bp))
attn.sig[4] = REG_RD(bp,
@@ -4389,7 +4899,6 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
return bnx2x_parity_attn(bp, global, print, attn.sig);
}
-
static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
{
u32 val;
@@ -4441,7 +4950,6 @@ static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
(u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
}
-
}
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
@@ -4576,8 +5084,8 @@ static void bnx2x_attn_int(struct bnx2x *bp)
void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
u16 index, u8 op, u8 update)
{
- u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
-
+ u32 igu_addr = bp->igu_base_addr;
+ igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
igu_addr);
}
@@ -4605,7 +5113,7 @@ static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
cid);
- bnx2x_panic_dump(bp);
+ bnx2x_panic_dump(bp, false);
}
bnx2x_cnic_cfc_comp(bp, cid, err);
return 0;
@@ -4647,7 +5155,8 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
/* Always push next commands out, don't wait here */
__set_bit(RAMROD_CONT, &ramrod_flags);
- switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
+ switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo)
+ >> BNX2X_SWCID_SHIFT) {
case BNX2X_FILTER_MAC_PENDING:
DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
@@ -4675,7 +5184,6 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
else if (rc > 0)
DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
-
}
static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
@@ -4724,7 +5232,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
struct bnx2x_queue_update_params *q_update_params =
&queue_params.params.update;
- /* Send Q update command with afex vlan removal values for all Qs */
+ /* Send Q update command with afex vlan removal values for all Qs */
queue_params.cmd = BNX2X_Q_CMD_UPDATE;
/* set silent vlan removal values according to vlan mode */
@@ -4756,7 +5264,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
q);
}
- if (!NO_FCOE(bp)) {
+ if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
fp = &bp->fp[FCOE_IDX(bp)];
queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
@@ -4764,9 +5272,9 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
__clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
/* mark latest Q bit */
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
/* send Q update ramrod for FCoE Q */
rc = bnx2x_queue_state_change(bp, &queue_params);
@@ -4798,7 +5306,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
u8 echo;
u32 cid;
u8 opcode;
- int spqe_cnt = 0;
+ int rc, spqe_cnt = 0;
struct bnx2x_queue_sp_obj *q_obj;
struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
@@ -4806,7 +5314,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
hw_cons = le16_to_cpu(*bp->eq_cons_sb);
/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
- * when we get the the next-page we nned to adjust so the loop
+ * when we get the next-page we need to adjust so the loop
* condition below will be met. The next element is the size of a
* regular element and hence incrementing by 1
*/
@@ -4826,19 +5334,31 @@ static void bnx2x_eq_int(struct bnx2x *bp)
for (; sw_cons != hw_cons;
sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
-
elem = &bp->eq_ring[EQ_DESC(sw_cons)];
- cid = SW_CID(elem->message.data.cfc_del_event.cid);
- opcode = elem->message.opcode;
+ rc = bnx2x_iov_eq_sp_event(bp, elem);
+ if (!rc) {
+ DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
+ rc);
+ goto next_spqe;
+ }
+ /* elem CID originates from FW; actually LE */
+ cid = SW_CID((__force __le32)
+ elem->message.data.cfc_del_event.cid);
+ opcode = elem->message.opcode;
/* handle eq element */
switch (opcode) {
+ case EVENT_RING_OPCODE_VF_PF_CHANNEL:
+ bnx2x_vf_mbx_schedule(bp,
+ &elem->message.data.vf_pf_event);
+ continue;
+
case EVENT_RING_OPCODE_STAT_QUERY:
- DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
- "got statistics comp event %d\n",
- bp->stats_comp++);
+ DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
+ "got statistics comp event %d\n",
+ bp->stats_comp++);
/* nothing to do with stats comp */
goto next_spqe;
@@ -4860,24 +5380,22 @@ static void bnx2x_eq_int(struct bnx2x *bp)
if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
break;
-
-
goto next_spqe;
case EVENT_RING_OPCODE_STOP_TRAFFIC:
DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
+ bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
if (f_obj->complete_cmd(bp, f_obj,
BNX2X_F_CMD_TX_STOP))
break;
- bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
goto next_spqe;
case EVENT_RING_OPCODE_START_TRAFFIC:
DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
+ bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
if (f_obj->complete_cmd(bp, f_obj,
BNX2X_F_CMD_TX_START))
break;
- bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
goto next_spqe;
case EVENT_RING_OPCODE_FUNCTION_UPDATE:
@@ -4890,6 +5408,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
break;
} else {
+ int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
+
DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
"AFEX: ramrod completed FUNCTION_UPDATE\n");
f_obj->complete_cmd(bp, f_obj,
@@ -4899,12 +5419,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
* sp_rtnl task as all Queue SP operations
* should run under rtnl_lock.
*/
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
-
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, cmd, 0);
}
goto next_spqe;
@@ -4985,7 +5500,7 @@ next_spqe:
spqe_cnt++;
} /* for */
- smp_mb__before_atomic_inc();
+ smp_mb__before_atomic();
atomic_add(spqe_cnt, &bp->eq_spq_left);
bp->eq_cons = sw_cons;
@@ -5000,50 +5515,57 @@ next_spqe:
static void bnx2x_sp_task(struct work_struct *work)
{
struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
- u16 status;
- status = bnx2x_update_dsb_idx(bp);
-/* if (status == 0) */
-/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
+ DP(BNX2X_MSG_SP, "sp task invoked\n");
- DP(BNX2X_MSG_SP, "got a slowpath interrupt (status 0x%x)\n", status);
+ /* make sure the atomic interrupt_occurred has been written */
+ smp_rmb();
+ if (atomic_read(&bp->interrupt_occurred)) {
- /* HW attentions */
- if (status & BNX2X_DEF_SB_ATT_IDX) {
- bnx2x_attn_int(bp);
- status &= ~BNX2X_DEF_SB_ATT_IDX;
- }
+ /* what work needs to be performed? */
+ u16 status = bnx2x_update_dsb_idx(bp);
- /* SP events: STAT_QUERY and others */
- if (status & BNX2X_DEF_SB_IDX) {
- struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+ DP(BNX2X_MSG_SP, "status %x\n", status);
+ DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
+ atomic_set(&bp->interrupt_occurred, 0);
- if (FCOE_INIT(bp) &&
- (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
- /*
- * Prevent local bottom-halves from running as
- * we are going to change the local NAPI list.
- */
- local_bh_disable();
- napi_schedule(&bnx2x_fcoe(bp, napi));
- local_bh_enable();
+ /* HW attentions */
+ if (status & BNX2X_DEF_SB_ATT_IDX) {
+ bnx2x_attn_int(bp);
+ status &= ~BNX2X_DEF_SB_ATT_IDX;
}
- /* Handle EQ completions */
- bnx2x_eq_int(bp);
+ /* SP events: STAT_QUERY and others */
+ if (status & BNX2X_DEF_SB_IDX) {
+ struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+
+ if (FCOE_INIT(bp) &&
+ (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+ /* Prevent local bottom-halves from running as
+ * we are going to change the local NAPI list.
+ */
+ local_bh_disable();
+ napi_schedule(&bnx2x_fcoe(bp, napi));
+ local_bh_enable();
+ }
- bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
- le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
+ /* Handle EQ completions */
+ bnx2x_eq_int(bp);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
+ le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
- status &= ~BNX2X_DEF_SB_IDX;
- }
+ status &= ~BNX2X_DEF_SB_IDX;
+ }
- if (unlikely(status))
- DP(BNX2X_MSG_SP, "got an unknown interrupt! (status 0x%x)\n",
- status);
+ /* if status is non-zero then perhaps something went wrong */
+ if (unlikely(status))
+ DP(BNX2X_MSG_SP,
+ "got an unknown interrupt! (status 0x%x)\n", status);
- bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
- le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
+ /* ack status block only if something was actually handled */
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
+ le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
+ }
/* afex - poll to check if VIFSET_ACK should be sent to MFW */
if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
@@ -5076,21 +5598,22 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
rcu_read_unlock();
}
- queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+ /* schedule sp task to perform default status block work, ack
+ * attentions and enable interrupts.
+ */
+ bnx2x_schedule_sp_task(bp);
return IRQ_HANDLED;
}
/* end of slow path */
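
The reworked sp_task gates all default-status-block work on the interrupt_occurred atomic that bnx2x_msix_sp_int sets via bnx2x_schedule_sp_task(), so a task scheduled for other reasons (such as the AFEX poll at the bottom) skips the IGU ack. A userspace sketch of that handshake using C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int interrupt_occurred;

static void msix_sp_int(void)
{
	atomic_store(&interrupt_occurred, 1);
	/* bnx2x_schedule_sp_task() queues sp_task here */
}

static void sp_task(void)
{
	if (atomic_load(&interrupt_occurred)) {
		atomic_store(&interrupt_occurred, 0);
		printf("update dsb idx, attn, EQ, ack IGU\n");
	}
	/* non-interrupt work (e.g. the AFEX VIFSET poll) runs either way */
}

int main(void)
{
	msix_sp_int();
	sp_task();	/* handles and acks */
	sp_task();	/* flag clear: skips the IGU ack */
	return 0;
}
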
-
void bnx2x_drv_pulse(struct bnx2x *bp)
{
SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
bp->fw_drv_pulse_wr_seq);
}
-
static void bnx2x_timer(unsigned long data)
{
struct bnx2x *bp = (struct bnx2x *) data;
@@ -5098,33 +5621,36 @@ static void bnx2x_timer(unsigned long data)
if (!netif_running(bp->dev))
return;
- if (!BP_NOMCP(bp)) {
+ if (IS_PF(bp) &&
+ !BP_NOMCP(bp)) {
int mb_idx = BP_FW_MB_IDX(bp);
- u32 drv_pulse;
- u32 mcp_pulse;
+ u16 drv_pulse;
+ u16 mcp_pulse;
++bp->fw_drv_pulse_wr_seq;
bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
- /* TBD - add SYSTEM_TIME */
drv_pulse = bp->fw_drv_pulse_wr_seq;
bnx2x_drv_pulse(bp);
mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
MCP_PULSE_SEQ_MASK);
/* The delta between driver pulse and mcp response
- * should be 1 (before mcp response) or 0 (after mcp response)
+ * should not get too big. If the MFW is more than 5 pulses
+ * behind, we should worry about it enough to generate an error
+ * log.
*/
- if ((drv_pulse != mcp_pulse) &&
- (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
- /* someone lost a heartbeat... */
- BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
+ if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
+ BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
drv_pulse, mcp_pulse);
- }
}
if (bp->state == BNX2X_STATE_OPEN)
bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
+ /* sample pf vf bulletin board for new posts from pf */
+ if (IS_VF(bp))
+ bnx2x_timer_sriov(bp);
+
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
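
The new pulse check replaces the exact "delta is 0 or 1" comparison with a wraparound-safe distance over the masked sequence space. A small demo, assuming MCP_PULSE_SEQ_MASK is the 15-bit mask defined in bnx2x_hsi.h:

#include <stdio.h>

#define MCP_PULSE_SEQ_MASK 0x7fff	/* assumption: 15-bit sequence per bnx2x_hsi.h */

static int mfw_lagging(unsigned short drv_pulse, unsigned short mcp_pulse)
{
	return ((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5;
}

int main(void)
{
	/* wraparound: driver at 2, MFW answered at 0x7ffe -> distance 4 */
	printf("%d\n", mfw_lagging(2, 0x7ffe));		/* 0 */
	printf("%d\n", mfw_lagging(100, 90));		/* 1: ten pulses behind */
	return 0;
}
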
@@ -5145,7 +5671,6 @@ static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
else
for (i = 0; i < len; i++)
REG_WR8(bp, addr + i, fill);
-
}
/* helper: writes FP SP data to FW - data_size in dwords */
@@ -5224,10 +5749,8 @@ static void bnx2x_zero_sp_sb(struct bnx2x *bp)
bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
CSTORM_SP_SYNC_BLOCK_SIZE);
-
}
-
static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
int igu_sb_id, int igu_seg_id)
{
@@ -5237,7 +5760,6 @@ static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
hc_sm->time_to_expire = 0xFFFFFFFF;
}
-
/* allocates state machine ids. */
static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
{
@@ -5267,7 +5789,7 @@ static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
}
-static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
int igu_seg_id;
@@ -5323,7 +5845,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
- /* write indecies to HW */
+ /* write indices to HW - PCI guarantees endianness of regpairs */
bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}
@@ -5411,6 +5933,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
bnx2x_zero_sp_sb(bp);
+ /* PCI guarantees endianness of regpairs */
sp_sb_data.state = SB_ENABLED;
sp_sb_data.host_sb_addr.lo = U64_LO(section);
sp_sb_data.host_sb_addr.hi = U64_HI(section);
@@ -5462,18 +5985,17 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
bp->eq_cons = 0;
bp->eq_prod = NUM_EQ_DESC;
bp->eq_cons_sb = BNX2X_EQ_INDEX;
- /* we want a warning message before it gets rought... */
+ /* we want a warning message before it gets rough... */
atomic_set(&bp->eq_spq_left,
min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
}
-
/* called with netif_addr_lock_bh() */
-void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
- unsigned long rx_mode_flags,
- unsigned long rx_accept_flags,
- unsigned long tx_accept_flags,
- unsigned long ramrod_flags)
+static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+ unsigned long rx_mode_flags,
+ unsigned long rx_accept_flags,
+ unsigned long tx_accept_flags,
+ unsigned long ramrod_flags)
{
struct bnx2x_rx_mode_ramrod_params ramrod_param;
int rc;
@@ -5503,22 +6025,21 @@ void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
rc = bnx2x_config_rx_mode(bp, &ramrod_param);
if (rc < 0) {
BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
- return;
+ return rc;
}
+
+ return 0;
}
-/* called with netif_addr_lock_bh() */
-void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
+ unsigned long *rx_accept_flags,
+ unsigned long *tx_accept_flags)
{
- unsigned long rx_mode_flags = 0, ramrod_flags = 0;
- unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
+ /* Clear the flags first */
+ *rx_accept_flags = 0;
+ *tx_accept_flags = 0;
- if (!NO_FCOE(bp))
-
- /* Configure rx_mode of FCoE Queue */
- __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
-
- switch (bp->rx_mode) {
+ switch (rx_mode) {
case BNX2X_RX_MODE_NONE:
/*
* 'drop all' supersedes any accept flags that may have been
@@ -5526,80 +6047,89 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
*/
break;
case BNX2X_RX_MODE_NORMAL:
- __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
/* internal switching mode */
- __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
break;
case BNX2X_RX_MODE_ALLMULTI:
- __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
/* internal switching mode */
- __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
break;
case BNX2X_RX_MODE_PROMISC:
- /* According to deffinition of SI mode, iface in promisc mode
+ /* According to definition of SI mode, iface in promisc mode
* should receive matched and unmatched (in resolution of port)
* unicast packets.
*/
- __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
/* internal switching mode */
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
if (IS_MF_SI(bp))
- __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
else
- __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
break;
default:
- BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode);
- return;
+ BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
+ return -EINVAL;
}
+ /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
- __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
}
+ return 0;
+}
+
+/* called with netif_addr_lock_bh() */
+static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+{
+ unsigned long rx_mode_flags = 0, ramrod_flags = 0;
+ unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
+ int rc;
+
+ if (!NO_FCOE(bp))
+ /* Configure rx_mode of FCoE Queue */
+ __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
+
+ rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
+ &tx_accept_flags);
+ if (rc)
+ return rc;
+
__set_bit(RAMROD_RX, &ramrod_flags);
__set_bit(RAMROD_TX, &ramrod_flags);
- bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags,
- tx_accept_flags, ramrod_flags);
+ return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
+ rx_accept_flags, tx_accept_flags,
+ ramrod_flags);
}
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
int i;
- if (IS_MF_SI(bp))
- /*
- * In switch independent mode, the TSTORM needs to accept
- * packets that failed classification, since approximate match
- * mac addresses aren't written to NIG LLH
- */
- REG_WR8(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
- else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
- REG_WR8(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
-
/* Zero this manually as its initialization is
currently missing in the initTool */
for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
@@ -5670,7 +6200,7 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
/* init shortcut */
fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
- /* Setup SB indicies */
+ /* Setup SB indices */
fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
/* Configure Queue State object */
@@ -5688,6 +6218,13 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
cids[cos] = fp->txdata_ptr[cos]->cid;
}
+ /* nothing more for vf to do here */
+ if (IS_VF(bp))
+ return;
+
+ bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
+ fp->fw_sb_id, fp->igu_sb_id);
+ bnx2x_update_fpsb_idx(fp);
bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
bnx2x_sp_mapping(bp, q_rdata), q_type);
@@ -5697,13 +6234,10 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
*/
bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
- DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
- fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
- fp->igu_sb_id);
- bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
- fp->fw_sb_id, fp->igu_sb_id);
-
- bnx2x_update_fpsb_idx(fp);
+ DP(NETIF_MSG_IFUP,
+ "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
+ fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
+ fp->igu_sb_id);
}
static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
@@ -5722,6 +6256,8 @@ static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
}
+ *txdata->tx_cons_sb = cpu_to_le16(0);
+
SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
txdata->tx_db.data.zero_fill1 = 0;
txdata->tx_db.data.prod = 0;
@@ -5740,6 +6276,7 @@ static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
for_each_tx_queue_cnic(bp, i)
bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
}
+
static void bnx2x_init_tx_rings(struct bnx2x *bp)
{
int i;
@@ -5750,6 +6287,47 @@ static void bnx2x_init_tx_rings(struct bnx2x *bp)
bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
}
+static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
+{
+ struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+ unsigned long q_type = 0;
+
+ bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
+ bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
+ BNX2X_FCOE_ETH_CL_ID_IDX);
+ bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
+ bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
+ bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
+ bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
+ bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
+ fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
+ fp);
+
+ DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
+
+ /* qZone id equals to FW (per path) client id */
+ bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
+ /* init shortcut */
+ bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
+ bnx2x_rx_ustorm_prods_offset(fp);
+
+ /* Configure Queue State object */
+ __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+ __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+
+ /* No multi-CoS for FCoE L2 client */
+ BUG_ON(fp->max_cos != 1);
+
+ bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
+ &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+ bnx2x_sp_mapping(bp, q_rdata), q_type);
+
+ DP(NETIF_MSG_IFUP,
+ "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
+ fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
+ fp->igu_sb_id);
+}
+
void bnx2x_nic_init_cnic(struct bnx2x *bp)
{
if (!NO_FCOE(bp))
@@ -5769,24 +6347,36 @@ void bnx2x_nic_init_cnic(struct bnx2x *bp)
mmiowb();
}
-void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
+void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
{
int i;
+ /* Setup NIC internals and enable interrupts */
for_each_eth_queue(bp, i)
bnx2x_init_eth_fp(bp, i);
- /* Initialize MOD_ABS interrupts */
- bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
- bp->common.shmem_base, bp->common.shmem2_base,
- BP_PORT(bp));
+
/* ensure status block indices were read */
rmb();
-
- bnx2x_init_def_sb(bp);
- bnx2x_update_dsb_idx(bp);
bnx2x_init_rx_rings(bp);
bnx2x_init_tx_rings(bp);
- bnx2x_init_sp_ring(bp);
+
+ if (IS_PF(bp)) {
+ /* Initialize MOD_ABS interrupts */
+ bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
+ bp->common.shmem_base,
+ bp->common.shmem2_base, BP_PORT(bp));
+
+ /* initialize the default status block and sp ring */
+ bnx2x_init_def_sb(bp);
+ bnx2x_update_dsb_idx(bp);
+ bnx2x_init_sp_ring(bp);
+ } else {
+ bnx2x_memset_stats(bp);
+ }
+}
+
+void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
+{
bnx2x_init_eq_ring(bp);
bnx2x_init_internal(bp, load_code);
bnx2x_pf_init(bp);
@@ -5804,12 +6394,7 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
AEU_INPUTS_ATTN_BITS_SPIO5);
}
-/* end of nic init */
-
-/*
- * gzip service functions
- */
-
+/* gzip service functions */
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
@@ -5965,7 +6550,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
if (val == 0x10)
break;
- msleep(10);
+ usleep_range(10000, 20000);
count--;
}
if (val != 0x10) {
@@ -5980,7 +6565,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
if (val == 1)
break;
- msleep(10);
+ usleep_range(10000, 20000);
count--;
}
if (val != 0x1) {
@@ -6021,7 +6606,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
if (val == 0xb0)
break;
- msleep(10);
+ usleep_range(10000, 20000);
count--;
}
if (val != 0xb0) {
@@ -6225,49 +6810,6 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}
-static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
-{
- u32 offset = 0;
-
- if (CHIP_IS_E1(bp))
- return;
- if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
- return;
-
- switch (BP_ABS_FUNC(bp)) {
- case 0:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
- break;
- case 1:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
- break;
- case 2:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
- break;
- case 3:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
- break;
- case 4:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
- break;
- case 5:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
- break;
- case 6:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
- break;
- case 7:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
- break;
- default:
- return;
- }
-
- REG_WR(bp, offset, pretend_func_num);
- REG_RD(bp, offset);
- DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
-}
-
void bnx2x_pf_disable(struct bnx2x *bp)
{
u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
@@ -6311,7 +6853,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));
/*
- * take the UNDI lock to protect undi_unload flow from accessing
+ * take the RESET lock to protect undi_unload flow from accessing
* registers while we're resetting the chip
*/
bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
@@ -6441,7 +6983,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
* queues with "old" ILT addresses.
* c. PF enable in the PGLC.
* d. Clear the was_error of the PF in the PGLC. (could have
- * occured while driver was down)
+ * occurred while driver was down)
* e. PF enable in the CFC (WEAK + STRONG)
* f. Timers scan enable
* 3. PF driver unload flow:
@@ -6453,7 +6995,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
* stay set)
* f. If this is VNIC 3 of a port then also init
* first_timers_ilt_entry to zero and last_timers_ilt_entry
- * to the last enrty in the ILT.
+ * to the last entry in the ILT.
*
* Notes:
* Currently the PF error in the PGLC is non recoverable.
@@ -6482,7 +7024,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
/* Step 1: set zeroes to all ilt page entries with valid bit on
* Step 2: set the timers first/last ilt entry to point
* to the entire range to prevent ILT range error for 3rd/4th
- * vnic (this code assumes existance of the vnic)
+ * vnic (this code assumes existence of the vnic)
*
* both steps performed by call to bnx2x_ilt_client_init_op()
* with dummy TM client
@@ -6499,7 +7041,6 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
}
-
REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
@@ -6524,6 +7065,8 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
+ bnx2x_iov_init_dmae(bp);
+
/* clean the DMAE memory */
bp->dmae_ready = 1;
bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
@@ -6543,7 +7086,6 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
-
/* QM queues pointers table */
bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
@@ -6555,7 +7097,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
- REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
+
if (!CHIP_REV_IS_SLOW(bp))
/* enable hw interrupt from doorbell Q */
REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
@@ -6782,8 +7324,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
int port = BP_PORT(bp);
int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
u32 low, high;
- u32 val;
-
+ u32 val, reg;
DP(NETIF_MSG_HW, "starting port init port %d\n", port);
@@ -6849,7 +7390,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
BRB1_REG_MAC_GUARANTIED_1 :
BRB1_REG_MAC_GUARANTIED_0), 40);
-
bnx2x_init_block(bp, BLOCK_PRS, init_phase);
if (CHIP_IS_E3B0(bp)) {
if (IS_MF_AFEX(bp)) {
@@ -6921,14 +7461,25 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
/* init aeu_mask_attn_func_0/1:
- * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
- * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
+ * - SF mode: bits 3-7 are masked. Only bits 0-2 are in use
+ * - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF
* bits 4-7 are used for "per vn group attention" */
val = IS_MF(bp) ? 0xF7 : 0x7;
/* Enable DCBX attention for all but E1 */
val |= CHIP_IS_E1(bp) ? 0 : 0x10;
REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
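/* Worked example for the mask above, assuming a non-E1 chip: SF mode
 * gives 0x7 | 0x10 = 0x17 (bits 0-2 plus the DCBX bit), while MF mode
 * gives 0xF7 | 0x10 = 0xF7, since bit 4 already belongs to the per-vn
 * group attention bits.
 */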
+ /* SCPAD_PARITY should NOT trigger the close-the-gates flow */
+ reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
+ REG_WR(bp, reg,
+ REG_RD(bp, reg) &
+ ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
+
+ reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
+ REG_WR(bp, reg,
+ REG_RD(bp, reg) &
+ ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
+
bnx2x_init_block(bp, BLOCK_NIG, init_phase);
if (!CHIP_IS_E1x(bp)) {
@@ -6980,7 +7531,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
}
}
-
/* If SPIO5 is set to generate interrupts, enable it for this port */
val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
if (val & MISC_SPIO_SPIO5) {
@@ -7009,15 +7559,14 @@ static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
REG_WR_DMAE(bp, reg, wb_write, 2);
}
-static void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
- u8 idu_sb_id, bool is_Pf)
+void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
{
u32 data, ctl, cnt = 100;
u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
u32 sb_bit = 1 << (idu_sb_id%32);
- u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
+ u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
/* Not supported in BC mode */
@@ -7048,7 +7597,6 @@ static void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
msleep(20);
-
if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
DP(NETIF_MSG_HW,
"Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
@@ -7068,7 +7616,6 @@ static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
bnx2x_ilt_wr(bp, i, 0);
}
-
static void bnx2x_init_searcher(struct bnx2x *bp)
{
int port = BP_PORT(bp);
@@ -7104,7 +7651,6 @@ static int bnx2x_reset_nic_mode(struct bnx2x *bp)
int rc, i, port = BP_PORT(bp);
int vlan_en = 0, mac_en[NUM_MACS];
-
/* Close input from network */
if (bp->mf_mode == SINGLE_FUNCTION) {
bnx2x_set_rx_filter(&bp->link_params, 0);
@@ -7179,7 +7725,7 @@ int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
if (CONFIGURE_NIC_MODE(bp)) {
- /* Configrue searcher as part of function hw init */
+ /* Configure searcher as part of function hw init */
bnx2x_init_searcher(bp);
/* Reset NIC mode */
@@ -7208,8 +7754,10 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
/* FLR cleanup - hmmm */
if (!CHIP_IS_E1x(bp)) {
rc = bnx2x_pf_flr_clnup(bp);
- if (rc)
+ if (rc) {
+ bnx2x_fw_dump(bp);
return rc;
+ }
}
/* set MSI reconfigure capability */
@@ -7226,12 +7774,21 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
ilt = BP_ILT(bp);
cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
+ if (IS_SRIOV(bp))
+ cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
+ cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
+
+ /* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precede
+ * those of the VFs, so the start line should be reset
+ */
+ cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
for (i = 0; i < L2_ILT_LINES(bp); i++) {
ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
ilt->lines[cdu_ilt_start + i].page_mapping =
bp->context[i].cxt_mapping;
ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
}
+
bnx2x_ilt_init_op(bp, INITOP_SET);
if (!CONFIGURE_NIC_MODE(bp)) {
@@ -7241,8 +7798,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
} else {
/* Set NIC mode */
REG_WR(bp, PRS_REG_NIC_MODE, 1);
- DP(NETIF_MSG_IFUP, "NIC MODE configrued\n");
-
+ DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
}
if (!CHIP_IS_E1x(bp)) {
@@ -7304,6 +7860,10 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_TM, init_phase);
bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
+ REG_WR(bp, DORQ_REG_MODE_ACT, 1); /* no dpm */
+
+ bnx2x_iov_init_dq(bp);
+
bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
bnx2x_init_block(bp, BLOCK_PRS, init_phase);
bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
@@ -7436,7 +7996,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
}
bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
- /* !!! these should become driver const once
+ /* !!! These should become driver const once
rf-tool supports split-68 const */
REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
@@ -7493,7 +8053,6 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
return 0;
}
-
void bnx2x_free_mem_cnic(struct bnx2x *bp)
{
bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
@@ -7512,16 +8071,15 @@ void bnx2x_free_mem(struct bnx2x *bp)
{
int i;
- /* fastpath */
- bnx2x_free_fp_mem(bp);
- /* end of fastpath */
+ BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+
+ if (IS_VF(bp))
+ return;
BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
sizeof(struct host_sp_status_block));
- BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
- bp->fw_stats_data_sz + bp->fw_stats_req_sz);
-
BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
sizeof(struct bnx2x_slowpath));
@@ -7536,84 +8094,33 @@ void bnx2x_free_mem(struct bnx2x *bp)
BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
BCM_PAGE_SIZE * NUM_EQ_PAGES);
-}
-
-static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
-{
- int num_groups;
- int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
-
- /* number of queues for statistics is number of eth queues + FCoE */
- u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
-
- /* Total number of FW statistics requests =
- * 1 for port stats + 1 for PF stats + potential 1 for FCoE stats +
- * num of queues
- */
- bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
-
-
- /* Request is built from stats_query_header and an array of
- * stats_query_cmd_group each of which contains
- * STATS_QUERY_CMD_COUNT rules. The real number or requests is
- * configured in the stats_query_header.
- */
- num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) +
- (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
- bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
- num_groups * sizeof(struct stats_query_cmd_group);
-
- /* Data for statistics requests + stats_conter
- *
- * stats_counter holds per-STORM counters that are incremented
- * when STORM has finished with the current request.
- *
- * memory for FCoE offloaded statistics are counted anyway,
- * even if they will not be sent.
- */
- bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
- sizeof(struct per_pf_stats) +
- sizeof(struct fcoe_statistics_params) +
- sizeof(struct per_queue_stats) * num_queue_stats +
- sizeof(struct stats_counter);
-
- BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
- bp->fw_stats_data_sz + bp->fw_stats_req_sz);
-
- /* Set shortcuts */
- bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
- bp->fw_stats_req_mapping = bp->fw_stats_mapping;
-
- bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
- ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
-
- bp->fw_stats_data_mapping = bp->fw_stats_mapping +
- bp->fw_stats_req_sz;
- return 0;
+ BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
-alloc_mem_err:
- BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
- bp->fw_stats_data_sz + bp->fw_stats_req_sz);
- BNX2X_ERR("Can't allocate memory\n");
- return -ENOMEM;
+ bnx2x_iov_free_mem(bp);
}
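/* The reworked allocation call sites below assume that BNX2X_PCI_ALLOC
 * (changed in bnx2x_cmn.h, not shown in this diff) now returns the mapped
 * buffer instead of hiding a 'goto alloc_mem_err'. A plausible sketch of
 * such a macro as a GCC statement expression; the exact definition is an
 * assumption:
 */
#define BNX2X_PCI_ALLOC_SKETCH(y, size)					\
({									\
	void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
	x;								\
})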
int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
{
- if (!CHIP_IS_E1x(bp))
+ if (!CHIP_IS_E1x(bp)) {
/* size = the status block + ramrod buffers */
- BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
- sizeof(struct host_hc_status_block_e2));
- else
- BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb,
- &bp->cnic_sb_mapping,
- sizeof(struct
- host_hc_status_block_e1x));
+ bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e2));
+ if (!bp->cnic_sb.e2_sb)
+ goto alloc_mem_err;
+ } else {
+ bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e1x));
+ if (!bp->cnic_sb.e1x_sb)
+ goto alloc_mem_err;
+ }
- if (CONFIGURE_NIC_MODE(bp))
- /* allocate searcher T2 table, as it wan't allocated before */
- BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
+ if (CONFIGURE_NIC_MODE(bp) && !bp->t2) {
+ /* allocate searcher T2 table, as it wasn't allocated before */
+ bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
+ if (!bp->t2)
+ goto alloc_mem_err;
+ }
/* write address to which L5 should insert its values */
bp->cnic_eth_dev.addr_drv_info_to_mcp =
@@ -7634,18 +8141,21 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
{
int i, allocated, context_size;
- if (!CONFIGURE_NIC_MODE(bp))
+ if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) {
/* allocate searcher T2 table */
- BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
-
- BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
- sizeof(struct host_sp_status_block));
+ bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
+ if (!bp->t2)
+ goto alloc_mem_err;
+ }
- BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
- sizeof(struct bnx2x_slowpath));
+ bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping,
+ sizeof(struct host_sp_status_block));
+ if (!bp->def_status_blk)
+ goto alloc_mem_err;
- /* Allocated memory for FW statistics */
- if (bnx2x_alloc_fw_stats_mem(bp))
+ bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping,
+ sizeof(struct bnx2x_slowpath));
+ if (!bp->slowpath)
goto alloc_mem_err;
/* Allocate memory for CDU context:
@@ -7666,30 +8176,34 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
for (i = 0, allocated = 0; allocated < context_size; i++) {
bp->context[i].size = min(CDU_ILT_PAGE_SZ,
(context_size - allocated));
- BNX2X_PCI_ALLOC(bp->context[i].vcxt,
- &bp->context[i].cxt_mapping,
- bp->context[i].size);
+ bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping,
+ bp->context[i].size);
+ if (!bp->context[i].vcxt)
+ goto alloc_mem_err;
allocated += bp->context[i].size;
}
- BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
+ bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line),
+ GFP_KERNEL);
+ if (!bp->ilt->lines)
+ goto alloc_mem_err;
if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
goto alloc_mem_err;
+ if (bnx2x_iov_alloc_mem(bp))
+ goto alloc_mem_err;
+
/* Slow path ring */
- BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
+ bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE);
+ if (!bp->spq)
+ goto alloc_mem_err;
/* EQ */
- BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
- BCM_PAGE_SIZE * NUM_EQ_PAGES);
-
-
- /* fastpath */
- /* need to be done at the end, since it's self adjusting to amount
- * of memory available for RSS queues
- */
- if (bnx2x_alloc_fp_mem(bp))
+ bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping,
+ BCM_PAGE_SIZE * NUM_EQ_PAGES);
+ if (!bp->eq_ring)
goto alloc_mem_err;
+
return 0;
alloc_mem_err:
@@ -7763,8 +8277,6 @@ int bnx2x_del_all_macs(struct bnx2x *bp,
int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
{
- unsigned long ramrod_flags = 0;
-
if (is_zero_ether_addr(bp->dev->dev_addr) &&
(IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
@@ -7772,17 +8284,26 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
return 0;
}
- DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
+ if (IS_PF(bp)) {
+ unsigned long ramrod_flags = 0;
- __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
- /* Eth MAC is set on RSS leading client (fp[0]) */
- return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->sp_objs->mac_obj,
- set, BNX2X_ETH_MAC, &ramrod_flags);
+ DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
+ &bp->sp_objs->mac_obj, set,
+ BNX2X_ETH_MAC, &ramrod_flags);
+ } else { /* vf */
+ return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
+ bp->fp->index, true);
+ }
}
int bnx2x_setup_leading(struct bnx2x *bp)
{
- return bnx2x_setup_queue(bp, &bp->fp[0], 1);
+ if (IS_PF(bp))
+ return bnx2x_setup_queue(bp, &bp->fp[0], true);
+ else /* VF */
+ return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
}
/**
@@ -7792,43 +8313,55 @@ int bnx2x_setup_leading(struct bnx2x *bp)
*
* In case of MSI-X it will also try to enable MSI-X.
*/
-void bnx2x_set_int_mode(struct bnx2x *bp)
+int bnx2x_set_int_mode(struct bnx2x *bp)
{
+ int rc = 0;
+
+ if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
+ BNX2X_ERR("VF not loaded since interrupt mode not msix\n");
+ return -EINVAL;
+ }
+
switch (int_mode) {
- case INT_MODE_MSI:
+ case BNX2X_INT_MODE_MSIX:
+ /* attempt to enable msix */
+ rc = bnx2x_enable_msix(bp);
+
+ /* msix attained */
+ if (!rc)
+ return 0;
+
+ /* vfs use only msix */
+ if (rc && IS_VF(bp))
+ return rc;
+
+ /* failed to enable multiple MSI-X */
+ BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
+ bp->num_queues,
+ 1 + bp->num_cnic_queues);
+
+ /* falling through... */
+ case BNX2X_INT_MODE_MSI:
bnx2x_enable_msi(bp);
+
/* falling through... */
- case INT_MODE_INTx:
+ case BNX2X_INT_MODE_INTX:
bp->num_ethernet_queues = 1;
bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
BNX2X_DEV_INFO("set number of queues to 1\n");
break;
default:
- /* if we can't use MSI-X we only need one fp,
- * so try to enable MSI-X with the requested number of fp's
- * and fallback to MSI or legacy INTx with one fp
- */
- if (bnx2x_enable_msix(bp) ||
- bp->flags & USING_SINGLE_MSIX_FLAG) {
- /* failed to enable multiple MSI-X */
- BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
- bp->num_queues,
- 1 + bp->num_cnic_queues);
-
- bp->num_queues = 1 + bp->num_cnic_queues;
-
- /* Try to enable MSI */
- if (!(bp->flags & USING_SINGLE_MSIX_FLAG) &&
- !(bp->flags & DISABLE_MSI_FLAG))
- bnx2x_enable_msi(bp);
- }
- break;
+ BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
+ return -EINVAL;
}
+ return 0;
}
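/* Hedged usage sketch for the new return value; the real consumer is
 * bnx2x_nic_load(), whose exact error handling is not shown in this diff:
 */
static int example_configure_interrupts(struct bnx2x *bp)
{
	int rc = bnx2x_set_int_mode(bp);

	if (rc)			/* e.g. a VF without MSI-X support */
		return rc;	/* caller aborts the load */
	return 0;
}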
-/* must be called prioir to any HW initializations */
+/* must be called prior to any HW initializations */
static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
{
+ if (IS_SRIOV(bp))
+ return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
return L2_ILT_LINES(bp);
}
@@ -7881,7 +8414,6 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
ilt_client->page_size,
ilt_client->flags,
ilog2(ilt_client->page_size >> 12));
-
}
if (CNIC_SUPPORT(bp)) {
@@ -7937,7 +8469,6 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
{
-
u8 cos;
int cxt_index, cxt_offset;
@@ -7946,7 +8477,7 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
__set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
__set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
- /* If HC is supporterd, enable host coalescing in the transition
+ /* If HC is supported, enable host coalescing in the transition
* to INIT state.
*/
__set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
@@ -8018,7 +8549,6 @@ static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return bnx2x_queue_state_change(bp, q_params);
}
-
/**
* bnx2x_setup_queue - setup queue
*
@@ -8067,7 +8597,6 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
DP(NETIF_MSG_IFUP, "init complete\n");
-
/* Now move the Queue to the SETUP state... */
memset(setup_params, 0, sizeof(*setup_params));
@@ -8128,7 +8657,6 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
/* We want to wait for completion in this context */
__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
-
/* close tx-only connections */
for (tx_index = FIRST_TX_ONLY_COS_INDEX;
tx_index < fp->max_cos;
@@ -8182,7 +8710,6 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
return bnx2x_queue_state_change(bp, &q_params);
}
-
static void bnx2x_reset_func(struct bnx2x *bp)
{
int port = BP_PORT(bp);
@@ -8211,8 +8738,8 @@ static void bnx2x_reset_func(struct bnx2x *bp)
/* SP SB */
REG_WR8(bp, BAR_CSTRORM_INTMEM +
- CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
- SB_DISABLED);
+ CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
+ SB_DISABLED);
for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
@@ -8235,7 +8762,7 @@ static void bnx2x_reset_func(struct bnx2x *bp)
* scan to complete
*/
for (i = 0; i < 200; i++) {
- msleep(10);
+ usleep_range(10000, 20000);
if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
break;
}
@@ -8361,6 +8888,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
else if (bp->wol) {
u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
u8 *mac_addr = bp->dev->dev_addr;
+ struct pci_dev *pdev = bp->pdev;
u32 val;
u16 pmc;
@@ -8377,9 +8905,9 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
/* Enable the PME and clear the status */
- pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
- pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
+ pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
@@ -8393,16 +8921,16 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
int path = BP_PATH(bp);
DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- load_count[path][0]--;
- load_count[path][1 + port]--;
+ path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+ bnx2x_load_count[path][2]);
+ bnx2x_load_count[path][0]--;
+ bnx2x_load_count[path][1 + port]--;
DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- if (load_count[path][0] == 0)
+ path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+ bnx2x_load_count[path][2]);
+ if (bnx2x_load_count[path][0] == 0)
reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
- else if (load_count[path][1 + port] == 0)
+ else if (bnx2x_load_count[path][1 + port] == 0)
reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
else
reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
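/* Illustrative restatement of the reset_code policy above (helper name is
 * hypothetical, not part of the driver): the last load on the path reports
 * COMMON, the last load on the port reports PORT, anything else reports
 * FUNCTION.
 */
static u32 example_unload_reset_code(u32 path_loads, u32 port_loads)
{
	if (path_loads == 0)
		return FW_MSG_CODE_DRV_UNLOAD_COMMON;
	if (port_loads == 0)
		return FW_MSG_CODE_DRV_UNLOAD_PORT;
	return FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
}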
@@ -8436,14 +8964,14 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
/*
* (assumption: No Attention from MCP at this stage)
- * PMF probably in the middle of TXdisable/enable transaction
+ * PMF probably in the middle of TX disable/enable transaction
* 1. Sync IRS for default SB
- * 2. Sync SP queue - this guarantes us that attention handling started
- * 3. Wait, that TXdisable/enable transaction completes
+ * 2. Sync SP queue - this guarantees us that attention handling started
+ * 3. Wait, that TX disable/enable transaction completes
*
- * 1+2 guranty that if DCBx attention was scheduled it already changed
- * pending bit of transaction from STARTED-->TX_STOPPED, if we alredy
- * received complettion for the transaction the state is TX_STOPPED.
+ * 1+2 guarantee that if DCBx attention was scheduled it already changed
+ * pending bit of transaction from STARTED-->TX_STOPPED, if we already
+ * received completion for the transaction the state is TX_STOPPED.
* State will return to STARTED after completion of TX_STOPPED-->STARTED
* transaction.
*/
@@ -8455,6 +8983,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
synchronize_irq(bp->pdev->irq);
flush_workqueue(bnx2x_wq);
+ flush_workqueue(bnx2x_iov_wq);
while (bnx2x_func_get_state(bp, &bp->func_obj) !=
BNX2X_F_STATE_STARTED && tout--)
@@ -8473,7 +9002,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
struct bnx2x_func_state_params func_params = {NULL};
DP(NETIF_MSG_IFDOWN,
- "Hmmm... unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n");
+ "Hmmm... Unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n");
func_params.f_obj = &bp->func_obj;
__set_bit(RAMROD_DRV_CLR_ONLY,
@@ -8513,7 +9042,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
}
/* Give HW time to discard old tx messages */
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
/* Clean all ETH MACs */
rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
@@ -8551,7 +9080,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
netif_addr_unlock_bh(bp->dev);
-
+ bnx2x_iov_chip_cleanup(bp);
/*
* Send the UNLOAD_REQUEST to the MCP. This will return if
@@ -8562,7 +9091,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
/*
* (assumption: No Attention from MCP at this stage)
- * PMF probably in the middle of TXdisable/enable transaction
+ * PMF probably in the middle of TX disable/enable transaction
*/
rc = bnx2x_func_wait_started(bp);
if (rc) {
@@ -8625,7 +9154,6 @@ unload_error:
if (rc)
BNX2X_ERR("HW_RESET failed\n");
-
/* Report UNLOAD_DONE to MCP */
bnx2x_send_unload_done(bp, keep_link);
}
@@ -8678,7 +9206,7 @@ static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
(!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
(val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
} else {
- /* Prevent incomming interrupts in IGU */
+ /* Prevent incoming interrupts in IGU */
val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
@@ -8936,7 +9464,7 @@ static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
if (pend_bits == 0)
break;
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
} while (cnt-- > 0);
if (cnt <= 0) {
@@ -8953,8 +9481,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
int cnt = 1000;
u32 val = 0;
u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
- u32 tags_63_32 = 0;
-
+ u32 tags_63_32 = 0;
/* Empty the Tetris buffer, wait for 1s */
do {
@@ -8972,7 +9499,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
(pgl_exp_rom2 == 0xffffffff) &&
(!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
break;
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
} while (cnt-- > 0);
if (cnt <= 0) {
@@ -8992,7 +9519,6 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
return -EAGAIN;
-
/* TBD: Indicate that "process kill" is in progress to MCP */
/* Clear "unprepared" bit */
@@ -9005,7 +9531,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
/* Wait for 1ms to empty GLUE and PCI-E core queues,
* PSWHST, GRC and PSWRD Tetris buffer.
*/
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
/* Prepare to chip reset: */
/* MCP */
@@ -9020,6 +9546,10 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
bnx2x_process_kill_chip_reset(bp, global);
barrier();
+ /* clear errors in PGB */
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
+
/* Recover after reset: */
/* MCP */
if (global && bnx2x_reset_mcp_comp(bp, val))
@@ -9180,7 +9710,7 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
* the first leader that performs a
* leader_reset() reset the global blocks in
* order to clear global attentions. Otherwise
- * the the gates will remain closed for that
+ * the gates will remain closed for that
* engine.
*/
if (load_status ||
@@ -9288,17 +9818,17 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
rtnl_lock();
- if (!netif_running(bp->dev))
- goto sp_rtnl_exit;
+ if (!netif_running(bp->dev)) {
+ rtnl_unlock();
+ return;
+ }
- /* if stop on error is defined no recovery flows should be executed */
+ if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
#ifdef BNX2X_STOP_ON_ERROR
- BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
- "you will need to reboot when done\n");
- goto sp_rtnl_not_reset;
+ BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
+ "you will need to reboot when done\n");
+ goto sp_rtnl_not_reset;
#endif
-
- if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
/*
* Clear all pending SP commands as we are going to reset the
* function anyway.
@@ -9308,10 +9838,17 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
bnx2x_parity_recover(bp);
- goto sp_rtnl_exit;
+ rtnl_unlock();
+ return;
}
if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
+#ifdef BNX2X_STOP_ON_ERROR
+ BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
+ "you will need to reboot when done\n");
+ goto sp_rtnl_not_reset;
+#endif
+
/*
* Clear all pending SP commands as we are going to reset the
* function anyway.
@@ -9322,7 +9859,8 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
bnx2x_nic_load(bp, LOAD_NORMAL);
- goto sp_rtnl_exit;
+ rtnl_unlock();
+ return;
}
#ifdef BNX2X_STOP_ON_ERROR
sp_rtnl_not_reset:
@@ -9340,13 +9878,53 @@ sp_rtnl_not_reset:
DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
netif_device_detach(bp->dev);
bnx2x_close(bp->dev);
+ rtnl_unlock();
+ return;
+ }
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
+ DP(BNX2X_MSG_SP,
+ "sending set mcast vf pf channel message from rtnl sp-task\n");
+ bnx2x_vfpf_set_mcast(bp->dev);
+ }
+ if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
+ &bp->sp_rtnl_state)) {
+ if (!test_bit(__LINK_STATE_NOCARRIER, &bp->dev->state)) {
+ bnx2x_tx_disable(bp);
+ BNX2X_ERR("PF indicated channel is not servicable anymore. This means this VF device is no longer operational\n");
+ }
+ }
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
+ DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
+ bnx2x_set_rx_mode_inner(bp);
+ }
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ &bp->sp_rtnl_state))
+ bnx2x_pf_set_vfs_vlan(bp);
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) {
+ bnx2x_dcbx_stop_hw_tx(bp);
+ bnx2x_dcbx_resume_hw_tx(bp);
}
-sp_rtnl_exit:
+ if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION,
+ &bp->sp_rtnl_state))
+ bnx2x_update_mng_version(bp);
+
+ /* work which needs the rtnl lock not taken (it takes the lock itself
+ * and can be called from other contexts as well)
+ */
rtnl_unlock();
-}
-/* end of nic load/unload */
+ /* enable SR-IOV if applicable */
+ if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
+ &bp->sp_rtnl_state)) {
+ bnx2x_disable_sriov(bp);
+ bnx2x_enable_sriov(bp);
+ }
+}
static void bnx2x_period_task(struct work_struct *work)
{
@@ -9390,42 +9968,19 @@ static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
return base + (BP_ABS_FUNC(bp)) * stride;
}
-static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
-{
- u32 reg = bnx2x_get_pretend_reg(bp);
-
- /* Flush all outstanding writes */
- mmiowb();
-
- /* Pretend to be function 0 */
- REG_WR(bp, reg, 0);
- REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
-
- /* From now we are in the "like-E1" mode */
- bnx2x_int_disable(bp);
-
- /* Flush all outstanding writes */
- mmiowb();
-
- /* Restore the original function */
- REG_WR(bp, reg, BP_ABS_FUNC(bp));
- REG_RD(bp, reg);
-}
-
-static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
-{
- if (CHIP_IS_E1(bp))
- bnx2x_int_disable(bp);
- else
- bnx2x_undi_int_disable_e1h(bp);
-}
-
-static void bnx2x_prev_unload_close_mac(struct bnx2x *bp)
+static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
+ struct bnx2x_mac_vals *vals)
{
u32 val, base_addr, offset, mask, reset_reg;
bool mac_stopped = false;
u8 port = BP_PORT(bp);
+ /* reset addresses as they also mark which values were changed */
+ vals->bmac_addr = 0;
+ vals->umac_addr = 0;
+ vals->xmac_addr = 0;
+ vals->emac_addr = 0;
+
reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
if (!CHIP_IS_E3(bp)) {
@@ -9447,14 +10002,17 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp)
*/
wb_data[0] = REG_RD(bp, base_addr + offset);
wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
+ vals->bmac_addr = base_addr + offset;
+ vals->bmac_val[0] = wb_data[0];
+ vals->bmac_val[1] = wb_data[1];
wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
- REG_WR(bp, base_addr + offset, wb_data[0]);
- REG_WR(bp, base_addr + offset + 0x4, wb_data[1]);
-
+ REG_WR(bp, vals->bmac_addr, wb_data[0]);
+ REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
}
BNX2X_DEV_INFO("Disable emac Rx\n");
- REG_WR(bp, NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4, 0);
-
+ vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
+ vals->emac_val = REG_RD(bp, vals->emac_addr);
+ REG_WR(bp, vals->emac_addr, 0);
mac_stopped = true;
} else {
if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
@@ -9465,21 +10023,24 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp)
val & ~(1 << 1));
REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
val | (1 << 1));
- REG_WR(bp, base_addr + XMAC_REG_CTRL, 0);
+ vals->xmac_addr = base_addr + XMAC_REG_CTRL;
+ vals->xmac_val = REG_RD(bp, vals->xmac_addr);
+ REG_WR(bp, vals->xmac_addr, 0);
mac_stopped = true;
}
mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
if (mask & reset_reg) {
BNX2X_DEV_INFO("Disable umac Rx\n");
base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
- REG_WR(bp, base_addr + UMAC_REG_COMMAND_CONFIG, 0);
+ vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
+ vals->umac_val = REG_RD(bp, vals->umac_addr);
+ REG_WR(bp, vals->umac_addr, 0);
mac_stopped = true;
}
}
if (mac_stopped)
msleep(20);
-
}
#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
@@ -9487,6 +10048,82 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp)
#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
+#define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
+#define BCM_5710_UNDI_FW_MF_MINOR (0x08)
+#define BCM_5710_UNDI_FW_MF_VERS (0x05)
+#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
+#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
+
+static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
+{
+ /* UNDI marks its presence in DORQ -
+ * it initializes CID offset for normal bell to 0x7
+ */
+ if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
+ MISC_REGISTERS_RESET_REG_1_RST_DORQ))
+ return false;
+
+ if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) {
+ BNX2X_DEV_INFO("UNDI previously loaded\n");
+ return true;
+ }
+
+ return false;
+}
+
+static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
+{
+ u8 major, minor, version;
+ u32 fw;
+
+ /* Must check that FW is loaded */
+ if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
+ MISC_REGISTERS_RESET_REG_1_RST_XSEM)) {
+ BNX2X_DEV_INFO("XSEM is reset - UNDI MF FW is not loaded\n");
+ return false;
+ }
+
+ /* Read Currently loaded FW version */
+ fw = REG_RD(bp, XSEM_REG_PRAM);
+ major = fw & 0xff;
+ minor = (fw >> 0x8) & 0xff;
+ version = (fw >> 0x10) & 0xff;
+ BNX2X_DEV_INFO("Loaded FW: 0x%08x: Major 0x%02x Minor 0x%02x Version 0x%02x\n",
+ fw, major, minor, version);
+
+ if (major > BCM_5710_UNDI_FW_MF_MAJOR)
+ return true;
+
+ if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
+ (minor > BCM_5710_UNDI_FW_MF_MINOR))
+ return true;
+
+ if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
+ (minor == BCM_5710_UNDI_FW_MF_MINOR) &&
+ (version >= BCM_5710_UNDI_FW_MF_VERS))
+ return true;
+
+ return false;
+}
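/* The three-way compare above is equivalent to packing the version bytes
 * into one integer and comparing once. A hedged alternative sketch
 * (helper name is hypothetical):
 */
static bool example_undi_fw_at_least_mf(u8 major, u8 minor, u8 version)
{
	u32 have = (major << 16) | (minor << 8) | version;
	u32 need = (BCM_5710_UNDI_FW_MF_MAJOR << 16) |
		   (BCM_5710_UNDI_FW_MF_MINOR << 8) |
		   BCM_5710_UNDI_FW_MF_VERS;

	return have >= need;
}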
+
+static void bnx2x_prev_unload_undi_mf(struct bnx2x *bp)
+{
+ int i;
+
+ /* Due to legacy (FW) code, the first function on each engine has a
+ * different offset macro from the rest of the functions.
+ * Setting this for all 8 functions is harmless regardless of whether
+ * this is actually a multi-function device.
+ */
+ for (i = 0; i < 2; i++)
+ REG_WR(bp, BNX2X_PREV_UNDI_MF_PORT(i), 1);
+
+ for (i = 2; i < 8; i++)
+ REG_WR(bp, BNX2X_PREV_UNDI_MF_FUNC(i - 2), 1);
+
+ BNX2X_DEV_INFO("UNDI FW (MF) set to discard\n");
+}
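/* Worked offsets for the two loops above (relative to BAR_TSTRORM_INTMEM,
 * macro bodies copied from this patch): ports 0/1 land at 0x150c/0x151c,
 * and functions 2..7 at 0x184c, 0x185c, ..., 0x189c.
 */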
+
static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc)
{
u16 rcq, bd;
@@ -9528,22 +10165,48 @@ static struct bnx2x_prev_path_list *
return NULL;
}
+static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
+{
+ struct bnx2x_prev_path_list *tmp_list;
+ int rc;
+
+ rc = down_interruptible(&bnx2x_prev_sem);
+ if (rc) {
+ BNX2X_ERR("Received %d when tried to take lock\n", rc);
+ return rc;
+ }
+
+ tmp_list = bnx2x_prev_path_get_entry(bp);
+ if (tmp_list) {
+ tmp_list->aer = 1;
+ rc = 0;
+ } else {
+ BNX2X_ERR("path %d: Entry does not exist for eeh; Flow occurs before initial insmod is over ?\n",
+ BP_PATH(bp));
+ }
+
+ up(&bnx2x_prev_sem);
+
+ return rc;
+}
+
static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
{
struct bnx2x_prev_path_list *tmp_list;
- int rc = false;
+ bool rc = false;
if (down_trylock(&bnx2x_prev_sem))
return false;
- list_for_each_entry(tmp_list, &bnx2x_prev_list, list) {
- if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
- bp->pdev->bus->number == tmp_list->bus &&
- BP_PATH(bp) == tmp_list->path) {
+ tmp_list = bnx2x_prev_path_get_entry(bp);
+ if (tmp_list) {
+ if (tmp_list->aer) {
+ DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
+ BP_PATH(bp));
+ } else {
rc = true;
BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
BP_PATH(bp));
- break;
}
}
@@ -9552,11 +10215,48 @@ static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
return rc;
}
+bool bnx2x_port_after_undi(struct bnx2x *bp)
+{
+ struct bnx2x_prev_path_list *entry;
+ bool val;
+
+ down(&bnx2x_prev_sem);
+
+ entry = bnx2x_prev_path_get_entry(bp);
+ val = !!(entry && (entry->undi & (1 << BP_PORT(bp))));
+
+ up(&bnx2x_prev_sem);
+
+ return val;
+}
+
static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
{
struct bnx2x_prev_path_list *tmp_list;
int rc;
+ rc = down_interruptible(&bnx2x_prev_sem);
+ if (rc) {
+ BNX2X_ERR("Received %d when tried to take lock\n", rc);
+ return rc;
+ }
+
+ /* Check whether the entry for this path already exists */
+ tmp_list = bnx2x_prev_path_get_entry(bp);
+ if (tmp_list) {
+ if (!tmp_list->aer) {
+ BNX2X_ERR("Re-Marking the path.\n");
+ } else {
+ DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
+ BP_PATH(bp));
+ tmp_list->aer = 0;
+ }
+ up(&bnx2x_prev_sem);
+ return 0;
+ }
+ up(&bnx2x_prev_sem);
+
+ /* Create an entry for this path and add it */
tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
if (!tmp_list) {
BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
@@ -9566,6 +10266,7 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
tmp_list->bus = bp->pdev->bus->number;
tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
tmp_list->path = BP_PATH(bp);
+ tmp_list->aer = 0;
tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
rc = down_interruptible(&bnx2x_prev_sem);
@@ -9573,8 +10274,8 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
BNX2X_ERR("Received %d when tried to take lock\n", rc);
kfree(tmp_list);
} else {
- BNX2X_DEV_INFO("Marked path [%d] - finished previous unload\n",
- BP_PATH(bp));
+ DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
+ BP_PATH(bp));
list_add(&tmp_list->list, &bnx2x_prev_list);
up(&bnx2x_prev_sem);
}
@@ -9584,11 +10285,8 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
static int bnx2x_do_flr(struct bnx2x *bp)
{
- int i;
- u16 status;
struct pci_dev *dev = bp->pdev;
-
if (CHIP_IS_E1x(bp)) {
BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
return -EINVAL;
@@ -9601,20 +10299,8 @@ static int bnx2x_do_flr(struct bnx2x *bp)
return -EINVAL;
}
- /* Wait for Transaction Pending bit clean */
- for (i = 0; i < 4; i++) {
- if (i)
- msleep((1 << (i - 1)) * 100);
-
- pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
- if (!(status & PCI_EXP_DEVSTA_TRPND))
- goto clear;
- }
-
- dev_err(&dev->dev,
- "transaction is not cleared; proceeding with reset anyway\n");
-
-clear:
+ if (!pci_wait_for_pending_transaction(dev))
+ dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
BNX2X_DEV_INFO("Initiating FLR\n");
bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
@@ -9632,11 +10318,17 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
if (bnx2x_prev_is_path_marked(bp))
return bnx2x_prev_mcp_done(bp);
+ BNX2X_DEV_INFO("Path is unmarked\n");
+
+ /* Cannot proceed with FLR if UNDI is loaded, since FW does not match */
+ if (bnx2x_prev_is_after_undi(bp))
+ goto out;
+
/* If function has FLR capabilities, and existing FW version matches
* the one required, then FLR will be sufficient to clean any residue
* left by previous driver
*/
- rc = bnx2x_test_firmware_version(bp, false);
+ rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);
if (!rc) {
/* fw version is good */
@@ -9652,6 +10344,7 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
BNX2X_DEV_INFO("Could not FLR\n");
+out:
/* Close the MCP request, return failure*/
rc = bnx2x_prev_mcp_done(bp);
if (!rc)
@@ -9664,12 +10357,16 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
{
u32 reset_reg, tmp_reg = 0, rc;
bool prev_undi = false;
+ struct bnx2x_mac_vals mac_vals;
+
/* It is possible a previous function received 'common' answer,
* but hasn't loaded yet, therefore creating a scenario of
* multiple functions receiving 'common' on the same path.
*/
BNX2X_DEV_INFO("Common unload Flow\n");
+ memset(&mac_vals, 0, sizeof(mac_vals));
+
if (bnx2x_prev_is_path_marked(bp))
return bnx2x_prev_mcp_done(bp);
@@ -9678,23 +10375,26 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
/* Reset should be performed after BRB is emptied */
if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
u32 timer_count = 1000;
+ bool need_write = true;
/* Close the MAC Rx to prevent BRB from filling up */
- bnx2x_prev_unload_close_mac(bp);
+ bnx2x_prev_unload_close_mac(bp, &mac_vals);
- /* Check if the UNDI driver was previously loaded
- * UNDI driver initializes CID offset for normal bell to 0x7
- */
- reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
- if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
- tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
- if (tmp_reg == 0x7) {
- BNX2X_DEV_INFO("UNDI previously loaded\n");
- prev_undi = true;
- /* clear the UNDI indication */
- REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
- }
+ /* close LLH filters towards the BRB */
+ bnx2x_set_rx_filter(&bp->link_params, 0);
+
+ /* Check if the UNDI driver was previously loaded */
+ if (bnx2x_prev_is_after_undi(bp)) {
+ prev_undi = true;
+ /* clear the UNDI indication */
+ REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+ /* clear possible idle check errors */
+ REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
}
+ if (!CHIP_IS_E1x(bp))
+ /* block FW from writing to host */
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+
/* wait until BRB is empty */
tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
while (timer_count) {
@@ -9712,21 +10412,41 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
else
timer_count--;
- /* If UNDI resides in memory, manually increment it */
- if (prev_undi)
+ /* New UNDI FW supports MF and contains better
+ * cleaning methods - might be redundant but harmless.
+ */
+ if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
+ if (need_write) {
+ bnx2x_prev_unload_undi_mf(bp);
+ need_write = false;
+ }
+ } else if (prev_undi) {
+ /* If UNDI resides in memory,
+ * manually increment it
+ */
bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
-
+ }
udelay(10);
}
if (!timer_count)
BNX2X_ERR("Failed to empty BRB, hope for the best\n");
-
}
/* No packets are in the pipeline, path is ready for reset */
bnx2x_reset_common(bp);
+ if (mac_vals.xmac_addr)
+ REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
+ if (mac_vals.umac_addr)
+ REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val);
+ if (mac_vals.emac_addr)
+ REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
+ if (mac_vals.bmac_addr) {
+ REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
+ REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
+ }
+
rc = bnx2x_prev_mark_path(bp, prev_undi);
if (rc) {
bnx2x_prev_mcp_done(bp);
@@ -9748,7 +10468,8 @@ static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
if (!CHIP_IS_E1x(bp)) {
u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
- BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
+ DP(BNX2X_MSG_SP,
+ "'was error' bit was found to be set in pglueb upon startup. Clearing\n");
REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
1 << BP_FUNC(bp));
}
@@ -9759,7 +10480,6 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
{
int time_counter = 10;
u32 rc, fw, hw_lock_reg, hw_lock_val;
- struct bnx2x_prev_path_list *prev_list;
BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
/* clear hw from errors which may have resulted from an interrupted
@@ -9772,7 +10492,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
(MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
(MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
- hw_lock_val = (REG_RD(bp, hw_lock_reg));
+ hw_lock_val = REG_RD(bp, hw_lock_reg);
if (hw_lock_val) {
if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
@@ -9787,11 +10507,11 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
BNX2X_DEV_INFO("Release previously held alr\n");
- REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
+ bnx2x_release_alr(bp);
}
-
do {
+ int aer = 0;
/* Lock MCP using an unload request */
fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
if (!fw) {
@@ -9800,12 +10520,23 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
break;
}
- if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
+ rc = down_interruptible(&bnx2x_prev_sem);
+ if (rc) {
+ BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n",
+ rc);
+ } else {
+ /* If Path is marked by EEH, ignore unload status */
+ aer = !!(bnx2x_prev_path_get_entry(bp) &&
+ bnx2x_prev_path_get_entry(bp)->aer);
+ up(&bnx2x_prev_sem);
+ }
+
+ if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
rc = bnx2x_prev_unload_common(bp);
break;
}
- /* non-common reply from MCP night require looping */
+ /* non-common reply from MCP might require looping */
rc = bnx2x_prev_unload_uncommon(bp);
if (rc != BNX2X_PREV_WAIT_NEEDED)
break;
@@ -9814,13 +10545,12 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
} while (--time_counter);
if (!time_counter || rc) {
- BNX2X_ERR("Failed unloading previous driver, aborting\n");
- rc = -EBUSY;
+ BNX2X_DEV_INFO("Unloading previous driver did not occur, Possibly due to MF UNDI\n");
+ rc = -EPROBE_DEFER;
}
/* Mark function if its port was used to boot from SAN */
- prev_list = bnx2x_prev_path_get_entry(bp);
- if (prev_list && (prev_list->undi & (1 << BP_PORT(bp))))
+ if (bnx2x_port_after_undi(bp))
bp->link_params.feature_config_flags |=
FEATURE_CONFIG_BOOT_FROM_SAN;
@@ -9840,8 +10570,12 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
id = ((val & 0xffff) << 16);
val = REG_RD(bp, MISC_REG_CHIP_REV);
id |= ((val & 0xf) << 12);
- val = REG_RD(bp, MISC_REG_CHIP_METAL);
- id |= ((val & 0xff) << 4);
+
+ /* Metal is read from PCI regs, but we can't access >=0x400 from
+ * the configuration space (so we need to reg_rd)
+ */
+ val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
+ id |= (((val >> 24) & 0xf) << 4);
val = REG_RD(bp, MISC_REG_BOND_ID);
id |= (val & 0xf);
bp->common.chip_id = id;
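/* Resulting chip_id field layout, per the shifts above (a worked decode;
 * the sample value is hypothetical):
 *   [31:16] chip num   [15:12] chip rev   [11:4] metal   [3:0] bond id
 * e.g. 0x168e1014 -> num 0x168e, rev 0x1, metal 0x01, bond 0x4.
 */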
@@ -9900,8 +10634,6 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
bnx2x_init_shmem(bp);
-
-
bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
MISC_REG_GENERIC_CR_1 :
MISC_REG_GENERIC_CR_0));
@@ -9976,6 +10708,10 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
+
+ bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
+ BC_SUPPORTS_RMMOD_CMD : 0;
+
boot_mode = SHMEM_RD(bp,
dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
@@ -9994,7 +10730,7 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
break;
}
- pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
+ pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
BNX2X_DEV_INFO("%sWoL capable\n",
@@ -10163,6 +10899,9 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
+ bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full;
}
BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
@@ -10357,10 +11096,10 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
- mac_hi = cpu_to_be16(mac_hi);
- mac_lo = cpu_to_be32(mac_lo);
- memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
- memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
+ __be16 mac_hi_be = cpu_to_be16(mac_hi);
+ __be32 mac_lo_be = cpu_to_be32(mac_lo);
+ memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
+ memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
}
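/* Worked example for the helper above (values hypothetical): with
 * mac_hi = 0x0011 and mac_lo = 0x22334455, the big-endian copies yield
 * mac_buf[] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }, i.e. the address
 * 00:11:22:33:44:55 in conventional wire order.
 */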
static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
@@ -10377,10 +11116,12 @@ static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->link_params.speed_cap_mask[0] =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].speed_capability_mask);
+ dev_info.port_hw_config[port].speed_capability_mask) &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
bp->link_params.speed_cap_mask[1] =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].speed_capability_mask2);
+ dev_info.port_hw_config[port].speed_capability_mask2) &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
bp->port.link_config[0] =
SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
@@ -10396,6 +11137,13 @@ static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
(config & PORT_FEATURE_WOL_ENABLED));
+ if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
+ PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
+ bp->flags |= NO_ISCSI_FLAG;
+ if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
+ PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
+ bp->flags |= NO_FCOE_FLAG;
+
BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n",
bp->link_params.lane_config,
bp->link_params.speed_cap_mask[0],
@@ -10464,7 +11212,6 @@ void bnx2x_get_iscsi_info(struct bnx2x *bp)
*/
if (!bp->cnic_eth_dev.max_iscsi_conn)
bp->flags |= no_flags;
-
}
static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
@@ -10481,12 +11228,56 @@ static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
}
+
+static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp)
+{
+ u8 count = 0;
+
+ if (IS_MF(bp)) {
+ u8 fid;
+
+ /* iterate over absolute function ids for this path: */
+ for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) {
+ if (IS_MF_SD(bp)) {
+ u32 cfg = MF_CFG_RD(bp,
+ func_mf_config[fid].config);
+
+ if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) &&
+ ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) ==
+ FUNC_MF_CFG_PROTOCOL_FCOE))
+ count++;
+ } else {
+ u32 cfg = MF_CFG_RD(bp,
+ func_ext_config[fid].
+ func_cfg);
+
+ if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) &&
+ (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
+ count++;
+ }
+ }
+ } else { /* SF */
+ int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1;
+
+ for (port = 0; port < port_cnt; port++) {
+ u32 lic = SHMEM_RD(bp,
+ drv_lic_key[port].max_fcoe_conn) ^
+ FW_ENCODE_32BIT_PATTERN;
+ if (lic)
+ count++;
+ }
+ }
+
+ return count;
+}
+
static void bnx2x_get_fcoe_info(struct bnx2x *bp)
{
int port = BP_PORT(bp);
int func = BP_ABS_FUNC(bp);
u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
drv_lic_key[port].max_fcoe_conn);
+ u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp);
if (!CNIC_SUPPORT(bp)) {
bp->flags |= NO_FCOE_FLAG;
@@ -10498,26 +11289,33 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp)
(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
+ /* Calculate the number of maximum allowed FCoE tasks */
+ bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
+
+ /* check if FCoE resources must be shared between different functions */
+ if (num_fcoe_func)
+ bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func;
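+ /* Worked example (the MAX_NUM_FCOE_TASKS_PER_ENGINE value used here
+ * is hypothetical): with 4096 tasks per engine and two FCoE-capable
+ * functions counted by bnx2x_shared_fcoe_funcs(), each function gets
+ * 4096 / 2 = 2048 exchanges; with a zero count the engine total is
+ * left untouched.
+ */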
+
/* Read the WWN: */
if (!IS_MF(bp)) {
/* Port info */
bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].
+ dev_info.port_hw_config[port].
fcoe_wwn_port_name_upper);
bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].
+ dev_info.port_hw_config[port].
fcoe_wwn_port_name_lower);
/* Node info */
bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].
+ dev_info.port_hw_config[port].
fcoe_wwn_node_name_upper);
bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].
+ dev_info.port_hw_config[port].
fcoe_wwn_node_name_lower);
} else if (!IS_MF_SD(bp)) {
/*
@@ -10611,14 +11409,12 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
}
}
- if (IS_MF_STORAGE_SD(bp))
- /* Zero primary MAC configuration */
- memset(bp->dev->dev_addr, 0, ETH_ALEN);
-
+ /* If this is a storage-only interface, use SAN mac as
+ * primary MAC. Notice that for SD this is already the case,
+ * as the SAN mac was copied from the primary MAC.
+ */
if (IS_MF_FCOE_AFEX(bp))
- /* use FIP MAC as primary MAC */
memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
-
} else {
val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
iscsi_mac_upper);
@@ -10677,8 +11473,15 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
bnx2x_get_cnic_mac_hwinfo(bp);
}
+ if (!BP_NOMCP(bp)) {
+ /* Read physical port identifier from shmem */
+ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+ val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+ bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
+ bp->flags |= HAS_PHYS_PORT_ID;
+ }
+
memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
- memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
dev_err(&bp->pdev->dev,
@@ -10692,6 +11495,9 @@ static bool bnx2x_get_dropless_info(struct bnx2x *bp)
int tmp;
u32 cfg;
+ if (IS_VF(bp))
+ return 0;
+
if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
/* Take function: tmp = func */
tmp = BP_ABS_FUNC(bp);
@@ -10727,7 +11533,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
} else {
bp->common.int_block = INT_BLOCK_IGU;
- /* do not allow device reset during IGU info preocessing */
+ /* do not allow device reset during IGU info processing */
bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
@@ -10743,7 +11549,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
tout--;
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
}
if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
@@ -10806,7 +11612,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
E1H_FUNC_MAX * sizeof(struct drv_func_mb);
/*
* get mf configuration:
- * 1. existence of MF configuration
+ * 1. Existence of MF configuration
* 2. MAC address must be legal (check only upper bytes)
* for Switch-Independent mode;
* OVLAN must be legal for Switch-Dependent mode
@@ -10856,6 +11662,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
} else
BNX2X_DEV_INFO("illegal OV for SD\n");
break;
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
+ bp->mf_config[vn] = 0;
+ break;
default:
/* Unknown configuration: reset mf_config */
bp->mf_config[vn] = 0;
@@ -10918,9 +11727,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
}
}
- /* adjust igu_sb_cnt to MF for E1x */
- if (CHIP_IS_E1x(bp) && IS_MF(bp))
- bp->igu_sb_cnt /= E1HVN_MAX;
+ /* adjust igu_sb_cnt to MF for E1H */
+ if (CHIP_IS_E1H(bp) && IS_MF(bp))
+ bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
/* port info */
bnx2x_get_port_hwinfo(bp);
@@ -11075,15 +11884,22 @@ static int bnx2x_init_bp(struct bnx2x *bp)
mutex_init(&bp->port.phy_mutex);
mutex_init(&bp->fw_mb_mutex);
+ mutex_init(&bp->drv_info_mutex);
+ bp->drv_info_mng_owner = false;
spin_lock_init(&bp->stats_lock);
-
+ sema_init(&bp->stats_sema, 1);
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
- rc = bnx2x_get_hwinfo(bp);
- if (rc)
- return rc;
+ INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
+ if (IS_PF(bp)) {
+ rc = bnx2x_get_hwinfo(bp);
+ if (rc)
+ return rc;
+ } else {
+ eth_zero_addr(bp->dev->dev_addr);
+ }
bnx2x_set_modes_bitmap(bp);
@@ -11096,17 +11912,20 @@ static int bnx2x_init_bp(struct bnx2x *bp)
func = BP_FUNC(bp);
/* need to reset chip if undi was active */
- if (!BP_NOMCP(bp)) {
+ if (IS_PF(bp) && !BP_NOMCP(bp)) {
/* init fw_seq */
bp->fw_seq =
SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
DRV_MSG_SEQ_NUMBER_MASK;
BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
- bnx2x_prev_unload(bp);
+ rc = bnx2x_prev_unload(bp);
+ if (rc) {
+ bnx2x_free_mem_bp(bp);
+ return rc;
+ }
}
-
if (CHIP_REV_IS_FPGA(bp))
dev_err(&bp->pdev->dev, "FPGA detected\n");
@@ -11115,6 +11934,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->disable_tpa = disable_tpa;
bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
+ /* Reduce memory usage in kdump environment by disabling TPA */
+ bp->disable_tpa |= reset_devices;
/* Set TPA flags */
if (bp->disable_tpa) {
@@ -11133,6 +11954,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->mrrs = mrrs;
bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
+ if (IS_VF(bp))
+ bp->rx_ring_size = MAX_RX_AVAIL;
/* make sure that the numbers are in the right granularity */
bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
@@ -11161,27 +11984,36 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->cnic_base_cl_id = FP_SB_MAX_E2;
/* multiple tx priority */
- if (CHIP_IS_E1x(bp))
+ if (IS_VF(bp))
+ bp->max_cos = 1;
+ else if (CHIP_IS_E1x(bp))
bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
- if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
+ else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
- if (CHIP_IS_E3B0(bp))
+ else if (CHIP_IS_E3B0(bp))
bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
+ else
+ BNX2X_ERR("unknown chip %x revision %x\n",
+ CHIP_NUM(bp), CHIP_REV(bp));
+ BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);
/* We need at least one default status block for slow-path events,
* second status block for the L2 queue, and a third status block for
- * CNIC if supproted.
+ * CNIC if supported.
*/
- if (CNIC_SUPPORT(bp))
+ if (IS_VF(bp))
+ bp->min_msix_vec_cnt = 1;
+ else if (CNIC_SUPPORT(bp))
bp->min_msix_vec_cnt = 3;
- else
+ else /* PF w/o cnic */
bp->min_msix_vec_cnt = 2;
BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
+ bp->dump_preset_idx = 1;
+
return rc;
}
-
/****************************************************************************
* General service functions
****************************************************************************/
@@ -11194,9 +12026,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
static int bnx2x_open(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
- bool global = false;
- int other_engine = BP_PATH(bp) ? 0 : 1;
- bool other_load_status, load_status;
+ int rc;
bp->stats_init = true;
@@ -11204,53 +12034,61 @@ static int bnx2x_open(struct net_device *dev)
bnx2x_set_power_state(bp, PCI_D0);
- other_load_status = bnx2x_get_load_status(bp, other_engine);
- load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
-
- /*
- * If parity had happen during the unload, then attentions
+ /* If parity had happened during the unload, then attentions
* and/or RECOVERY_IN_PROGRESS may still be set. In this case we
* want the first function loaded on the current engine to
* complete the recovery.
+ * Parity recovery is only relevant for the PF driver.
*/
- if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
- bnx2x_chk_parity_attn(bp, &global, true))
- do {
- /*
- * If there are attentions and they are in a global
- * blocks, set the GLOBAL_RESET bit regardless whether
- * it will be this function that will complete the
- * recovery or not.
- */
- if (global)
- bnx2x_set_reset_global(bp);
+ if (IS_PF(bp)) {
+ int other_engine = BP_PATH(bp) ? 0 : 1;
+ bool other_load_status, load_status;
+ bool global = false;
+
+ other_load_status = bnx2x_get_load_status(bp, other_engine);
+ load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
+ if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
+ bnx2x_chk_parity_attn(bp, &global, true)) {
+ do {
+ /* If there are attentions and they are in
+ * global blocks, set the GLOBAL_RESET bit
+ * regardless of whether it will be this function
+ * that will complete the recovery or not.
+ */
+ if (global)
+ bnx2x_set_reset_global(bp);
- /*
- * Only the first function on the current engine should
- * try to recover in open. In case of attentions in
- * global blocks only the first in the chip should try
- * to recover.
- */
- if ((!load_status &&
- (!global || !other_load_status)) &&
- bnx2x_trylock_leader_lock(bp) &&
- !bnx2x_leader_reset(bp)) {
- netdev_info(bp->dev, "Recovered in open\n");
- break;
- }
+ /* Only the first function on the current
+ * engine should try to recover in open. In case
+ * of attentions in global blocks only the first
+ * in the chip should try to recover.
+ */
+ if ((!load_status &&
+ (!global || !other_load_status)) &&
+ bnx2x_trylock_leader_lock(bp) &&
+ !bnx2x_leader_reset(bp)) {
+ netdev_info(bp->dev,
+ "Recovered in open\n");
+ break;
+ }
- /* recovery has failed... */
- bnx2x_set_power_state(bp, PCI_D3hot);
- bp->recovery_state = BNX2X_RECOVERY_FAILED;
+ /* recovery has failed... */
+ bnx2x_set_power_state(bp, PCI_D3hot);
+ bp->recovery_state = BNX2X_RECOVERY_FAILED;
- BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
- "If you still see this message after a few retries then power cycle is required.\n");
+ BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
+ "If you still see this message after a few retries then power cycle is required.\n");
- return -EAGAIN;
- } while (0);
+ return -EAGAIN;
+ } while (0);
+ }
+ }
bp->recovery_state = BNX2X_RECOVERY_DONE;
- return bnx2x_nic_load(bp, LOAD_OPEN);
+ rc = bnx2x_nic_load(bp, LOAD_OPEN);
+ if (rc)
+ return rc;
+ return 0;
}
/* called with rtnl_lock */
@@ -11261,9 +12099,6 @@ static int bnx2x_close(struct net_device *dev)
/* Unload the driver, release IRQs */
bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
- /* Power off */
- bnx2x_set_power_state(bp, PCI_D3hot);
-
return 0;
}
@@ -11272,7 +12107,7 @@ static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
{
int mc_count = netdev_mc_count(bp->dev);
struct bnx2x_mcast_list_elem *mc_mac =
- kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC);
+ kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC);
struct netdev_hw_addr *ha;
if (!mc_mac)
@@ -11384,33 +12219,53 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
return rc;
}
-
/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
-void bnx2x_set_rx_mode(struct net_device *dev)
+static void bnx2x_set_rx_mode(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
- u32 rx_mode = BNX2X_RX_MODE_NORMAL;
if (bp->state != BNX2X_STATE_OPEN) {
DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
return;
+ } else {
+ /* Schedule an SP task to handle rest of change */
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
+ NETIF_MSG_IFUP);
}
+}
+
+void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
+{
+ u32 rx_mode = BNX2X_RX_MODE_NORMAL;
DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
- if (dev->flags & IFF_PROMISC)
+ netif_addr_lock_bh(bp->dev);
+
+ if (bp->dev->flags & IFF_PROMISC) {
rx_mode = BNX2X_RX_MODE_PROMISC;
- else if ((dev->flags & IFF_ALLMULTI) ||
- ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
- CHIP_IS_E1(bp)))
+ } else if ((bp->dev->flags & IFF_ALLMULTI) ||
+ ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
+ CHIP_IS_E1(bp))) {
rx_mode = BNX2X_RX_MODE_ALLMULTI;
- else {
- /* some multicasts */
- if (bnx2x_set_mc_list(bp) < 0)
- rx_mode = BNX2X_RX_MODE_ALLMULTI;
-
- if (bnx2x_set_uc_list(bp) < 0)
- rx_mode = BNX2X_RX_MODE_PROMISC;
+ } else {
+ if (IS_PF(bp)) {
+ /* some multicasts */
+ if (bnx2x_set_mc_list(bp) < 0)
+ rx_mode = BNX2X_RX_MODE_ALLMULTI;
+
+ /* release bh lock, as bnx2x_set_uc_list might sleep */
+ netif_addr_unlock_bh(bp->dev);
+ if (bnx2x_set_uc_list(bp) < 0)
+ rx_mode = BNX2X_RX_MODE_PROMISC;
+ netif_addr_lock_bh(bp->dev);
+ } else {
+ /* configuring mcast to a vf involves sleeping (when we
+ * wait for the pf's response).
+ */
+ bnx2x_schedule_sp_rtnl(bp,
+ BNX2X_SP_RTNL_VFPF_MCAST, 0);
+ }
}
bp->rx_mode = rx_mode;
@@ -11421,10 +12276,21 @@ void bnx2x_set_rx_mode(struct net_device *dev)
/* Schedule the rx_mode command */
if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+ netif_addr_unlock_bh(bp->dev);
return;
}
- bnx2x_set_storm_rx_mode(bp);
+ if (IS_PF(bp)) {
+ bnx2x_set_storm_rx_mode(bp);
+ netif_addr_unlock_bh(bp->dev);
+ } else {
+ /* VF will need to request the PF to make this change, and so
+ * the VF needs to release the bottom-half lock prior to the
+ * request (as it will likely require sleep on the VF side)
+ */
+ netif_addr_unlock_bh(bp->dev);
+ bnx2x_vfpf_storm_rx_mode(bp);
+ }
}
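The rewritten rx_mode path above turns on a single locking rule: netif_addr_lock_bh() must not be held across anything that can sleep, which is why the PF drops it around bnx2x_set_uc_list() and the VF defers the whole operation to a task. A minimal sketch of that pattern, where do_sleeping_config() is a hypothetical stand-in for the sleeping call:

    /* Sketch only: do_sleeping_config() is hypothetical. The BH-disabling
     * address lock is released around it and re-taken afterwards, mirroring
     * what bnx2x_set_rx_mode_inner() does for bnx2x_set_uc_list().
     */
    static void example_rx_config(struct net_device *dev)
    {
    	netif_addr_lock_bh(dev);
    	/* ... inspect dev->uc / dev->mc lists under the lock ... */
    	netif_addr_unlock_bh(dev);

    	do_sleeping_config(dev);	/* may sleep: FW or VF-PF channel */

    	netif_addr_lock_bh(dev);
    	/* ... re-check state that may have changed while unlocked ... */
    	netif_addr_unlock_bh(dev);
    }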
/* called with rtnl_lock */
@@ -11503,6 +12369,10 @@ static int bnx2x_validate_addr(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
+ /* query the bulletin board for mac address configured by the PF */
+ if (IS_VF(bp))
+ bnx2x_sample_bulletin(bp);
+
if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) {
BNX2X_ERR("Non-valid Ethernet address\n");
return -EADDRNOTAVAIL;
@@ -11510,6 +12380,20 @@ static int bnx2x_validate_addr(struct net_device *dev)
return 0;
}
+static int bnx2x_get_phys_port_id(struct net_device *netdev,
+ struct netdev_phys_port_id *ppid)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+
+ if (!(bp->flags & HAS_PHYS_PORT_ID))
+ return -EOPNOTSUPP;
+
+ ppid->id_len = sizeof(bp->phys_port_id);
+ memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
+
+ return 0;
+}
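The new ndo only fills a caller-supplied buffer; the consuming side is the core helper dev_get_phys_port_id(), which forwards to the ndo and propagates -EOPNOTSUPP when a driver cannot report an id (here, when HAS_PHYS_PORT_ID is unset). A sketch of such a consumer, assuming the netdev_phys_port_id structure of this kernel generation:

    /* Sketch: query and log a device's physical port id. */
    static int example_show_phys_port_id(struct net_device *dev)
    {
    	struct netdev_phys_port_id ppid;
    	int err;

    	err = dev_get_phys_port_id(dev, &ppid);
    	if (err)
    		return err;	/* -EOPNOTSUPP if the driver has no id */

    	netdev_info(dev, "phys_port_id %*phN\n", ppid.id_len, ppid.id);
    	return 0;
    }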
+
static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_open = bnx2x_open,
.ndo_stop = bnx2x_close,
@@ -11527,23 +12411,27 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_poll_controller = poll_bnx2x,
#endif
.ndo_setup_tc = bnx2x_setup_tc,
-
+#ifdef CONFIG_BNX2X_SRIOV
+ .ndo_set_vf_mac = bnx2x_set_vf_mac,
+ .ndo_set_vf_vlan = bnx2x_set_vf_vlan,
+ .ndo_get_vf_config = bnx2x_get_vf_config,
+#endif
#ifdef NETDEV_FCOE_WWNN
.ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
#endif
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ .ndo_busy_poll = bnx2x_low_latency_recv,
+#endif
+ .ndo_get_phys_port_id = bnx2x_get_phys_port_id,
};
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
{
struct device *dev = &bp->pdev->dev;
- if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
- bp->flags |= USING_DAC_FLAG;
- if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
- dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
- return -EIO;
- }
- } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
dev_err(dev, "System does not support DMA, aborting\n");
return -EIO;
}
@@ -11551,10 +12439,17 @@ static int bnx2x_set_coherency_mask(struct bnx2x *bp)
return 0;
}
-static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
- unsigned long board_type)
+static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
+{
+ if (bp->flags & AER_ENABLED) {
+ pci_disable_pcie_error_reporting(bp->pdev);
+ bp->flags &= ~AER_ENABLED;
+ }
+}
+
+static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
+ struct net_device *dev, unsigned long board_type)
{
- struct bnx2x *bp;
int rc;
u32 pci_cfg_dword;
bool chip_is_e1x = (board_type == BCM57710 ||
@@ -11562,11 +12457,9 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
board_type == BCM57711E);
SET_NETDEV_DEV(dev, &pdev->dev);
- bp = netdev_priv(dev);
bp->dev = dev;
bp->pdev = pdev;
- bp->flags = 0;
rc = pci_enable_device(pdev);
if (rc) {
@@ -11582,9 +12475,8 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
goto err_out_disable;
}
- if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
- dev_err(&bp->pdev->dev, "Cannot find second PCI device"
- " base address, aborting\n");
+ if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+ dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
rc = -ENODEV;
goto err_out_disable;
}
@@ -11609,12 +12501,13 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
pci_save_state(pdev);
}
- bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
- if (bp->pm_cap == 0) {
- dev_err(&bp->pdev->dev,
- "Cannot find power management capability, aborting\n");
- rc = -EIO;
- goto err_out_release;
+ if (IS_PF(bp)) {
+ if (!pdev->pm_cap) {
+ dev_err(&bp->pdev->dev,
+ "Cannot find power management capability, aborting\n");
+ rc = -EIO;
+ goto err_out_release;
+ }
}
if (!pci_is_pcie(pdev)) {
@@ -11646,62 +12539,81 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
* support Physical Device Assignment where kernel BDF may be arbitrary
* (depending on hypervisor).
*/
- if (chip_is_e1x)
+ if (chip_is_e1x) {
bp->pf_num = PCI_FUNC(pdev->devfn);
- else {/* chip is E2/3*/
+ } else {
+ /* chip is E2/3*/
pci_read_config_dword(bp->pdev,
PCICFG_ME_REGISTER, &pci_cfg_dword);
bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
- ME_REG_ABS_PF_NUM_SHIFT);
+ ME_REG_ABS_PF_NUM_SHIFT);
}
BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
- bnx2x_set_power_state(bp, PCI_D0);
-
/* clean indirect addresses */
pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
PCICFG_VENDOR_ID_OFFSET);
+
+ /* AER (Advanced Error reporting) configuration */
+ rc = pci_enable_pcie_error_reporting(pdev);
+ if (!rc)
+ bp->flags |= AER_ENABLED;
+ else
+ BNX2X_DEV_INFO("Failed To configure PCIe AER [%d]\n", rc);
+
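The AER_ENABLED flag set here is balanced at two later points in this patch: bnx2x_io_slot_reset() cleans latched uncorrectable status, and bnx2x_disable_pcie_error_reporting() undoes the enable on teardown and on the probe error path. A compressed sketch of that lifecycle, assuming the <linux/aer.h> helpers of this era:

    #include <linux/aer.h>

    /* probe */
    if (!pci_enable_pcie_error_reporting(pdev))
    	bp->flags |= AER_ENABLED;

    /* AER/EEH slot reset */
    if (bp->flags & AER_ENABLED)
    	pci_cleanup_aer_uncorrect_error_status(pdev);

    /* remove, or probe error unwind */
    if (bp->flags & AER_ENABLED) {
    	pci_disable_pcie_error_reporting(pdev);
    	bp->flags &= ~AER_ENABLED;
    }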
/*
* Clean the following indirect addresses for all functions since it
* is not used by the driver.
*/
- REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
+ if (IS_PF(bp)) {
+ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
+
+ if (chip_is_e1x) {
+ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+ }
- if (chip_is_e1x) {
- REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+ /* Enable internal target-read (in case we are probed after PF
+ * FLR). Must be done prior to any BAR read access. Only for
+ * 57712 and up
+ */
+ if (!chip_is_e1x)
+ REG_WR(bp,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
}
- /*
- * Enable internal target-read (in case we are probed after PF FLR).
- * Must be done prior to any BAR read access. Only for 57712 and up
- */
- if (!chip_is_e1x)
- REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
-
dev->watchdog_timeo = TX_TIMEOUT;
dev->netdev_ops = &bnx2x_netdev_ops;
- bnx2x_set_ethtool_ops(dev);
+ bnx2x_set_ethtool_ops(bp, dev);
dev->priv_flags |= IFF_UNICAST_FLT;
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
- NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
+ NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
+ if (!CHIP_IS_E1x(bp)) {
+ dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
+ dev->hw_enc_features =
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+ NETIF_F_GSO_IPIP |
+ NETIF_F_GSO_SIT |
+ NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
+ }
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
- dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
- if (bp->flags & USING_DAC_FLAG)
- dev->features |= NETIF_F_HIGHDMA;
+ dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
+ dev->features |= NETIF_F_HIGHDMA;
/* Add Loopback capability to the device */
dev->hw_features |= NETIF_F_LOOPBACK;
@@ -11726,29 +12638,18 @@ err_out_release:
err_out_disable:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
err_out:
return rc;
}
-static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width, int *speed)
-{
- u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
-
- *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
-
- /* return value of 1=2.5GHz 2=5GHz */
- *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
-}
-
static int bnx2x_check_firmware(struct bnx2x *bp)
{
const struct firmware *firmware = bp->firmware;
struct bnx2x_fw_file_hdr *fw_hdr;
struct bnx2x_fw_file_section *sections;
u32 offset, len, num_ops;
- u16 *ops_offsets;
+ __be16 *ops_offsets;
int i;
const u8 *fw_ver;
@@ -11773,7 +12674,7 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
/* Likewise for the init_ops offsets */
offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
- ops_offsets = (u16 *)(firmware->data + offset);
+ ops_offsets = (__force __be16 *)(firmware->data + offset);
num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
@@ -11964,7 +12865,6 @@ static void bnx2x_release_firmware(struct bnx2x *bp)
bp->firmware = NULL;
}
-
static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
.init_hw_cmn_chip = bnx2x_init_hw_common_chip,
.init_hw_cmn = bnx2x_init_hw_common,
@@ -12000,8 +12900,12 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
{
int cid_count = BNX2X_L2_MAX_CID(bp);
+ if (IS_SRIOV(bp))
+ cid_count += BNX2X_VF_CIDS;
+
if (CNIC_SUPPORT(bp))
cid_count += CNIC_CID_MAX;
+
return roundup(cid_count, QM_CID_ROUND);
}
@@ -12011,97 +12915,125 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
* @dev: pci device
*
*/
-static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
- int cnic_cnt)
+static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
{
- int pos;
- u16 control;
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ int index;
+ u16 control = 0;
/*
* If MSI-X is not supported - return number of SBs needed to support
* one fast path queue: one FP queue + SB for CNIC
*/
- if (!pos)
+ if (!pdev->msix_cap) {
+ dev_info(&pdev->dev, "no msix capability found\n");
return 1 + cnic_cnt;
+ }
+ dev_info(&pdev->dev, "msix capability found\n");
/*
* The value in the PCI configuration space is the index of the last
* entry, namely one less than the actual size of the table, which is
* exactly what we want to return from this function: number of all SBs
* without the default SB.
+ * For VFs there is no default SB, so we return (index+1).
*/
- pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
- return control & PCI_MSIX_FLAGS_QSIZE;
-}
+ pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
-struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *);
+ index = control & PCI_MSIX_FLAGS_QSIZE;
-static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- struct net_device *dev = NULL;
- struct bnx2x *bp;
- int pcie_width, pcie_speed;
- int rc, max_non_def_sbs;
- int rx_count, tx_count, rss_count, doorbell_size;
- int cnic_cnt;
- /*
- * An estimated maximum supported CoS number according to the chip
- * version.
- * We will try to roughly estimate the maximum number of CoSes this chip
- * may support in order to minimize the memory allocated for Tx
- * netdev_queue's. This number will be accurately calculated during the
- * initialization of bp->max_cos based on the chip versions AND chip
- * revision in the bnx2x_init_bp().
- */
- u8 max_cos_est = 0;
+ return index;
+}
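The "index of the last entry" convention described in the comment is standard PCI: the Table Size field in the MSI-X Message Control word is encoded as N-1. A sketch of recovering the real table size with nothing beyond <linux/pci.h>, essentially what the core's MSI-X vector-count helper does:

    /* Sketch: QSIZE holds (number of MSI-X table entries - 1). */
    static int example_msix_table_size(struct pci_dev *pdev)
    {
    	u16 control;

    	if (!pdev->msix_cap)
    		return 0;

    	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS,
    			     &control);
    	return (control & PCI_MSIX_FLAGS_QSIZE) + 1;
    }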
- switch (ent->driver_data) {
+static int set_max_cos_est(int chip_id)
+{
+ switch (chip_id) {
case BCM57710:
case BCM57711:
case BCM57711E:
- max_cos_est = BNX2X_MULTI_TX_COS_E1X;
- break;
-
+ return BNX2X_MULTI_TX_COS_E1X;
case BCM57712:
case BCM57712_MF:
- max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0;
- break;
-
+ return BNX2X_MULTI_TX_COS_E2_E3A0;
case BCM57800:
case BCM57800_MF:
case BCM57810:
case BCM57810_MF:
- case BCM57840_O:
case BCM57840_4_10:
case BCM57840_2_20:
+ case BCM57840_O:
case BCM57840_MFO:
case BCM57840_MF:
case BCM57811:
case BCM57811_MF:
- max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
- break;
-
+ return BNX2X_MULTI_TX_COS_E3B0;
+ case BCM57712_VF:
+ case BCM57800_VF:
+ case BCM57810_VF:
+ case BCM57840_VF:
+ case BCM57811_VF:
+ return 1;
default:
- pr_err("Unknown board_type (%ld), aborting\n",
- ent->driver_data);
+ pr_err("Unknown board_type (%d), aborting\n", chip_id);
return -ENODEV;
}
+}
+
+static int set_is_vf(int chip_id)
+{
+ switch (chip_id) {
+ case BCM57712_VF:
+ case BCM57800_VF:
+ case BCM57810_VF:
+ case BCM57840_VF:
+ case BCM57811_VF:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int bnx2x_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev = NULL;
+ struct bnx2x *bp;
+ enum pcie_link_width pcie_width;
+ enum pci_bus_speed pcie_speed;
+ int rc, max_non_def_sbs;
+ int rx_count, tx_count, rss_count, doorbell_size;
+ int max_cos_est;
+ bool is_vf;
+ int cnic_cnt;
+
+ /* An estimated maximum supported CoS number according to the chip
+ * version.
+ * We will try to roughly estimate the maximum number of CoSes this chip
+ * may support in order to minimize the memory allocated for Tx
+ * netdev_queue's. This number will be accurately calculated during the
+ * initialization of bp->max_cos based on the chip versions AND chip
+ * revision in the bnx2x_init_bp().
+ */
+ max_cos_est = set_max_cos_est(ent->driver_data);
+ if (max_cos_est < 0)
+ return max_cos_est;
+ is_vf = set_is_vf(ent->driver_data);
+ cnic_cnt = is_vf ? 0 : 1;
- cnic_cnt = 1;
max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
- WARN_ON(!max_non_def_sbs);
+ /* add another SB for VF as it has no default SB */
+ max_non_def_sbs += is_vf ? 1 : 0;
/* Maximum number of RSS queues: one IGU SB goes to CNIC */
rss_count = max_non_def_sbs - cnic_cnt;
+ if (rss_count < 1)
+ return -EINVAL;
+
/* Maximum number of netdev Rx queues: RSS + FCoE L2 */
rx_count = rss_count + cnic_cnt;
- /*
- * Maximum number of netdev Tx queues:
+ /* Maximum number of netdev Tx queues:
* Maximum TSS queues * Maximum supported number of CoS + FCoE L2
*/
tx_count = rss_count * max_cos_est + cnic_cnt;
@@ -12113,42 +13045,55 @@ static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp = netdev_priv(dev);
+ bp->flags = 0;
+ if (is_vf)
+ bp->flags |= IS_VF_FLAG;
+
bp->igu_sb_cnt = max_non_def_sbs;
+ bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
bp->msg_enable = debug;
bp->cnic_support = cnic_cnt;
bp->cnic_probe = bnx2x_cnic_probe;
pci_set_drvdata(pdev, dev);
- rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
+ rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
if (rc < 0) {
free_netdev(dev);
return rc;
}
+ BNX2X_DEV_INFO("This is a %s function\n",
+ IS_PF(bp) ? "physical" : "virtual");
BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
- BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
-
+ BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
- tx_count, rx_count);
+ tx_count, rx_count);
rc = bnx2x_init_bp(bp);
if (rc)
goto init_one_exit;
- /*
- * Map doorbels here as we need the real value of bp->max_cos which
- * is initialized in bnx2x_init_bp().
+ /* Map doorbells here as we need the real value of bp->max_cos which
+ * is initialized in bnx2x_init_bp() to determine the number of
+ * l2 connections.
*/
- doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
- if (doorbell_size > pci_resource_len(pdev, 2)) {
- dev_err(&bp->pdev->dev,
- "Cannot map doorbells, bar size too small, aborting\n");
- rc = -ENOMEM;
- goto init_one_exit;
+ if (IS_VF(bp)) {
+ bp->doorbells = bnx2x_vf_doorbells(bp);
+ rc = bnx2x_vf_pci_alloc(bp);
+ if (rc)
+ goto init_one_exit;
+ } else {
+ doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
+ if (doorbell_size > pci_resource_len(pdev, 2)) {
+ dev_err(&bp->pdev->dev,
+ "Cannot map doorbells, bar size too small, aborting\n");
+ rc = -ENOMEM;
+ goto init_one_exit;
+ }
+ bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
+ doorbell_size);
}
- bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
- doorbell_size);
if (!bp->doorbells) {
dev_err(&bp->pdev->dev,
"Cannot map doorbell space, aborting\n");
@@ -12156,37 +13101,45 @@ static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_one_exit;
}
+ if (IS_VF(bp)) {
+ rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
+ if (rc)
+ goto init_one_exit;
+ }
+
+ /* Enable SRIOV if capability found in configuration space */
+ rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
+ if (rc)
+ goto init_one_exit;
+
/* calc qm_cid_count */
bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
+ BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);
/* disable FCOE L2 queue for E1x*/
if (CHIP_IS_E1x(bp))
bp->flags |= NO_FCOE_FLAG;
- /* disable FCOE for 57840 device, until FW supports it */
- switch (ent->driver_data) {
- case BCM57840_O:
- case BCM57840_4_10:
- case BCM57840_2_20:
- case BCM57840_MFO:
- case BCM57840_MF:
- bp->flags |= NO_FCOE_FLAG;
- }
-
/* Set bp->num_queues for MSI-X mode*/
bnx2x_set_num_queues(bp);
/* Configure interrupt mode: try to enable MSI-X/MSI if
* needed.
*/
- bnx2x_set_int_mode(bp);
+ rc = bnx2x_set_int_mode(bp);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot set interrupts\n");
+ goto init_one_exit;
+ }
+ BNX2X_DEV_INFO("set interrupts successfully\n");
+ /* register the net device */
rc = register_netdev(dev);
if (rc) {
dev_err(&pdev->dev, "Cannot register net device\n");
goto init_one_exit;
}
-
+ BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
if (!NO_FCOE(bp)) {
/* Add storage MAC address */
@@ -12194,26 +13147,31 @@ static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
rtnl_unlock();
}
-
- bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
-
- BNX2X_DEV_INFO(
- "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
- board_info[ent->driver_data].name,
- (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
- pcie_width,
- ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
- (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
- "5GHz (Gen2)" : "2.5GHz",
- dev->base_addr, bp->pdev->irq, dev->dev_addr);
+ if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) ||
+ pcie_speed == PCI_SPEED_UNKNOWN ||
+ pcie_width == PCIE_LNK_WIDTH_UNKNOWN)
+ BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n");
+ else
+ BNX2X_DEV_INFO(
+ "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
+ board_info[ent->driver_data].name,
+ (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
+ pcie_width,
+ pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" :
+ pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" :
+ pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :
+ "Unknown",
+ dev->base_addr, bp->pdev->irq, dev->dev_addr);
return 0;
init_one_exit:
+ bnx2x_disable_pcie_error_reporting(bp);
+
if (bp->regview)
iounmap(bp->regview);
- if (bp->doorbells)
+ if (IS_PF(bp) && bp->doorbells)
iounmap(bp->doorbells);
free_netdev(dev);
@@ -12222,22 +13180,15 @@ init_one_exit:
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
return rc;
}
-static void bnx2x_remove_one(struct pci_dev *pdev)
+static void __bnx2x_remove(struct pci_dev *pdev,
+ struct net_device *dev,
+ struct bnx2x *bp,
+ bool remove_netdev)
{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct bnx2x *bp;
-
- if (!dev) {
- dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
- return;
- }
- bp = netdev_priv(dev);
-
/* Delete storage MAC address */
if (!NO_FCOE(bp)) {
rtnl_lock();
@@ -12250,44 +13201,90 @@ static void bnx2x_remove_one(struct pci_dev *pdev)
bnx2x_dcbnl_update_applist(bp, true);
#endif
- unregister_netdev(dev);
+ if (IS_PF(bp) &&
+ !BP_NOMCP(bp) &&
+ (bp->flags & BC_SUPPORTS_RMMOD_CMD))
+ bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
+
+ /* Close the interface - either directly or implicitly */
+ if (remove_netdev) {
+ unregister_netdev(dev);
+ } else {
+ rtnl_lock();
+ dev_close(dev);
+ rtnl_unlock();
+ }
+
+ bnx2x_iov_remove_one(bp);
/* Power on: we can't let PCI layer write to us while we are in D3 */
- bnx2x_set_power_state(bp, PCI_D0);
+ if (IS_PF(bp))
+ bnx2x_set_power_state(bp, PCI_D0);
/* Disable MSI/MSI-X */
bnx2x_disable_msi(bp);
/* Power off */
- bnx2x_set_power_state(bp, PCI_D3hot);
+ if (IS_PF(bp))
+ bnx2x_set_power_state(bp, PCI_D3hot);
/* Make sure RESET task is not scheduled before continuing */
cancel_delayed_work_sync(&bp->sp_rtnl_task);
- if (bp->regview)
- iounmap(bp->regview);
+ /* send message via vfpf channel to release the resources of this vf */
+ if (IS_VF(bp))
+ bnx2x_vfpf_release(bp);
- if (bp->doorbells)
- iounmap(bp->doorbells);
+ /* Assumes no further PCIe PM changes will occur */
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, bp->wol);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
- bnx2x_release_firmware(bp);
+ bnx2x_disable_pcie_error_reporting(bp);
+ if (remove_netdev) {
+ if (bp->regview)
+ iounmap(bp->regview);
- bnx2x_free_mem_bp(bp);
+ /* For vfs, doorbells are part of the regview and were unmapped
+ * along with it. FW is only loaded by PF.
+ */
+ if (IS_PF(bp)) {
+ if (bp->doorbells)
+ iounmap(bp->doorbells);
- free_netdev(dev);
+ bnx2x_release_firmware(bp);
+ } else {
+ bnx2x_vf_pci_dealloc(bp);
+ }
+ bnx2x_free_mem_bp(bp);
- if (atomic_read(&pdev->enable_cnt) == 1)
- pci_release_regions(pdev);
+ free_netdev(dev);
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
+ if (atomic_read(&pdev->enable_cnt) == 1)
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+ }
}
-static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
+static void bnx2x_remove_one(struct pci_dev *pdev)
{
- int i;
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2x *bp;
+
+ if (!dev) {
+ dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
+ return;
+ }
+ bp = netdev_priv(dev);
- bp->state = BNX2X_STATE_ERROR;
+ __bnx2x_remove(pdev, dev, bp, true);
+}
+
+static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
+{
+ bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
bp->rx_mode = BNX2X_RX_MODE_NONE;
@@ -12296,48 +13293,27 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
/* Stop Tx */
bnx2x_tx_disable(bp);
-
- bnx2x_netif_stop(bp, 0);
/* Delete all NAPI objects */
bnx2x_del_all_napi(bp);
if (CNIC_LOADED(bp))
bnx2x_del_all_napi_cnic(bp);
+ netdev_reset_tc(bp->dev);
del_timer_sync(&bp->timer);
+ cancel_delayed_work_sync(&bp->sp_task);
+ cancel_delayed_work_sync(&bp->period_task);
- bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-
- /* Release IRQs */
- bnx2x_free_irq(bp);
-
- /* Free SKBs, SGEs, TPA pool and driver internals */
- bnx2x_free_skbs(bp);
-
- for_each_rx_queue(bp, i)
- bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
-
- bnx2x_free_mem(bp);
+ spin_lock_bh(&bp->stats_lock);
+ bp->stats_state = STATS_STATE_DISABLED;
+ spin_unlock_bh(&bp->stats_lock);
- bp->state = BNX2X_STATE_CLOSED;
+ bnx2x_save_statistics(bp);
netif_carrier_off(bp->dev);
return 0;
}
-static void bnx2x_eeh_recover(struct bnx2x *bp)
-{
- u32 val;
-
- mutex_init(&bp->port.phy_mutex);
-
-
- val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
- if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
- != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
- BNX2X_ERR("BAD MCP validity signature\n");
-}
-
/**
* bnx2x_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
@@ -12354,6 +13330,8 @@ static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
+ BNX2X_ERR("IO error detected\n");
+
netif_device_detach(dev);
if (state == pci_channel_io_perm_failure) {
@@ -12364,6 +13342,8 @@ static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
if (netif_running(dev))
bnx2x_eeh_nic_unload(bp);
+ bnx2x_prev_path_mark_eeh(bp);
+
pci_disable_device(pdev);
rtnl_unlock();
@@ -12382,9 +13362,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2x *bp = netdev_priv(dev);
+ int i;
rtnl_lock();
-
+ BNX2X_ERR("IO slot reset initializing...\n");
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev,
"Cannot re-enable PCI device after reset\n");
@@ -12394,12 +13375,61 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
+ pci_save_state(pdev);
if (netif_running(dev))
bnx2x_set_power_state(bp, PCI_D0);
+ if (netif_running(dev)) {
+ BNX2X_ERR("IO slot reset --> driver unload\n");
+
+ /* MCP should have been reset; Need to wait for validity */
+ bnx2x_init_shmem(bp);
+
+ if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ u32 v;
+
+ v = SHMEM2_RD(bp,
+ drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+ SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
+ v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
+ }
+ bnx2x_drain_tx_queues(bp);
+ bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
+ bnx2x_netif_stop(bp, 1);
+ bnx2x_free_irq(bp);
+
+ /* Report UNLOAD_DONE to MCP */
+ bnx2x_send_unload_done(bp, true);
+
+ bp->sp_state = 0;
+ bp->port.pmf = 0;
+
+ bnx2x_prev_unload(bp);
+
+ /* We should have reset the engine, so it's fair to
+ * assume the FW will no longer write to the bnx2x driver.
+ */
+ bnx2x_squeeze_objects(bp);
+ bnx2x_free_skbs(bp);
+ for_each_rx_queue(bp, i)
+ bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+ bnx2x_free_fp_mem(bp);
+ bnx2x_free_mem(bp);
+
+ bp->state = BNX2X_STATE_CLOSED;
+ }
+
rtnl_unlock();
+ /* If AER, perform cleanup of the PCIe registers */
+ if (bp->flags & AER_ENABLED) {
+ if (pci_cleanup_aer_uncorrect_error_status(pdev))
+ BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n");
+ else
+ DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n");
+ }
+
return PCI_ERS_RESULT_RECOVERED;
}
@@ -12422,7 +13452,8 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
rtnl_lock();
- bnx2x_eeh_recover(bp);
+ bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK;
if (netif_running(dev))
bnx2x_nic_load(bp, LOAD_NORMAL);
@@ -12438,6 +13469,29 @@ static const struct pci_error_handlers bnx2x_err_handler = {
.resume = bnx2x_io_resume,
};
+static void bnx2x_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2x *bp;
+
+ if (!dev)
+ return;
+
+ bp = netdev_priv(dev);
+ if (!bp)
+ return;
+
+ rtnl_lock();
+ netif_device_detach(dev);
+ rtnl_unlock();
+
+ /* Don't remove the netdevice, as there are scenarios which will cause
+ * the kernel to hang, e.g., when trying to remove bnx2i while the
+ * rootfs is mounted from SAN.
+ */
+ __bnx2x_remove(pdev, dev, bp, false);
+}
+
static struct pci_driver bnx2x_pci_driver = {
.name = DRV_MODULE_NAME,
.id_table = bnx2x_pci_tbl,
@@ -12446,6 +13500,10 @@ static struct pci_driver bnx2x_pci_driver = {
.suspend = bnx2x_suspend,
.resume = bnx2x_resume,
.err_handler = &bnx2x_err_handler,
+#ifdef CONFIG_BNX2X_SRIOV
+ .sriov_configure = bnx2x_sriov_configure,
+#endif
+ .shutdown = bnx2x_shutdown,
};
static int __init bnx2x_init(void)
@@ -12459,11 +13517,18 @@ static int __init bnx2x_init(void)
pr_err("Cannot create workqueue\n");
return -ENOMEM;
}
+ bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov");
+ if (!bnx2x_iov_wq) {
+ pr_err("Cannot create iov workqueue\n");
+ destroy_workqueue(bnx2x_wq);
+ return -ENOMEM;
+ }
ret = pci_register_driver(&bnx2x_pci_driver);
if (ret) {
pr_err("Cannot register driver\n");
destroy_workqueue(bnx2x_wq);
+ destroy_workqueue(bnx2x_iov_wq);
}
return ret;
}
@@ -12471,11 +13536,13 @@ static int __init bnx2x_init(void)
static void __exit bnx2x_cleanup(void)
{
struct list_head *pos, *q;
+
pci_unregister_driver(&bnx2x_pci_driver);
destroy_workqueue(bnx2x_wq);
+ destroy_workqueue(bnx2x_iov_wq);
- /* Free globablly allocated resources */
+ /* Free globally allocated resources */
list_for_each_safe(pos, q, &bnx2x_prev_list) {
struct bnx2x_prev_path_list *tmp =
list_entry(pos, struct bnx2x_prev_path_list, list);
@@ -12498,7 +13565,7 @@ module_exit(bnx2x_cleanup);
* @bp: driver handle
* @set: set or clear the CAM entry
*
- * This function will wait until the ramdord completion returns.
+ * This function will wait until the ramrod completion returns.
* Return 0 if success, -ENODEV if ramrod doesn't return.
*/
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
@@ -12526,7 +13593,6 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
BUG_ON(bp->cnic_spq_pending < count);
bp->cnic_spq_pending -= count;
-
for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
& SPE_HDR_CONN_TYPE) >>
@@ -12699,7 +13765,6 @@ static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
bnx2x_cnic_sp_post(bp, 0);
}
-
/* Called with netif_addr_lock_bh() taken.
* Sets an rx_mode config for an iSCSI ETH client.
* Doesn't block.
@@ -12740,7 +13805,6 @@ static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
}
}
-
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -12828,9 +13892,9 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
int count = ctl->data.credit.credit_count;
- smp_mb__before_atomic_inc();
+ smp_mb__before_atomic();
atomic_add(count, &bp->cq_spq_left);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
break;
}
case DRV_CTL_ULP_REGISTER_CMD: {
@@ -12870,6 +13934,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
REG_WR(bp, scratch_offset + i,
*(host_addr + i/4));
}
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
break;
}
@@ -12887,6 +13952,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
}
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
break;
}
@@ -12928,13 +13994,16 @@ void bnx2x_setup_cnic_info(struct bnx2x *bp)
{
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
-
cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
bnx2x_cid_ilt_lines(bp);
cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
+ DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
+ BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
+ cp->iscsi_l2_cid);
+
if (NO_ISCSI_OOO(bp))
cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
}
@@ -12964,7 +14033,6 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
BNX2X_ERR("CNIC-related load failed\n");
return rc;
}
-
}
bp->cnic_enabled = true;
@@ -12990,6 +14058,9 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
rcu_assign_pointer(bp->cnic_ops, ops);
+ /* Schedule driver to read CNIC driver versions */
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
+
return 0;
}
@@ -13003,13 +14074,14 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
RCU_INIT_POINTER(bp->cnic_ops, NULL);
mutex_unlock(&bp->cnic_mutex);
synchronize_rcu();
+ bp->cnic_enabled = false;
kfree(bp->cnic_kwq);
bp->cnic_kwq = NULL;
return 0;
}
-struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
+static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
@@ -13059,4 +14131,36 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
return cp;
}
+static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
+{
+ struct bnx2x *bp = fp->bp;
+ u32 offset = BAR_USTRORM_INTMEM;
+
+ if (IS_VF(bp))
+ return bnx2x_vf_ustorm_prods_offset(bp, fp);
+ else if (!CHIP_IS_E1x(bp))
+ offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
+ else
+ offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
+ return offset;
+}
+
+/* called only on E1H or E2.
+ * When pretending to be PF, the pretend value is the function number 0...7
+ * When pretending to be VF, the pretend value is the PF-num:VF-valid:ABS-VFID
+ * combination
+ */
+int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
+{
+ u32 pretend_reg;
+
+ if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
+ return -1;
+
+ /* get my own pretend register */
+ pretend_reg = bnx2x_get_pretend_reg(bp);
+ REG_WR(bp, pretend_reg, pretend_func_val);
+ REG_RD(bp, pretend_reg);
+ return 0;
+}
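The PF-num:VF-valid:ABS-VFID packing named in the comment is assembled elsewhere in this series (in the SR-IOV headers); the layout below only illustrates that description and is not a quote of the driver's macro, so treat the exact shifts as assumptions:

    /* Illustrative packing of a VF pretend value; field positions assumed. */
    static inline u16 example_vf_pretend_val(u8 pf_num, u8 abs_vfid)
    {
    	return (u16)(pf_num |			/* PF number      */
    		     (1 << 3) |			/* VF-valid bit   */
    		     ((u16)abs_vfid << 4));	/* absolute VF id */
    }

    /* usage: bnx2x_pretend_func(bp, example_vf_pretend_val(pf, vfid)); */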
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
index ddd5106ad2f..caf1aef651e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
@@ -1,6 +1,6 @@
/* bnx2x_mfw_req.h: Broadcom Everest network driver.
*
- * Copyright (c) 2012 Broadcom Corporation
+ * Copyright (c) 2012-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index bc2f65b3264..2beb5430b87 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1,6 +1,6 @@
/* bnx2x_reg.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -35,6 +35,8 @@
#define ATC_REG_ATC_INT_STS_CLR 0x1101c0
/* [RW 5] Parity mask register #0 read/write */
#define ATC_REG_ATC_PRTY_MASK 0x1101d8
+/* [R 5] Parity register #0 read */
+#define ATC_REG_ATC_PRTY_STS 0x1101cc
/* [RC 5] Parity register #0 read clear */
#define ATC_REG_ATC_PRTY_STS_CLR 0x1101d0
/* [RW 19] Interrupt mask register #0 read/write */
@@ -825,6 +827,7 @@
/* [RW 28] The value sent to CM header in the case of CFC load error. */
#define DORQ_REG_ERR_CMHEAD 0x170058
#define DORQ_REG_IF_EN 0x170004
+#define DORQ_REG_MAX_RVFID_SIZE 0x1701ec
#define DORQ_REG_MODE_ACT 0x170008
/* [RW 5] The normal mode CID extraction offset. */
#define DORQ_REG_NORM_CID_OFST 0x17002c
@@ -847,6 +850,22 @@
writes the same initial credit to the rspa_crd_cnt and rspb_crd_cnt. The
read reads this written value. */
#define DORQ_REG_RSP_INIT_CRD 0x170048
+#define DORQ_REG_RSPB_CRD_CNT 0x1700b0
+#define DORQ_REG_VF_NORM_CID_BASE 0x1701a0
+#define DORQ_REG_VF_NORM_CID_OFST 0x1701f4
+#define DORQ_REG_VF_NORM_CID_WND_SIZE 0x1701a4
+#define DORQ_REG_VF_NORM_MAX_CID_COUNT 0x1701e4
+#define DORQ_REG_VF_NORM_VF_BASE 0x1701a8
+/* [RW 10] VF type validation mask value */
+#define DORQ_REG_VF_TYPE_MASK_0 0x170218
+/* [RW 17] VF type validation Max MCID value */
+#define DORQ_REG_VF_TYPE_MAX_MCID_0 0x1702d8
+/* [RW 17] VF type validation Min MCID value */
+#define DORQ_REG_VF_TYPE_MIN_MCID_0 0x170298
+/* [RW 10] VF type validation comp value */
+#define DORQ_REG_VF_TYPE_VALUE_0 0x170258
+#define DORQ_REG_VF_USAGE_CT_LIMIT 0x170340
+
/* [RW 4] Initial activity counter value on the load request; when the
shortcut is done. */
#define DORQ_REG_SHRT_ACT_CNT 0x170070
@@ -859,6 +878,7 @@
#define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 (0x1<<2)
#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1)
#define HC_CONFIG_1_REG_BLOCK_DISABLE_1 (0x1<<0)
+#define DORQ_REG_VF_USAGE_CNT 0x170320
#define HC_REG_AGG_INT_0 0x108050
#define HC_REG_AGG_INT_1 0x108054
#define HC_REG_ATTN_BIT 0x108120
@@ -1473,10 +1493,6 @@
/* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1
Port. */
#define MISC_REG_BOND_ID 0xa400
-/* [R 8] These bits indicate the metal revision of the chip. This value
- starts at 0x00 for each all-layer tape-out and increments by one for each
- tape-out. */
-#define MISC_REG_CHIP_METAL 0xa404
/* [R 16] These bits indicate the part number for the chip. */
#define MISC_REG_CHIP_NUM 0xa408
/* [R 4] These bits indicate the base revision of the chip. This value
@@ -2136,6 +2152,8 @@
/* [R 32] Interrupt register #0 read */
#define NIG_REG_NIG_INT_STS_0 0x103b0
#define NIG_REG_NIG_INT_STS_1 0x103c0
+/* [RC 32] Interrupt register #0 read clear */
+#define NIG_REG_NIG_INT_STS_CLR_0 0x103b4
/* [R 32] Legacy E1 and E1H location for parity error mask register. */
#define NIG_REG_NIG_PRTY_MASK 0x103dc
/* [RW 32] Parity mask register #0 read/write */
@@ -2571,6 +2589,7 @@
current task in process). */
#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c
#define PBF_REG_DISABLE_PF 0x1402e8
+#define PBF_REG_DISABLE_VF 0x1402ec
/* [RW 18] For port 0: For each client that is subject to WFQ (the
* corresponding bit is 1); indicates to which of the credit registers this
* client is mapped. For clients which are not credit blocked; their mapping
@@ -2733,6 +2752,8 @@
#define PBF_REG_PBF_INT_STS 0x1401c8
/* [RW 20] Parity mask register #0 read/write */
#define PBF_REG_PBF_PRTY_MASK 0x1401e4
+/* [R 28] Parity register #0 read */
+#define PBF_REG_PBF_PRTY_STS 0x1401d8
/* [RC 20] Parity register #0 read clear */
#define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc
/* [RW 16] The Ethernet type value for L2 tag 0 */
@@ -2843,6 +2864,17 @@
#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430
#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434
#define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438
+/* [W 7] Writing 1 to each bit in this register clears a corresponding error
+ * details register and enables logging new error details. Bit 0 - clears
+ * INCORRECT_RCV_DETAILS; Bit 1 - clears RX_ERR_DETAILS; Bit 2 - clears
+ * TX_ERR_WR_ADD_31_0 TX_ERR_WR_ADD_63_32 TX_ERR_WR_DETAILS
+ * TX_ERR_WR_DETAILS2 TX_ERR_RD_ADD_31_0 TX_ERR_RD_ADD_63_32
+ * TX_ERR_RD_DETAILS TX_ERR_RD_DETAILS2 TX_ERR_WR_DETAILS_ICPL; Bit 3 -
+ * clears VF_LENGTH_VIOLATION_DETAILS. Bit 4 - clears
+ * VF_GRC_SPACE_VIOLATION_DETAILS. Bit 5 - clears RX_TCPL_ERR_DETAILS. Bit 6
+ * - clears TCPL_IN_TWO_RCBS_DETAILS. */
+#define PGLUE_B_REG_LATCHED_ERRORS_CLR 0x943c
+
/* [R 9] Interrupt register #0 read */
#define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298
/* [RC 9] Interrupt register #0 read clear */
@@ -3708,6 +3740,10 @@
#define PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS 0x10309c
/* [WB 160] Used for initialization of the inbound interrupts memory */
#define PXP_REG_HST_INBOUND_INT 0x103800
+/* [RW 7] Indirect access to the permission table. The fields are : {Valid;
+ * VFID[5:0]}
+ */
+#define PXP_REG_HST_ZONE_PERMISSION_TABLE 0x103400
/* [RW 32] Interrupt mask register #0 read/write */
#define PXP_REG_PXP_INT_MASK_0 0x103074
#define PXP_REG_PXP_INT_MASK_1 0x103084
@@ -4496,6 +4532,8 @@
#define TM_REG_TM_INT_STS 0x1640f0
/* [RW 7] Parity mask register #0 read/write */
#define TM_REG_TM_PRTY_MASK 0x16410c
+/* [R 7] Parity register #0 read */
+#define TM_REG_TM_PRTY_STS 0x164100
/* [RC 7] Parity register #0 read clear */
#define TM_REG_TM_PRTY_STS_CLR 0x164104
/* [RW 8] The event id for aggregated interrupt 0 */
@@ -5894,6 +5932,7 @@
#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26)
#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27)
+#define MISC_REGISTERS_RESET_REG_1_RST_XSEM (0x1<<22)
#define MISC_REGISTERS_RESET_REG_1_SET 0x584
#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
#define MISC_REGISTERS_RESET_REG_2_MSTAT0 (0x1<<24)
@@ -5966,6 +6005,7 @@
#define HW_LOCK_RESOURCE_SPIO 2
#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT (0x1<<19)
#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18)
#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1<<31)
#define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR (0x1<<30)
@@ -6305,6 +6345,18 @@
#define PCI_PM_DATA_B 0x414
#define PCI_ID_VAL1 0x434
#define PCI_ID_VAL2 0x438
+#define PCI_ID_VAL3 0x43c
+
+#define GRC_CONFIG_REG_VF_MSIX_CONTROL 0x61C
+#define GRC_CONFIG_REG_PF_INIT_VF 0x624
+#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf
+/* First VF_NUM for PF is encoded in this register.
+ * The number of VFs assigned to a PF is assumed to be a multiple of 8.
+ * Software should program these bits based on the total number of VFs
+ * programmed for each PF.
+ * Since registers from 0x000-0x7ff are split across functions, each PF will
+ * have the same location for the same 4 bits
+ */
#define PXPCS_TL_CONTROL_5 0x814
#define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN (1 << 29) /*WC*/
@@ -6554,6 +6606,27 @@
(7L<<ME_REG_ABS_PF_NUM_SHIFT) /* Absolute PF Num */
+#define PXP_VF_ADDR_IGU_START 0
+#define PXP_VF_ADDR_IGU_SIZE 0x3000
+#define PXP_VF_ADDR_IGU_END\
+ ((PXP_VF_ADDR_IGU_START) + (PXP_VF_ADDR_IGU_SIZE) - 1)
+
+#define PXP_VF_ADDR_USDM_QUEUES_START 0x3000
+#define PXP_VF_ADDR_USDM_QUEUES_SIZE\
+ (PXP_VF_ADRR_NUM_QUEUES * PXP_ADDR_QUEUE_SIZE)
+#define PXP_VF_ADDR_USDM_QUEUES_END\
+ ((PXP_VF_ADDR_USDM_QUEUES_START) + (PXP_VF_ADDR_USDM_QUEUES_SIZE) - 1)
+
+#define PXP_VF_ADDR_CSDM_GLOBAL_START 0x7600
+#define PXP_VF_ADDR_CSDM_GLOBAL_SIZE (PXP_ADDR_REG_SIZE)
+#define PXP_VF_ADDR_CSDM_GLOBAL_END\
+ ((PXP_VF_ADDR_CSDM_GLOBAL_START) + (PXP_VF_ADDR_CSDM_GLOBAL_SIZE) - 1)
+
+#define PXP_VF_ADDR_DB_START 0x7c00
+#define PXP_VF_ADDR_DB_SIZE 0x200
+#define PXP_VF_ADDR_DB_END\
+ ((PXP_VF_ADDR_DB_START) + (PXP_VF_ADDR_DB_SIZE) - 1)
+
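Taken together, the PXP_VF_ADDR_* constants pin down the fixed layout of a VF's BAR: the IGU window at offset 0, per-queue USDM zones, CSDM globals at 0x7600, then a 0x200-byte doorbell window at 0x7c00. This is what lets the VF probe path derive its doorbell pointer from the already-mapped regview instead of mapping a second BAR; a sketch along the lines of this series' bnx2x_vf_doorbells():

    /* Sketch: a VF's doorbells sit at a fixed offset inside its own BAR. */
    static void __iomem *example_vf_doorbells(struct bnx2x *bp)
    {
    	return bp->regview + PXP_VF_ADDR_DB_START;	/* 0x7c00..0x7dff */
    }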
#define MDIO_REG_BANK_CL73_IEEEB0 0x0
#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200
@@ -7107,6 +7180,7 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca
#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da
#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea
+#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI 0x80fa
#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104
#define MDIO_WC_REG_XGXS_STATUS3 0x8129
#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 09b625e0fda..b1936044767 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -1,6 +1,6 @@
/* bnx2x_sp.c: Broadcom Everest network driver.
*
- * Copyright (c) 2011-2012 Broadcom Corporation
+ * Copyright (c) 2011-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -12,7 +12,7 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Vladislav Zolotarov
*
*/
@@ -30,16 +30,14 @@
#define BNX2X_MAX_EMUL_MULTI 16
-#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
-
/**** Exe Queue interfaces ****/
/**
* bnx2x_exe_queue_init - init the Exe Queue object
*
- * @o: poiter to the object
+ * @o: pointer to the object
* @exe_len: length
- * @owner: poiter to the owner
+ * @owner: pointer to the owner
* @validate: validate function pointer
* @optimize: optimize function pointer
* @exec: execute function pointer
@@ -144,7 +142,6 @@ free_and_exit:
spin_unlock_bh(&o->lock);
return rc;
-
}
static inline void __bnx2x_exe_queue_reset_pending(
@@ -162,18 +159,6 @@ static inline void __bnx2x_exe_queue_reset_pending(
}
}
-static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
- struct bnx2x_exe_queue_obj *o)
-{
-
- spin_lock_bh(&o->lock);
-
- __bnx2x_exe_queue_reset_pending(bp, o);
-
- spin_unlock_bh(&o->lock);
-
-}
-
/**
* bnx2x_exe_queue_step - execute one execution chunk atomically
*
@@ -181,7 +166,7 @@ static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
* @o: queue
* @ramrod_flags: flags
*
- * (Atomicy is ensured using the exe_queue->lock).
+ * (Should be called while holding the exe_queue->lock).
*/
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
struct bnx2x_exe_queue_obj *o,
@@ -192,10 +177,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
memset(&spacer, 0, sizeof(spacer));
- spin_lock_bh(&o->lock);
-
- /*
- * Next step should not be performed until the current is finished,
+ /* Next step should not be performed until the current is finished,
* unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
* properly clear object internals without sending any command to the FW
* which also implies there won't be any completion to clear the
@@ -206,13 +188,11 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
__bnx2x_exe_queue_reset_pending(bp, o);
} else {
- spin_unlock_bh(&o->lock);
return 1;
}
}
- /*
- * Run through the pending commands list and create a next
+ /* Run through the pending commands list and create a next
* execution chunk.
*/
while (!list_empty(&o->exe_queue)) {
@@ -222,8 +202,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
cur_len += elem->cmd_len;
- /*
- * Prevent from both lists being empty when moving an
+ /* Prevent from both lists being empty when moving an
* element. This will allow the call of
* bnx2x_exe_queue_empty() without locking.
*/
@@ -236,26 +215,21 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
}
/* Sanity check */
- if (!cur_len) {
- spin_unlock_bh(&o->lock);
+ if (!cur_len)
return 0;
- }
rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
if (rc < 0)
- /*
- * In case of an error return the commands back to the queue
- * and reset the pending_comp.
+ /* In case of an error return the commands back to the queue
+ * and reset the pending_comp.
*/
list_splice_init(&o->pending_comp, &o->exe_queue);
else if (!rc)
- /*
- * If zero is returned, means there are no outstanding pending
+ /* If zero is returned, means there are no outstanding pending
* completions and we may dismiss the pending list.
*/
__bnx2x_exe_queue_reset_pending(bp, o);
- spin_unlock_bh(&o->lock);
return rc;
}
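Note the locking inversion this hunk performs: bnx2x_exe_queue_step() no longer takes o->lock itself, so from this patch on every caller must hold it. The resulting call pattern, sketched:

    /* Sketch: the spinlock moved from bnx2x_exe_queue_step() to its callers. */
    spin_lock_bh(&o->lock);
    rc = bnx2x_exe_queue_step(bp, o, &ramrod_flags);
    spin_unlock_bh(&o->lock);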
@@ -284,16 +258,16 @@ static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(o->state, o->pstate);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
}
static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
set_bit(o->state, o->pstate);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
}
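The rename is mechanical, but the idiom deserves a note: set_bit() and clear_bit() are not ordering barriers on every architecture, so stores that must be visible before the flag flips are fenced explicitly. A sketch with hypothetical names (o->result, MY_DONE_BIT):

    /* Sketch: fencing a plain store around an unordered atomic bit-op. */
    o->result = value;		/* must be visible before the flag is set */
    smp_mb__before_atomic();	/* stores may not sink below set_bit()    */
    set_bit(MY_DONE_BIT, o->pstate);
    smp_mb__after_atomic();	/* set_bit() ordered before later accesses */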
/**
@@ -310,7 +284,6 @@ static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
/* can take a while if any port is running */
int cnt = 5000;
-
if (CHIP_REV_IS_EMUL(bp))
cnt *= 20;
@@ -325,7 +298,7 @@ static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
return 0;
}
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
if (bp->panic)
return -EIO;
@@ -382,23 +355,6 @@ static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
return vp->get(vp, 1);
}
-
-static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
-{
- struct bnx2x_credit_pool_obj *mp = o->macs_pool;
- struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
-
- if (!mp->get(mp, 1))
- return false;
-
- if (!vp->get(vp, 1)) {
- mp->put(mp, 1);
- return false;
- }
-
- return true;
-}
-
static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
struct bnx2x_credit_pool_obj *mp = o->macs_pool;
@@ -427,49 +383,220 @@ static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
return vp->put(vp, 1);
}
-static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+/**
+ * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Non-blocking implementation; should be called under the
+ * execution queue lock.
+ */
+static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
{
- struct bnx2x_credit_pool_obj *mp = o->macs_pool;
- struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+ if (o->head_reader) {
+ DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n");
+ return -EBUSY;
+ }
- if (!mp->put(mp, 1))
- return false;
+ DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n");
+ return 0;
+}
- if (!vp->put(vp, 1)) {
- mp->get(mp, 1);
- return false;
+/**
+ * __bnx2x_vlan_mac_h_exec_pending - execute step instead of a previous step
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Should be called under execution queue lock; notice it might release
+ * and reclaim it during its run.
+ */
+static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ int rc;
+ unsigned long ramrod_flags = o->saved_ramrod_flags;
+
+ DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
+ ramrod_flags);
+ o->head_exe_request = false;
+ o->saved_ramrod_flags = 0;
+ rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
+ if (rc != 0) {
+ BNX2X_ERR("execution of pending commands failed with rc %d\n",
+ rc);
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#endif
}
+}
- return true;
+/**
+ * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ * @ramrod_flags: ramrod flags of missed execution
+ *
+ * @details Should be called under execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ unsigned long ramrod_flags)
+{
+ o->head_exe_request = true;
+ o->saved_ramrod_flags = ramrod_flags;
+ DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n",
+ ramrod_flags);
+}
+
+/**
+ * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Should be called under execution queue lock. Notice if a pending
+ * execution exists, it would perform it - possibly releasing and
+ * reclaiming the execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ /* It's possible a new pending execution was added since this writer
+ * executed. If so, execute again. [Ad infinitum]
+ */
+ while (o->head_exe_request) {
+ DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n");
+ __bnx2x_vlan_mac_h_exec_pending(bp, o);
+ }
+}
+
+
+/**
+ * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Should be called under the execution queue lock. May sleep. May
+ * release and reclaim execution queue lock during its run.
+ */
+static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ /* If we got here, we're holding lock --> no WRITER exists */
+ o->head_reader++;
+ DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n",
+ o->head_reader);
+
+ return 0;
+}
+
+/**
+ * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details May sleep. Claims and releases execution queue lock during its run.
+ */
+int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ int rc;
+
+ spin_lock_bh(&o->exe_queue.lock);
+ rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
+ spin_unlock_bh(&o->exe_queue.lock);
+
+ return rc;
+}
+
+/**
+ * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Should be called under execution queue lock. Notice if a pending
+ * execution exists, it would be performed if this was the last
+ *          reader, possibly releasing and reclaiming the execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ if (!o->head_reader) {
+ BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#endif
+ } else {
+ o->head_reader--;
+ DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n",
+ o->head_reader);
+ }
+
+ /* It's possible a new pending execution was added, and that this reader
+ * was last - if so we need to execute the command.
+ */
+ if (!o->head_reader && o->head_exe_request) {
+ DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n");
+
+ /* Writer release will do the trick */
+ __bnx2x_vlan_mac_h_write_unlock(bp, o);
+ }
+}
+
+/**
+ * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Notice if a pending execution exists, it would be performed if this
+ * was the last reader. Claims and releases the execution queue lock
+ * during its run.
+ */
+void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ spin_lock_bh(&o->exe_queue.lock);
+ __bnx2x_vlan_mac_h_read_unlock(bp, o);
+ spin_unlock_bh(&o->exe_queue.lock);
}
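The helpers above add a small reader/writer discipline over the vlan_mac head list, with the execution-queue spinlock as the only real lock: a writer trylocks and pends its step if readers exist, and the last reader (or the writer's unlock) replays the pended step. A minimal userspace sketch of the idea, with hypothetical names and a pthread mutex standing in for the bottom-half spinlock:

#include <pthread.h>
#include <stdbool.h>

/* Model of the head-list lock: a reader count plus a pending-execution
 * flag, both guarded by one mutex (the driver uses exe_queue.lock). */
struct head_lock {
	pthread_mutex_t lock;
	int readers;		/* stands in for head_reader */
	bool exe_request;	/* stands in for head_exe_request */
};

/* Writer path: try once; with readers present, pend the request so the
 * last reader runs it on unlock - the trylock-or-pend flow above. */
static bool write_trylock(struct head_lock *h)
{
	bool ok;

	pthread_mutex_lock(&h->lock);
	ok = (h->readers == 0);
	if (!ok)
		h->exe_request = true;	/* pend instead of blocking */
	pthread_mutex_unlock(&h->lock);
	return ok;
}

static void read_lock(struct head_lock *h)
{
	pthread_mutex_lock(&h->lock);
	h->readers++;		/* no writer can be active here */
	pthread_mutex_unlock(&h->lock);
}

static void read_unlock(struct head_lock *h, void (*run_pending)(void))
{
	pthread_mutex_lock(&h->lock);
	if (--h->readers == 0 && h->exe_request) {
		h->exe_request = false;
		run_pending();	/* last reader replays the pended step */
	}
	pthread_mutex_unlock(&h->lock);
}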
static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
- int n, u8 *buf)
+ int n, u8 *base, u8 stride, u8 size)
{
struct bnx2x_vlan_mac_registry_elem *pos;
- u8 *next = buf;
+ u8 *next = base;
int counter = 0;
+ int read_lock;
+
+ DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n");
+ read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
+ if (read_lock != 0)
+ BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
/* traverse list */
list_for_each_entry(pos, &o->head, link) {
if (counter < n) {
- /* place leading zeroes in buffer */
- memset(next, 0, MAC_LEADING_ZERO_CNT);
-
- /* place mac after leading zeroes*/
- memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
- ETH_ALEN);
-
- /* calculate address of next element and
- * advance counter
- */
+ memcpy(next, &pos->u, size);
counter++;
- next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));
-
- DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
- counter, next, pos->u.mac.mac);
+ DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
+ counter, next);
+ next += stride + size;
}
}
+
+ if (read_lock == 0) {
+ DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n");
+ bnx2x_vlan_mac_h_read_unlock(bp, o);
+ }
+
return counter * ETH_ALEN;
}
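bnx2x_get_n_elements() now copies size bytes per registry entry and advances by stride + size, so a caller can interleave its own per-entry bookkeeping between the copied payloads. A hedged illustration of that buffer layout (names and sizes are the sketch's, not the driver's):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy n fixed-size elements into a caller buffer, leaving `stride`
 * pad bytes after each one - the base/stride/size contract above. */
static int copy_n_elements(const uint8_t *elems, int n, size_t size,
			   uint8_t *base, size_t stride)
{
	uint8_t *next = base;
	int i;

	for (i = 0; i < n; i++) {
		memcpy(next, elems + i * size, size);
		next += stride + size;	/* hop over the caller's padding */
	}
	return i;
}

int main(void)
{
	uint8_t macs[2][6] = { { 0xaa, 1, 2, 3, 4, 5 },
			       { 0xbb, 1, 2, 3, 4, 5 } };
	uint8_t buf[2 * (2 + 6)] = { 0 };	/* 2 pad bytes per MAC */

	copy_n_elements(&macs[0][0], 2, 6, buf, 2);
	printf("second entry starts with 0x%02x\n", buf[8]);	/* 0xbb */
	return 0;
}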
@@ -487,7 +614,8 @@ static int bnx2x_check_mac_add(struct bnx2x *bp,
/* Check if a requested MAC already exists */
list_for_each_entry(pos, &o->head, link)
- if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
+ if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
+ (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
return -EEXIST;
return 0;
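ether_addr_equal() replaces the open-coded memcmp(), and the compare now also matches on is_inner_mac. Semantically the helper is just a 6-octet equality test; a simplified equivalent (the in-kernel version additionally exploits the 16-bit alignment of MAC addresses):

#include <stdbool.h>
#include <string.h>

#define ETH_ALEN 6

/* Simplified stand-in for ether_addr_equal(): true when two 6-byte
 * MAC addresses are identical. */
static bool ether_addr_equal_sketch(const unsigned char *a,
				    const unsigned char *b)
{
	return memcmp(a, b, ETH_ALEN) == 0;
}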
@@ -508,25 +636,6 @@ static int bnx2x_check_vlan_add(struct bnx2x *bp,
return 0;
}
-static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o,
- union bnx2x_classification_ramrod_data *data)
-{
- struct bnx2x_vlan_mac_registry_elem *pos;
-
- DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
- data->vlan_mac.mac, data->vlan_mac.vlan);
-
- list_for_each_entry(pos, &o->head, link)
- if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
- (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
- ETH_ALEN)))
- return -EEXIST;
-
- return 0;
-}
-
-
/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
bnx2x_check_mac_del(struct bnx2x *bp,
@@ -538,7 +647,8 @@ static struct bnx2x_vlan_mac_registry_elem *
DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
list_for_each_entry(pos, &o->head, link)
- if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
+ if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
+ (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
return pos;
return NULL;
@@ -560,25 +670,6 @@ static struct bnx2x_vlan_mac_registry_elem *
return NULL;
}
-static struct bnx2x_vlan_mac_registry_elem *
- bnx2x_check_vlan_mac_del(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o,
- union bnx2x_classification_ramrod_data *data)
-{
- struct bnx2x_vlan_mac_registry_elem *pos;
-
- DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
- data->vlan_mac.mac, data->vlan_mac.vlan);
-
- list_for_each_entry(pos, &o->head, link)
- if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
- (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
- ETH_ALEN)))
- return pos;
-
- return NULL;
-}
-
/* check_move() callback */
static bool bnx2x_check_move(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *src_o,
@@ -614,7 +705,6 @@ static bool bnx2x_check_move_always_err(
return false;
}
-
static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
{
struct bnx2x_raw_obj *raw = &o->raw;
@@ -631,9 +721,8 @@ static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
return rx_tx_flag;
}
-
-void bnx2x_set_mac_in_nig(struct bnx2x *bp,
- bool add, unsigned char *dev_addr, int index)
+static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
+ bool add, unsigned char *dev_addr, int index)
{
u32 wb_data[2];
u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
@@ -698,7 +787,7 @@ static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
*
* @cid: connection id
* @type: BNX2X_FILTER_XXX_PENDING
- * @hdr: poiter to header to setup
+ * @hdr: pointer to header to setup
* @rule_cnt:
*
* currently we always configure one rule and echo field to contain a CID and an
@@ -707,11 +796,11 @@ static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
struct eth_classify_header *hdr, int rule_cnt)
{
- hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
+ hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
+ (type << BNX2X_SWCID_SHIFT));
hdr->rule_cnt = (u8)rule_cnt;
}
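The echo field is now stored as an explicit little-endian 32-bit value packing the software CID in the low bits and the pending-filter type above it. A sketch of that packing; the mask and shift are illustrative stand-ins for BNX2X_SWCID_MASK/BNX2X_SWCID_SHIFT, and the byte-order conversion itself is left out:

#include <stdint.h>

#define SWCID_MASK	0x00ffffffu	/* stand-in value */
#define SWCID_SHIFT	24		/* stand-in value */

/* Pack CID and type into one echo word, as the header setup does. */
static uint32_t pack_echo(uint32_t cid, uint32_t type)
{
	return (cid & SWCID_MASK) | (type << SWCID_SHIFT);
}

/* The completion path recovers the CID by masking the echo back. */
static uint32_t unpack_cid(uint32_t echo)
{
	return echo & SWCID_MASK;
}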
-
/* hw_config() callbacks */
static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o,
@@ -727,8 +816,7 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
- /*
- * Set LLH CAM entry: currently only iSCSI and ETH macs are
+ /* Set LLH CAM entry: currently only iSCSI and ETH macs are
* relevant. In addition, current implementation is tuned for a
* single ETH MAC.
*
@@ -769,6 +857,8 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
&rule_entry->mac.mac_mid,
&rule_entry->mac.mac_lsb, mac);
+ rule_entry->mac.inner_mac =
+ cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
/* MOVE: Add a rule that will add this MAC to the target Queue */
if (cmd == BNX2X_VLAN_MAC_MOVE) {
@@ -785,6 +875,9 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
&rule_entry->mac.mac_mid,
&rule_entry->mac.mac_lsb, mac);
+ rule_entry->mac.inner_mac =
+ cpu_to_le16(elem->cmd_data.vlan_mac.
+ u.mac.is_inner_mac);
}
/* Set the ramrod data header */
@@ -813,8 +906,9 @@ static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
hdr->length = 1;
hdr->offset = (u8)cam_offset;
- hdr->client_id = 0xff;
- hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
+ hdr->client_id = cpu_to_le16(0xff);
+ hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
+ (type << BNX2X_SWCID_SHIFT));
}
static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
@@ -877,8 +971,7 @@ static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
struct bnx2x_raw_obj *raw = &o->raw;
struct mac_configuration_cmd *config =
(struct mac_configuration_cmd *)(raw->rdata);
- /*
- * 57710 and 57711 do not support MOVE command,
+ /* 57710 and 57711 do not support MOVE command,
* so it's either ADD or DEL
*/
bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
@@ -903,7 +996,7 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
(struct eth_classify_rules_ramrod_data *)(raw->rdata);
int rule_cnt = rule_idx + 1;
union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
- int cmd = elem->cmd_data.vlan_mac.cmd;
+ enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
@@ -943,104 +1036,12 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
rule_cnt);
}
-static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o,
- struct bnx2x_exeq_elem *elem,
- int rule_idx, int cam_offset)
-{
- struct bnx2x_raw_obj *raw = &o->raw;
- struct eth_classify_rules_ramrod_data *data =
- (struct eth_classify_rules_ramrod_data *)(raw->rdata);
- int rule_cnt = rule_idx + 1;
- union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
- int cmd = elem->cmd_data.vlan_mac.cmd;
- bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
- u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
- u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
-
-
- /* Reset the ramrod data buffer for the first rule */
- if (rule_idx == 0)
- memset(data, 0, sizeof(*data));
-
- /* Set a rule header */
- bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
- &rule_entry->pair.header);
-
- /* Set VLAN and MAC themselvs */
- rule_entry->pair.vlan = cpu_to_le16(vlan);
- bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
- &rule_entry->pair.mac_mid,
- &rule_entry->pair.mac_lsb, mac);
-
- /* MOVE: Add a rule that will add this MAC to the target Queue */
- if (cmd == BNX2X_VLAN_MAC_MOVE) {
- rule_entry++;
- rule_cnt++;
-
- /* Setup ramrod data */
- bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
- elem->cmd_data.vlan_mac.target_obj,
- true, CLASSIFY_RULE_OPCODE_PAIR,
- &rule_entry->pair.header);
-
- /* Set a VLAN itself */
- rule_entry->pair.vlan = cpu_to_le16(vlan);
- bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
- &rule_entry->pair.mac_mid,
- &rule_entry->pair.mac_lsb, mac);
- }
-
- /* Set the ramrod data header */
- /* TODO: take this to the higher level in order to prevent multiple
- writing */
- bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
- rule_cnt);
-}
-
-/**
- * bnx2x_set_one_vlan_mac_e1h -
- *
- * @bp: device handle
- * @o: bnx2x_vlan_mac_obj
- * @elem: bnx2x_exeq_elem
- * @rule_idx: rule_idx
- * @cam_offset: cam_offset
- */
-static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o,
- struct bnx2x_exeq_elem *elem,
- int rule_idx, int cam_offset)
-{
- struct bnx2x_raw_obj *raw = &o->raw;
- struct mac_configuration_cmd *config =
- (struct mac_configuration_cmd *)(raw->rdata);
- /*
- * 57710 and 57711 do not support MOVE command,
- * so it's either ADD or DEL
- */
- bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
- true : false;
-
- /* Reset the ramrod data buffer */
- memset(config, 0, sizeof(*config));
-
- bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
- cam_offset, add,
- elem->cmd_data.vlan_mac.u.vlan_mac.mac,
- elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
- ETH_VLAN_FILTER_CLASSIFY, config);
-}
-
-#define list_next_entry(pos, member) \
- list_entry((pos)->member.next, typeof(*(pos)), member)
-
/**
* bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
*
* @bp: device handle
* @p: command parameters
- * @ppos: pointer to the cooky
+ * @ppos: pointer to the cookie
*
* reconfigure next MAC/VLAN/VLAN-MAC element from the
* previously configured elements list.
@@ -1048,7 +1049,7 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
* from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
* into an account
*
- * pointer to the cooky - that should be given back in the next call to make
+ * pointer to the cookie - that should be given back in the next call to make
* function handle the next element. If *ppos is set to NULL it will restart the
* iterator. If returned *ppos == NULL this means that the last element has been
* handled.
@@ -1096,8 +1097,7 @@ static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
return bnx2x_config_vlan_mac(bp, p);
}
-/*
- * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
+/* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
* pointer to an element with a specific criteria and NULL if such an element
* hasn't been found.
*/
@@ -1135,24 +1135,6 @@ static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
return NULL;
}
-static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
- struct bnx2x_exe_queue_obj *o,
- struct bnx2x_exeq_elem *elem)
-{
- struct bnx2x_exeq_elem *pos;
- struct bnx2x_vlan_mac_ramrod_data *data =
- &elem->cmd_data.vlan_mac.u.vlan_mac;
-
- /* Check pending for execution commands */
- list_for_each_entry(pos, &o->exe_queue, link)
- if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
- sizeof(*data)) &&
- (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
- return pos;
-
- return NULL;
-}
-
/**
* bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
*
@@ -1181,8 +1163,7 @@ static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
return rc;
}
- /*
- * Check if there is a pending ADD command for this
+ /* Check if there is a pending ADD command for this
* MAC/VLAN/VLAN-MAC. Return an error if there is.
*/
if (exeq->get(exeq, elem)) {
@@ -1190,8 +1171,7 @@ static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
return -EEXIST;
}
- /*
- * TODO: Check the pending MOVE from other objects where this
+ /* TODO: Check the pending MOVE from other objects where this
* object is a destination object.
*/
@@ -1234,8 +1214,7 @@ static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
return -EEXIST;
}
- /*
- * Check if there are pending DEL or MOVE commands for this
+ /* Check if there are pending DEL or MOVE commands for this
* MAC/VLAN/VLAN-MAC. Return an error if so.
*/
memcpy(&query_elem, elem, sizeof(query_elem));
@@ -1286,8 +1265,7 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
- /*
- * Check if we can perform this operation based on the current registry
+ /* Check if we can perform this operation based on the current registry
* state.
*/
if (!src_o->check_move(bp, src_o, dest_o,
@@ -1296,8 +1274,7 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
return -EINVAL;
}
- /*
- * Check if there is an already pending DEL or MOVE command for the
+ /* Check if there is an already pending DEL or MOVE command for the
* source object or ADD command for a destination object. Return an
* error if so.
*/
@@ -1386,7 +1363,7 @@ static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
}
/**
- * bnx2x_wait_vlan_mac - passivly wait for 5 seconds until all work completes.
+ * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
*
* @bp: device handle
* @o: bnx2x_vlan_mac_obj
@@ -1407,7 +1384,7 @@ static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
/* Wait until there are no pending commands */
if (!bnx2x_exe_queue_empty(exeq))
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
else
return 0;
}
@@ -1415,6 +1392,32 @@ static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
return -EBUSY;
}
+static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ unsigned long *ramrod_flags)
+{
+ int rc = 0;
+
+ spin_lock_bh(&o->exe_queue.lock);
+
+ DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n");
+ rc = __bnx2x_vlan_mac_h_write_trylock(bp, o);
+
+ if (rc != 0) {
+ __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);
+
+		/* The calling function should not differentiate between this case
+ * and the case in which there is already a pending ramrod
+ */
+ rc = 1;
+ } else {
+ rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+ }
+ spin_unlock_bh(&o->exe_queue.lock);
+
+ return rc;
+}
+
/**
* bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
*
@@ -1432,19 +1435,27 @@ static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
struct bnx2x_raw_obj *r = &o->raw;
int rc;
+ /* Clearing the pending list & raw state should be made
+ * atomically (as execution flow assumes they represent the same).
+ */
+ spin_lock_bh(&o->exe_queue.lock);
+
/* Reset pending list */
- bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
+ __bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
/* Clear pending */
r->clear_pending(r);
+ spin_unlock_bh(&o->exe_queue.lock);
+
/* If ramrod failed this is most likely a SW bug */
if (cqe->message.error)
return -EINVAL;
- /* Run the next bulk of pending commands if requeted */
+ /* Run the next bulk of pending commands if requested */
if (test_bit(RAMROD_CONT, ramrod_flags)) {
- rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+ rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags);
+
if (rc < 0)
return rc;
}
@@ -1532,7 +1543,7 @@ static inline int bnx2x_vlan_mac_get_registry_elem(
bool restore,
struct bnx2x_vlan_mac_registry_elem **re)
{
- int cmd = elem->cmd_data.vlan_mac.cmd;
+ enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
struct bnx2x_vlan_mac_registry_elem *reg_elem;
/* Allocate a new registry element if needed. */
@@ -1544,9 +1555,8 @@ static inline int bnx2x_vlan_mac_get_registry_elem(
/* Get a new CAM offset */
if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
- /*
- * This shell never happen, because we have checked the
- * CAM availiability in the 'validate'.
+ /* This shall never happen, because we have checked the
+ * CAM availability in the 'validate'.
*/
WARN_ON(1);
kfree(reg_elem);
@@ -1591,10 +1601,9 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
struct bnx2x_vlan_mac_registry_elem *reg_elem;
- int cmd;
+ enum bnx2x_vlan_mac_cmd cmd;
- /*
- * If DRIVER_ONLY execution is requested, cleanup a registry
+ /* If DRIVER_ONLY execution is requested, cleanup a registry
* and exit. Otherwise send a ramrod to FW.
*/
if (!drv_only) {
@@ -1603,11 +1612,10 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
/* Set pending */
r->set_pending(r);
- /* Fill tha ramrod data */
+ /* Fill the ramrod data */
list_for_each_entry(elem, exe_chunk, link) {
cmd = elem->cmd_data.vlan_mac.cmd;
- /*
- * We will add to the target object in MOVE command, so
+ /* We will add to the target object in MOVE command, so
* change the object for a CAM search.
*/
if (cmd == BNX2X_VLAN_MAC_MOVE)
@@ -1640,12 +1648,11 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
idx++;
}
- /*
- * No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
- * and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
+	 * and updating of the SPQ producer which involves a memory
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
*/
rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
@@ -1741,9 +1748,8 @@ static inline int bnx2x_vlan_mac_push_new_cmd(
* @p:
*
*/
-int bnx2x_config_vlan_mac(
- struct bnx2x *bp,
- struct bnx2x_vlan_mac_ramrod_params *p)
+int bnx2x_config_vlan_mac(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *p)
{
int rc = 0;
struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
@@ -1760,8 +1766,7 @@ int bnx2x_config_vlan_mac(
return rc;
}
- /*
- * If nothing will be executed further in this iteration we want to
+ /* If nothing will be executed further in this iteration we want to
* return PENDING if there are pending commands
*/
if (!bnx2x_exe_queue_empty(&o->exe_queue))
@@ -1775,18 +1780,17 @@ int bnx2x_config_vlan_mac(
/* Execute commands if required */
if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
- rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+ rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
+ &p->ramrod_flags);
if (rc < 0)
return rc;
}
- /*
- * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
+ /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
* then user want to wait until the last command is done.
*/
if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
- /*
- * Wait maximum for the current exe_queue length iterations plus
+ /* Wait maximum for the current exe_queue length iterations plus
* one (for the current pending command).
*/
int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
@@ -1800,8 +1804,9 @@ int bnx2x_config_vlan_mac(
return rc;
/* Make a next step */
- rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
- ramrod_flags);
+ rc = __bnx2x_vlan_mac_execute_step(bp,
+ p->vlan_mac_obj,
+ &p->ramrod_flags);
if (rc < 0)
return rc;
}
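The RAMROD_COMP_WAIT branch above bounds its wait by the current queue length plus one (the command already in flight), waiting for each completion and then kicking the next chunk. A hedged sketch of that control flow; the three callbacks are hypothetical stand-ins for the driver's helpers:

#include <stdbool.h>

static int comp_wait_sketch(int queue_len,
			    bool (*queue_empty)(void),
			    int (*wait_done)(void),
			    int (*step)(void))
{
	int max_iterations = queue_len + 1;	/* queue + in-flight cmd */

	while (!queue_empty() && max_iterations--) {
		int rc = wait_done();	/* wait for pending completion */
		if (rc)
			return rc;	/* timeout or panic */
		rc = step();		/* launch the next chunk */
		if (rc < 0)
			return rc;
	}
	return 0;
}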
@@ -1812,8 +1817,6 @@ int bnx2x_config_vlan_mac(
return rc;
}
-
-
/**
* bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
*
@@ -1823,7 +1826,7 @@ int bnx2x_config_vlan_mac(
* @ramrod_flags: execution flags to be used for this deletion
*
* if the last operation has completed successfully and there are no
- * moreelements left, positive value if the last operation has completed
+ * more elements left, positive value if the last operation has completed
* successfully and there are more previously configured elements, negative
 * value if the current operation has failed.
*/
@@ -1833,18 +1836,21 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
unsigned long *ramrod_flags)
{
struct bnx2x_vlan_mac_registry_elem *pos = NULL;
- int rc = 0;
struct bnx2x_vlan_mac_ramrod_params p;
struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
+ unsigned long flags;
+ int read_lock;
+ int rc = 0;
/* Clear pending commands first */
spin_lock_bh(&exeq->lock);
list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
- if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
- *vlan_mac_flags) {
+ flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
+ if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+ BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
rc = exeq->remove(bp, exeq->owner, exeq_pos);
if (rc) {
BNX2X_ERR("Failed to remove command\n");
@@ -1852,6 +1858,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
return rc;
}
list_del(&exeq_pos->link);
+ bnx2x_exe_queue_free_elem(bp, exeq_pos);
}
}
@@ -1863,26 +1870,36 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
p.ramrod_flags = *ramrod_flags;
p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
- /*
- * Add all but the last VLAN-MAC to the execution queue without actually
+ /* Add all but the last VLAN-MAC to the execution queue without actually
	 * executing anything.
*/
__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
__clear_bit(RAMROD_CONT, &p.ramrod_flags);
+ DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
+ read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
+ if (read_lock != 0)
+ return read_lock;
+
list_for_each_entry(pos, &o->head, link) {
- if (pos->vlan_mac_flags == *vlan_mac_flags) {
+ flags = pos->vlan_mac_flags;
+ if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+ BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
rc = bnx2x_config_vlan_mac(bp, &p);
if (rc < 0) {
BNX2X_ERR("Failed to add a new DEL command\n");
+ bnx2x_vlan_mac_h_read_unlock(bp, o);
return rc;
}
}
}
+ DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
+ bnx2x_vlan_mac_h_read_unlock(bp, o);
+
p.ramrod_flags = *ramrod_flags;
__set_bit(RAMROD_CONT, &p.ramrod_flags);
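Both loops above now compare flag words through BNX2X_VLAN_MAC_CMP_FLAGS rather than raw equality, so bits that must not affect matching are masked away first. The idea in isolation (the mask value is an illustrative stand-in):

#include <stdbool.h>

#define CMP_MASK 0x3ul	/* hypothetical: keep only match-relevant bits */

static bool vlan_mac_flags_match(unsigned long a, unsigned long b)
{
	return (a & CMP_MASK) == (b & CMP_MASK);
}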
@@ -1914,6 +1931,9 @@ static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
struct bnx2x_credit_pool_obj *vlans_pool)
{
INIT_LIST_HEAD(&o->head);
+ o->head_reader = 0;
+ o->head_exe_request = false;
+ o->saved_ramrod_flags = 0;
o->macs_pool = macs_pool;
o->vlans_pool = vlans_pool;
@@ -1927,7 +1947,6 @@ static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
state, pstate, type);
}
-
void bnx2x_init_mac_obj(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *mac_obj,
u8 cl_id, u32 cid, u8 func_id, void *rdata,
@@ -2010,6 +2029,7 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
vlan_obj->check_move = bnx2x_check_move;
vlan_obj->ramrod_cmd =
RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+ vlan_obj->get_n_elements = bnx2x_get_n_elements;
/* Exe Queue */
bnx2x_exe_queue_init(bp,
@@ -2022,71 +2042,6 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
}
}
-void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *vlan_mac_obj,
- u8 cl_id, u32 cid, u8 func_id, void *rdata,
- dma_addr_t rdata_mapping, int state,
- unsigned long *pstate, bnx2x_obj_type type,
- struct bnx2x_credit_pool_obj *macs_pool,
- struct bnx2x_credit_pool_obj *vlans_pool)
-{
- union bnx2x_qable_obj *qable_obj =
- (union bnx2x_qable_obj *)vlan_mac_obj;
-
- bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
- rdata_mapping, state, pstate, type,
- macs_pool, vlans_pool);
-
- /* CAM pool handling */
- vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
- vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
- /*
- * CAM offset is relevant for 57710 and 57711 chips only which have a
- * single CAM for both MACs and VLAN-MAC pairs. So the offset
- * will be taken from MACs' pool object only.
- */
- vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
- vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
-
- if (CHIP_IS_E1(bp)) {
- BNX2X_ERR("Do not support chips others than E2\n");
- BUG();
- } else if (CHIP_IS_E1H(bp)) {
- vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
- vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
- vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
- vlan_mac_obj->check_move = bnx2x_check_move_always_err;
- vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
-
- /* Exe Queue */
- bnx2x_exe_queue_init(bp,
- &vlan_mac_obj->exe_queue, 1, qable_obj,
- bnx2x_validate_vlan_mac,
- bnx2x_remove_vlan_mac,
- bnx2x_optimize_vlan_mac,
- bnx2x_execute_vlan_mac,
- bnx2x_exeq_get_vlan_mac);
- } else {
- vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
- vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
- vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
- vlan_mac_obj->check_move = bnx2x_check_move;
- vlan_mac_obj->ramrod_cmd =
- RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
-
- /* Exe Queue */
- bnx2x_exe_queue_init(bp,
- &vlan_mac_obj->exe_queue,
- CLASSIFY_RULES_COUNT,
- qable_obj, bnx2x_validate_vlan_mac,
- bnx2x_remove_vlan_mac,
- bnx2x_optimize_vlan_mac,
- bnx2x_execute_vlan_mac,
- bnx2x_exeq_get_vlan_mac);
- }
-
-}
-
/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
static inline void __storm_memset_mac_filters(struct bnx2x *bp,
struct tstorm_eth_mac_filter_config *mac_filters,
@@ -2103,18 +2058,18 @@ static inline void __storm_memset_mac_filters(struct bnx2x *bp,
static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
struct bnx2x_rx_mode_ramrod_params *p)
{
- /* update the bp MAC filter structure */
+ /* update the bp MAC filter structure */
u32 mask = (1 << p->cl_id);
struct tstorm_eth_mac_filter_config *mac_filters =
(struct tstorm_eth_mac_filter_config *)p->rdata;
- /* initial seeting is drop-all */
+ /* initial setting is drop-all */
u8 drop_all_ucast = 1, drop_all_mcast = 1;
u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
u8 unmatched_unicast = 0;
- /* In e1x there we only take into account rx acceot flag since tx switching
+	/* In e1x we only take into account the rx accept flag since tx switching
* isn't enabled. */
if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
/* accept matched ucast */
@@ -2166,7 +2121,7 @@ static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
mac_filters->unmatched_unicast & ~mask;
DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
- "accp_mcast 0x%x\naccp_bcast 0x%x\n",
+ "accp_mcast 0x%x\naccp_bcast 0x%x\n",
mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
mac_filters->bcast_accept_all);
@@ -2176,7 +2131,7 @@ static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
/* The operation is completed */
clear_bit(p->state, p->pstate);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
return 0;
}
@@ -2186,12 +2141,12 @@ static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
struct eth_classify_header *hdr,
u8 rule_cnt)
{
- hdr->echo = cid;
+ hdr->echo = cpu_to_le32(cid);
hdr->rule_cnt = rule_cnt;
}
static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
- unsigned long accept_flags,
+ unsigned long *accept_flags,
struct eth_filter_rules_cmd *cmd,
bool clear_accept_all)
{
@@ -2201,33 +2156,33 @@ static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
- if (accept_flags) {
- if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
- state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+ if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
- if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
- state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+ if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
+ state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
- if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
- state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
- state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
- }
+ if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+ state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
+ }
- if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
- state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
- state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
- }
- if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
- state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+ if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
+ state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
+ state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+ }
- if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
- state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
- state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
- }
- if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
- state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
+ if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
+ state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+
+ if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+ state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
}
+ if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
+ state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
+
/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
if (clear_accept_all) {
state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
@@ -2237,7 +2192,6 @@ static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
}
cmd->state = cpu_to_le16(state);
-
}
static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
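bnx2x_rx_mode_set_cmd_state_e2() above starts from a drop-all state and clears or sets filter bits per accept flag, now reading the flags through a pointer. A reduced model of that translation (bit values are illustrative, not the firmware's):

#include <stdbool.h>
#include <stdint.h>

#define DROP_ALL_UCAST		(1u << 0)
#define DROP_ALL_MCAST		(1u << 1)
#define ACCEPT_ALL_UCAST	(1u << 2)

static uint16_t build_filter_state(bool accept_ucast, bool accept_all_ucast)
{
	uint16_t state = DROP_ALL_UCAST | DROP_ALL_MCAST;	/* default */

	if (accept_ucast)
		state &= ~DROP_ALL_UCAST;	/* accept matched unicast */

	if (accept_all_ucast) {
		state &= ~DROP_ALL_UCAST;
		state |= ACCEPT_ALL_UCAST;	/* promiscuous unicast */
	}
	return state;
}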
@@ -2260,8 +2214,9 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
data->rules[rule_idx].cmd_general_data =
ETH_FILTER_RULES_CMD_TX_CMD;
- bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
- &(data->rules[rule_idx++]), false);
+ bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
+ &(data->rules[rule_idx++]),
+ false);
}
/* Rx */
@@ -2272,13 +2227,12 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
data->rules[rule_idx].cmd_general_data =
ETH_FILTER_RULES_CMD_RX_CMD;
- bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
- &(data->rules[rule_idx++]), false);
+ bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
+ &(data->rules[rule_idx++]),
+ false);
}
-
- /*
- * If FCoE Queue configuration has been requested configure the Rx and
+ /* If FCoE Queue configuration has been requested configure the Rx and
* internal switching modes for this queue in separate rules.
*
	 * FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
@@ -2293,9 +2247,10 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
data->rules[rule_idx].cmd_general_data =
ETH_FILTER_RULES_CMD_TX_CMD;
- bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
- &(data->rules[rule_idx++]),
+ bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
+ &(data->rules[rule_idx]),
true);
+ rule_idx++;
}
/* Rx */
@@ -2306,14 +2261,14 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
data->rules[rule_idx].cmd_general_data =
ETH_FILTER_RULES_CMD_RX_CMD;
- bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
- &(data->rules[rule_idx++]),
+ bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
+ &(data->rules[rule_idx]),
true);
+ rule_idx++;
}
}
- /*
- * Set the ramrod header (most importantly - number of rules to
+ /* Set the ramrod header (most importantly - number of rules to
* configure).
*/
bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
@@ -2322,12 +2277,11 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
data->header.rule_cnt, p->rx_accept_flags,
p->tx_accept_flags);
- /*
- * No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
- * and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
/* Send a ramrod */
@@ -2429,7 +2383,7 @@ static int bnx2x_mcast_wait(struct bnx2x *bp,
static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
struct bnx2x_mcast_obj *o,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
int total_sz;
struct bnx2x_pending_mcast_cmd *new_cmd;
@@ -2464,7 +2418,7 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
cur_mac = (struct bnx2x_mcast_mac_elem *)
((u8 *)new_cmd + sizeof(*new_cmd));
- /* Push the MACs of the current command into the pendig command
+ /* Push the MACs of the current command into the pending command
* MACs list: FIFO
*/
list_for_each_entry(pos, &p->mcast_list, link) {
@@ -2561,7 +2515,7 @@ static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
struct bnx2x_mcast_obj *o, int idx,
union bnx2x_mcast_config_data *cfg_data,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_raw_obj *r = &o->raw;
struct eth_multicast_rules_ramrod_data *data =
@@ -2625,7 +2579,7 @@ static inline int bnx2x_mcast_handle_restore_cmd_e2(
int *rdata_idx)
{
int cur_bin, cnt = *rdata_idx;
- union bnx2x_mcast_config_data cfg_data = {0};
+ union bnx2x_mcast_config_data cfg_data = {NULL};
/* go through the registry and configure the bins from it */
for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
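The {0} → {NULL} change in these initializers is cosmetic but deliberate: the first member of the config-data union is a pointer, and static checkers such as sparse warn when a plain integer 0 initializes a pointer. Illustrative union only:

#include <stddef.h>

union cfg_data_sketch {
	unsigned char *mac;	/* first member is a pointer */
	unsigned long vlan;
};

static union cfg_data_sketch cfg = { NULL };	/* checker-clean */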
@@ -2657,7 +2611,7 @@ static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
{
struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
int cnt = *line_idx;
- union bnx2x_mcast_config_data cfg_data = {0};
+ union bnx2x_mcast_config_data cfg_data = {NULL};
list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
link) {
@@ -2780,7 +2734,7 @@ static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
int *line_idx)
{
struct bnx2x_mcast_list_elem *mlist_pos;
- union bnx2x_mcast_config_data cfg_data = {0};
+ union bnx2x_mcast_config_data cfg_data = {NULL};
int cnt = *line_idx;
list_for_each_entry(mlist_pos, &p->mcast_list, link) {
@@ -2790,7 +2744,7 @@ static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
cnt++;
DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
- mlist_pos->mac);
+ mlist_pos->mac);
}
*line_idx = cnt;
@@ -2827,7 +2781,8 @@ static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
* Returns number of lines filled in the ramrod data in total.
*/
static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
- struct bnx2x_mcast_ramrod_params *p, int cmd,
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd,
int start_cnt)
{
struct bnx2x_mcast_obj *o = p->mcast_obj;
@@ -2861,7 +2816,7 @@ static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_mcast_obj *o = p->mcast_obj;
int reg_sz = o->get_registry_size(o);
@@ -2896,7 +2851,6 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
default:
BNX2X_ERR("Unknown command: %d\n", cmd);
return -EINVAL;
-
}
/* Increase the total number of MACs pending to be configured */
@@ -2930,8 +2884,9 @@ static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
struct eth_multicast_rules_ramrod_data *data =
(struct eth_multicast_rules_ramrod_data *)(r->rdata);
- data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
- (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
+ data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
+ (BNX2X_FILTER_MCAST_PENDING <<
+ BNX2X_SWCID_SHIFT));
data->header.rule_cnt = len;
}
@@ -2965,7 +2920,7 @@ static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
struct bnx2x_mcast_obj *o = p->mcast_obj;
@@ -3020,20 +2975,18 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
if (!o->total_pending_num)
bnx2x_mcast_refresh_registry_e2(bp, o);
- /*
- * If CLEAR_ONLY was requested - don't send a ramrod and clear
+ /* If CLEAR_ONLY was requested - don't send a ramrod and clear
* RAMROD_PENDING status immediately.
*/
if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
raw->clear_pending(raw);
return 0;
} else {
- /*
- * No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
- * and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
/* Send a ramrod */
@@ -3051,7 +3004,7 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
/* Mark, that there is a work to do */
if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
@@ -3085,7 +3038,7 @@ static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
- mlist_pos->mac, bit);
+ mlist_pos->mac, bit);
/* bookkeeping... */
BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
@@ -3107,13 +3060,13 @@ static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
}
}
-/* On 57711 we write the multicast MACs' aproximate match
+/* On 57711 we write the multicast MACs' approximate match
 * table directly into the TSTORM's internal RAM. So we don't
* really need to handle any tricks to make it work.
*/
static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
int i;
struct bnx2x_mcast_obj *o = p->mcast_obj;
@@ -3167,7 +3120,7 @@ static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_mcast_obj *o = p->mcast_obj;
int reg_sz = o->get_registry_size(o);
@@ -3209,7 +3162,6 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
default:
BNX2X_ERR("Unknown command: %d\n", cmd);
return -EINVAL;
-
}
/* We want to ensure that commands are executed one by one for 57710.
@@ -3231,7 +3183,7 @@ static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
/* If current command hasn't been handled yet and we are
* here means that it's meant to be dropped and we have to
- * update the number of outstandling MACs accordingly.
+ * update the number of outstanding MACs accordingly.
*/
if (p->mcast_list_len)
o->total_pending_num -= o->max_cmd_len;
@@ -3240,7 +3192,7 @@ static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
struct bnx2x_mcast_obj *o, int idx,
union bnx2x_mcast_config_data *cfg_data,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_raw_obj *r = &o->raw;
struct mac_configuration_cmd *data =
@@ -3284,9 +3236,10 @@ static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
BNX2X_MAX_MULTICAST*(1 + r->func_id));
data->hdr.offset = offset;
- data->hdr.client_id = 0xff;
- data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
- (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
+ data->hdr.client_id = cpu_to_le16(0xff);
+ data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
+ (BNX2X_FILTER_MCAST_PENDING <<
+ BNX2X_SWCID_SHIFT));
data->hdr.length = len;
}
@@ -3309,7 +3262,7 @@ static inline int bnx2x_mcast_handle_restore_cmd_e1(
{
struct bnx2x_mcast_mac_elem *elem;
int i = 0;
- union bnx2x_mcast_config_data cfg_data = {0};
+ union bnx2x_mcast_config_data cfg_data = {NULL};
/* go through the registry and configure the MACs from it. */
list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
@@ -3319,7 +3272,7 @@ static inline int bnx2x_mcast_handle_restore_cmd_e1(
i++;
DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
- cfg_data.mac);
+ cfg_data.mac);
}
*rdata_idx = i;
@@ -3327,17 +3280,15 @@ static inline int bnx2x_mcast_handle_restore_cmd_e1(
return -1;
}
-
static inline int bnx2x_mcast_handle_pending_cmds_e1(
struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
{
struct bnx2x_pending_mcast_cmd *cmd_pos;
struct bnx2x_mcast_mac_elem *pmac_pos;
struct bnx2x_mcast_obj *o = p->mcast_obj;
- union bnx2x_mcast_config_data cfg_data = {0};
+ union bnx2x_mcast_config_data cfg_data = {NULL};
int cnt = 0;
-
/* If nothing to be done - return */
if (list_empty(&o->pending_cmds_head))
return 0;
@@ -3355,7 +3306,7 @@ static inline int bnx2x_mcast_handle_pending_cmds_e1(
cnt++;
DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
- pmac_pos->mac);
+ pmac_pos->mac);
}
break;
@@ -3458,7 +3409,7 @@ static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_mcast_obj *o = p->mcast_obj;
struct bnx2x_raw_obj *raw = &o->raw;
@@ -3508,20 +3459,18 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
if (rc)
return rc;
- /*
- * If CLEAR_ONLY was requested - don't send a ramrod and clear
+ /* If CLEAR_ONLY was requested - don't send a ramrod and clear
* RAMROD_PENDING status immediately.
*/
if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
raw->clear_pending(raw);
return 0;
} else {
- /*
- * No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
- * and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
/* Send a ramrod */
@@ -3535,7 +3484,6 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
/* Ramrod completion is pending */
return 1;
}
-
}
static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
@@ -3562,7 +3510,7 @@ static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
int bnx2x_config_mcast(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_mcast_obj *o = p->mcast_obj;
struct bnx2x_raw_obj *r = &o->raw;
@@ -3628,16 +3576,16 @@ error_exit1:
static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(o->sched_state, o->raw.pstate);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
}
static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
set_bit(o->sched_state, o->raw.pstate);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
}
static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
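The sched set/clear helpers above bracket a plain bit operation with smp_mb__before_atomic()/smp_mb__after_atomic() (the renamed bit-op barriers), so surrounding stores are ordered against the flag update. A userspace analogue using C11 fences (names and fence strength are illustrative):

#include <stdatomic.h>

static void set_sched_sketch(atomic_ulong *pstate, int bit)
{
	atomic_thread_fence(memory_order_seq_cst);	/* before_atomic */
	atomic_fetch_or_explicit(pstate, 1UL << bit, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* after_atomic */
}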
@@ -3833,7 +3781,6 @@ static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
return true;
}
-
static bool bnx2x_credit_pool_get_entry(
struct bnx2x_credit_pool_obj *o,
int *offset)
@@ -3984,8 +3931,7 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
} else {
- /*
- * CAM credit is equaly divided between all active functions
+	/* CAM credit is equally divided between all active functions
* on the PATH.
*/
if ((func_num > 0)) {
@@ -3994,8 +3940,7 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
else
cam_sz = BNX2X_CAM_SIZE_EMUL;
- /*
- * No need for CAM entries handling for 57712 and
+ /* No need for CAM entries handling for 57712 and
* newer.
*/
bnx2x_init_credit_pool(p, -1, cam_sz);
@@ -4003,7 +3948,6 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
/* this should never happen! Block MAC operations. */
bnx2x_init_credit_pool(p, 0, 0);
}
-
}
}
@@ -4013,14 +3957,12 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
u8 func_num)
{
if (CHIP_IS_E1x(bp)) {
- /*
- * There is no VLAN credit in HW on 57710 and 57711 only
+ /* There is no VLAN credit in HW on 57710 and 57711 only
* MAC / MAC-VLAN can be set
*/
bnx2x_init_credit_pool(p, 0, -1);
} else {
- /*
- * CAM credit is equaly divided between all active functions
+ /* CAM credit is equally divided between all active functions
* on the PATH.
*/
if (func_num > 0) {
@@ -4036,7 +3978,7 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
/**
* bnx2x_debug_print_ind_table - prints the indirection table configuration.
*
- * @bp: driver hanlde
+ * @bp: driver handle
* @p: pointer to rss configuration
*
* Prints it when NETIF_MSG_IFUP debug level is configured.
@@ -4085,8 +4027,8 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
DP(BNX2X_MSG_SP, "Configuring RSS\n");
/* Set an echo field */
- data->echo = (r->cid & BNX2X_SWCID_MASK) |
- (r->state << BNX2X_SWCID_SHIFT);
+ data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
+ (r->state << BNX2X_SWCID_SHIFT));
/* RSS mode */
if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
@@ -4149,12 +4091,11 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
}
- /*
- * No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
- * and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
/* Send a ramrod */
@@ -4183,8 +4124,11 @@ int bnx2x_config_rss(struct bnx2x *bp,
struct bnx2x_raw_obj *r = &o->raw;
/* Do nothing if only driver cleanup was requested */
- if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+ DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n",
+ p->ramrod_flags);
return 0;
+ }
r->set_pending(r);
@@ -4200,7 +4144,6 @@ int bnx2x_config_rss(struct bnx2x *bp,
return rc;
}
-
void bnx2x_init_rss_config_obj(struct bnx2x *bp,
struct bnx2x_rss_config_obj *rss_obj,
u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
@@ -4237,11 +4180,16 @@ int bnx2x_queue_state_change(struct bnx2x *bp,
unsigned long *pending = &o->pending;
/* Check that the requested transition is legal */
- if (o->check_transition(bp, o, params))
+ rc = o->check_transition(bp, o, params);
+ if (rc) {
+ BNX2X_ERR("check transition returned an error. rc %d\n", rc);
return -EINVAL;
+ }
/* Set "pending" bit */
+ DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
pending_bit = o->set_pending(o, params);
+ DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);
/* Don't send a command if only driver cleanup was requested */
if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
@@ -4252,7 +4200,7 @@ int bnx2x_queue_state_change(struct bnx2x *bp,
if (rc) {
o->next_state = BNX2X_Q_STATE_MAX;
clear_bit(pending_bit, pending);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
return rc;
}
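bnx2x_queue_state_change() now logs the check_transition() error before publishing the pending bit, and rolls the bit back if posting the command fails. The overall shape, with check()/send() as hypothetical stand-ins for the object's callbacks:

#include <stdatomic.h>

static int state_change_sketch(atomic_ulong *pending, int bit,
			       int (*check)(void), int (*send)(void))
{
	int rc = check();
	if (rc)
		return rc;			/* illegal transition (the driver returns -EINVAL) */

	atomic_fetch_or(pending, 1UL << bit);	/* publish "pending" */

	rc = send();
	if (rc) {
		atomic_fetch_and(pending, ~(1UL << bit));	/* roll back */
		return rc;
	}

	/* Caller polls or waits on the bit for completion */
	return (int)((atomic_load(pending) >> bit) & 1);
}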
@@ -4268,7 +4216,6 @@ int bnx2x_queue_state_change(struct bnx2x *bp,
return !!test_bit(pending_bit, pending);
}
-
static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
struct bnx2x_queue_state_params *params)
{
@@ -4317,7 +4264,7 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
}
if (o->next_tx_only >= o->max_cos)
- /* >= becuase tx only must always be smaller than cos since the
+ /* >= because tx only must always be smaller than cos since the
* primary connection supports COS 0
*/
BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
@@ -4341,7 +4288,7 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
wmb();
clear_bit(cmd, &o->pending);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
return 0;
}
@@ -4383,7 +4330,6 @@ static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
gen_data->mtu = cpu_to_le16(params->mtu);
gen_data->func_id = o->func_id;
-
gen_data->cos = params->cos;
gen_data->traffic_type =
@@ -4412,6 +4358,12 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
tx_data->force_default_pri_flg =
test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
+ tx_data->tunnel_lso_inc_ip_id =
+ test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
+ tx_data->tunnel_non_lso_pcsum_location =
+ test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT :
+ PCSUM_ON_BD;
+
tx_data->tx_status_block_id = params->fw_sb_id;
tx_data->tx_sb_index_number = params->sb_cq_index;
tx_data->tss_leading_client_id = params->tss_leading_cl_id;
@@ -4504,7 +4456,6 @@ static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
cpu_to_le16(params->silent_removal_value);
rx_data->silent_vlan_mask =
cpu_to_le16(params->silent_removal_mask);
-
}
/* initialize the general, tx and rx parts of a queue object */
@@ -4626,14 +4577,12 @@ static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
/* Fill the ramrod data */
bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
- /*
- * No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
- * and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
-
return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
U64_HI(data_mapping),
U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4655,14 +4604,12 @@ static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
bnx2x_q_fill_setup_data_e2(bp, params, rdata);
- /*
- * No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
- * and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
-
return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
U64_HI(data_mapping),
U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4680,7 +4627,6 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
&params->params.tx_only;
u8 cid_index = tx_only_params->cid_index;
-
if (cid_index >= o->max_cos) {
BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
o->cl_id, cid_index);
@@ -4701,14 +4647,12 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
o->cids[cid_index], rdata->general.client_id,
rdata->general.sp_client_id, rdata->general.cos);
- /*
- * No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
- * and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
-
return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
U64_HI(data_mapping),
U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4735,7 +4679,7 @@ static void bnx2x_q_fill_update_data(struct bnx2x *bp,
test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
&params->update_flags);
- /* Outer VLAN sripping */
+ /* Outer VLAN stripping */
data->outer_vlan_removal_enable_flg =
test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
data->outer_vlan_removal_change_flg =
@@ -4771,6 +4715,13 @@ static void bnx2x_q_fill_update_data(struct bnx2x *bp,
test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
+
+ /* tx switching */
+ data->tx_switching_flg =
+ test_bit(BNX2X_Q_UPDATE_TX_SWITCHING, &params->update_flags);
+ data->tx_switching_change_flg =
+ test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+ &params->update_flags);
}
static inline int bnx2x_q_send_update(struct bnx2x *bp,
@@ -4790,21 +4741,18 @@ static inline int bnx2x_q_send_update(struct bnx2x *bp,
return -EINVAL;
}
-
/* Clear the ramrod data */
memset(rdata, 0, sizeof(*rdata));
/* Fill the ramrod data */
bnx2x_q_fill_update_data(bp, o, update_params, rdata);
- /*
- * No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
- * and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
-
return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
o->cids[cid_index], U64_HI(data_mapping),
U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4851,11 +4799,62 @@ static inline int bnx2x_q_send_activate(struct bnx2x *bp,
return bnx2x_q_send_update(bp, params);
}
+static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj,
+ struct bnx2x_queue_update_tpa_params *params,
+ struct tpa_update_ramrod_data *data)
+{
+ data->client_id = obj->cl_id;
+ data->complete_on_both_clients = params->complete_on_both_clients;
+ data->dont_verify_rings_pause_thr_flg =
+ params->dont_verify_thr;
+ data->max_agg_size = cpu_to_le16(params->max_agg_sz);
+ data->max_sges_for_packet = params->max_sges_pkt;
+ data->max_tpa_queues = params->max_tpa_queues;
+ data->sge_buff_size = cpu_to_le16(params->sge_buff_sz);
+ data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map));
+ data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map));
+ data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high);
+ data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low);
+ data->tpa_mode = params->tpa_mode;
+ data->update_ipv4 = params->update_ipv4;
+ data->update_ipv6 = params->update_ipv6;
+}
+
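
The fill routine above converts every multi-byte field with cpu_to_le16()/cpu_to_le32() because the firmware consumes ramrod data in little-endian order regardless of host endianness. A minimal userspace sketch of the same discipline, using glibc's <endian.h> helpers in place of the kernel macros (the struct and field names here are invented for illustration):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical descriptor: multi-byte fields are stored little-endian,
 * mirroring how tpa_update_ramrod_data is filled above. */
struct toy_desc {
	uint8_t  client_id;
	uint8_t  max_queues;
	uint16_t agg_size;	/* little-endian in memory */
	uint32_t base_lo;	/* little-endian in memory */
};

static void toy_fill(struct toy_desc *d, uint16_t agg, uint32_t base)
{
	memset(d, 0, sizeof(*d));	/* like the memset of rdata */
	d->client_id  = 5;
	d->max_queues = 4;
	d->agg_size = htole16(agg);	/* kernel: cpu_to_le16() */
	d->base_lo  = htole32(base);	/* kernel: cpu_to_le32() */
}

int main(void)
{
	struct toy_desc d;
	const uint8_t *p = (const uint8_t *)&d.agg_size;

	toy_fill(&d, 0x1234, 0xcafef00d);
	printf("agg_size bytes in memory: %02x %02x\n", p[0], p[1]);
	return 0;
}
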
static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
struct bnx2x_queue_state_params *params)
{
- /* TODO: Not implemented yet. */
- return -1;
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct tpa_update_ramrod_data *rdata =
+ (struct tpa_update_ramrod_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ struct bnx2x_queue_update_tpa_params *update_tpa_params =
+ &params->params.update_tpa;
+ u16 type;
+
+ /* Clear the ramrod data */
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata);
+
+ /* Add the function id inside the type, so that the sp post function
+ * doesn't automatically add the PF func-id; this is required
+ * for operations done by PFs on behalf of their VFs
+ */
+ type = ETH_CONNECTION_TYPE |
+ ((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT);
+
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE,
+ o->cids[BNX2X_PRIMARY_CID_INDEX],
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), type);
}
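
bnx2x_q_send_update_tpa() differs from its siblings in that it folds the queue's function id into the SPE type word, so the slow path post code will not substitute the PF's own id. The bit-packing on its own, as a userspace sketch (the field widths and shift below are made up; the real values come from the firmware HSI headers):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: low byte = connection type, next byte = func id. */
#define TOY_CONN_MASK	0xffu
#define TOY_FUNC_SHIFT	8

static uint16_t toy_spe_type(uint8_t conn_type, uint8_t func_id)
{
	return (uint16_t)(conn_type | ((uint16_t)func_id << TOY_FUNC_SHIFT));
}

int main(void)
{
	uint16_t type = toy_spe_type(1 /* ETH */, 0x21 /* a VF's id */);

	printf("type=0x%04x conn=%u func=%u\n",
	       type, type & TOY_CONN_MASK, type >> TOY_FUNC_SHIFT);
	return 0;
}
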
static inline int bnx2x_q_send_halt(struct bnx2x *bp,
@@ -5012,8 +5011,7 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp,
&params->params.update;
u8 next_tx_only = o->num_tx_only;
- /*
- * Forget all pending for completion commands if a driver only state
+ /* Forget all pending for completion commands if a driver only state
* transition has been requested.
*/
if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
@@ -5021,12 +5019,14 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp,
o->next_state = BNX2X_Q_STATE_MAX;
}
- /*
- * Don't allow a next state transition if we are in the middle of
+ /* Don't allow a next state transition if we are in the middle of
* the previous one.
*/
- if (o->pending)
+ if (o->pending) {
+ BNX2X_ERR("Blocking transition since pending was %lx\n",
+ o->pending);
return -EBUSY;
+ }
switch (state) {
case BNX2X_Q_STATE_RESET:
@@ -5199,6 +5199,27 @@ void bnx2x_init_queue_obj(struct bnx2x *bp,
obj->set_pending = bnx2x_queue_set_pending;
}
+/* return a queue object's logical state */
+int bnx2x_get_q_logical_state(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj)
+{
+ switch (obj->state) {
+ case BNX2X_Q_STATE_ACTIVE:
+ case BNX2X_Q_STATE_MULTI_COS:
+ return BNX2X_Q_LOGICAL_STATE_ACTIVE;
+ case BNX2X_Q_STATE_RESET:
+ case BNX2X_Q_STATE_INITIALIZED:
+ case BNX2X_Q_STATE_MCOS_TERMINATED:
+ case BNX2X_Q_STATE_INACTIVE:
+ case BNX2X_Q_STATE_STOPPED:
+ case BNX2X_Q_STATE_TERMINATED:
+ case BNX2X_Q_STATE_FLRED:
+ return BNX2X_Q_LOGICAL_STATE_STOPPED;
+ default:
+ return -EINVAL;
+ }
+}
+
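
The helper collapses the fine-grained queue state machine into two logical values so callers (notably the new SR-IOV code) only need to ask whether a queue can pass traffic. The same pattern in isolation, with stand-in states (a sketch, not the driver's enum):

#include <stdio.h>

enum toy_state { T_RESET, T_INITIALIZED, T_ACTIVE, T_MULTI_COS, T_STOPPED };
enum toy_logical { T_LOG_STOPPED, T_LOG_ACTIVE, T_LOG_INVALID = -1 };

static enum toy_logical toy_logical_state(enum toy_state s)
{
	switch (s) {
	case T_ACTIVE:
	case T_MULTI_COS:	/* states that pass traffic */
		return T_LOG_ACTIVE;
	case T_RESET:
	case T_INITIALIZED:
	case T_STOPPED:		/* states that do not */
		return T_LOG_STOPPED;
	default:
		return T_LOG_INVALID;
	}
}

int main(void)
{
	printf("%d %d\n", toy_logical_state(T_ACTIVE),
	       toy_logical_state(T_RESET));	/* prints 1 0 */
	return 0;
}
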
/********************** Function state object *********************************/
enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
struct bnx2x_func_sp_obj *o)
@@ -5207,8 +5228,7 @@ enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
if (o->pending)
return BNX2X_F_STATE_MAX;
- /*
- * unsure the order of reading of o->pending and o->state
+ /* ensure the order of reading of o->pending and o->state
* o->pending should be read first
*/
rmb();
@@ -5259,7 +5279,7 @@ static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
wmb();
clear_bit(cmd, &o->pending);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
return 0;
}
@@ -5306,8 +5326,7 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp,
enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
enum bnx2x_func_cmd cmd = params->cmd;
- /*
- * Forget all pending for completion commands if a driver only state
+ /* Forget all pending for completion commands if a driver only state
* transition has been requested.
*/
if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
@@ -5315,8 +5334,7 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp,
o->next_state = BNX2X_F_STATE_MAX;
}
- /*
- * Don't allow a next state transition if we are in the middle of
+ /* Don't allow a next state transition if we are in the middle of
* the previous one.
*/
if (o->pending)
@@ -5489,7 +5507,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp,
goto init_err;
}
- /* Handle the beginning of COMMON_XXX pases separatelly... */
+ /* Handle the beginning of COMMON_XXX phases separately... */
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
rc = bnx2x_func_init_cmn_chip(bp, drv);
@@ -5523,7 +5541,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp,
init_err:
drv->gunzip_end(bp);
- /* In case of success, complete the comand immediatelly: no ramrods
+ /* In case of success, complete the command immediately: no ramrods
* have been sent.
*/
if (!rc)
@@ -5548,7 +5566,7 @@ static inline void bnx2x_func_reset_func(struct bnx2x *bp,
}
/**
- * bnx2x_func_reset_port - reser HW at port stage
+ * bnx2x_func_reset_port - reset HW at port stage
*
* @bp: device handle
* @drv:
@@ -5570,7 +5588,7 @@ static inline void bnx2x_func_reset_port(struct bnx2x *bp,
}
/**
- * bnx2x_func_reset_cmn - reser HW at common stage
+ * bnx2x_func_reset_cmn - reset HW at common stage
*
* @bp: device handle
* @drv:
@@ -5586,7 +5604,6 @@ static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
drv->reset_hw_cmn(bp);
}
-
static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
struct bnx2x_func_state_params *params)
{
@@ -5613,7 +5630,7 @@ static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
break;
}
- /* Complete the comand immediatelly: no ramrods have been sent. */
+ /* Complete the command immediately: no ramrods have been sent. */
o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
return 0;
@@ -5631,17 +5648,18 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
memset(rdata, 0, sizeof(*rdata));
/* Fill the ramrod data with provided parameters */
- rdata->function_mode = (u8)start_params->mf_mode;
- rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
- rdata->path_id = BP_PATH(bp);
- rdata->network_cos_mode = start_params->network_cos_mode;
-
- /*
- * No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
- * and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ rdata->function_mode = (u8)start_params->mf_mode;
+ rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
+ rdata->path_id = BP_PATH(bp);
+ rdata->network_cos_mode = start_params->network_cos_mode;
+ rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
+ rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
+
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
@@ -5666,6 +5684,12 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
rdata->tx_switch_suspend = switch_update_params->suspend;
rdata->echo = SWITCH_UPDATE;
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
U64_HI(data_mapping),
U64_LO(data_mapping), NONE_CONNECTION_TYPE);
@@ -5693,11 +5717,11 @@ static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
rdata->allowed_priorities = afex_update_params->allowed_priorities;
rdata->echo = AFEX_UPDATE;
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
- * and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
DP(BNX2X_MSG_SP,
"afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
@@ -5716,21 +5740,20 @@ inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
struct bnx2x_func_sp_obj *o = params->f_obj;
struct afex_vif_list_ramrod_data *rdata =
(struct afex_vif_list_ramrod_data *)o->afex_rdata;
- struct bnx2x_func_afex_viflists_params *afex_viflist_params =
+ struct bnx2x_func_afex_viflists_params *afex_vif_params =
&params->params.afex_viflists;
u64 *p_rdata = (u64 *)rdata;
memset(rdata, 0, sizeof(*rdata));
/* Fill the ramrod data with provided parameters */
- rdata->vif_list_index = afex_viflist_params->vif_list_index;
- rdata->func_bit_map = afex_viflist_params->func_bit_map;
- rdata->afex_vif_list_command =
- afex_viflist_params->afex_vif_list_command;
- rdata->func_to_clear = afex_viflist_params->func_to_clear;
+ rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
+ rdata->func_bit_map = afex_vif_params->func_bit_map;
+ rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
+ rdata->func_to_clear = afex_vif_params->func_to_clear;
/* send in echo type of sub command */
- rdata->echo = afex_viflist_params->afex_vif_list_command;
+ rdata->echo = afex_vif_params->afex_vif_list_command;
/* No need for an explicit memory barrier here as long we would
* need to ensure the ordering of writing to the SPQ element
@@ -5783,6 +5806,12 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
rdata->traffic_type_to_priority_cos[i] =
tx_start_params->traffic_type_to_priority_cos[i];
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
U64_HI(data_mapping),
U64_LO(data_mapping), NONE_CONNECTION_TYPE);
@@ -5897,7 +5926,7 @@ int bnx2x_func_state_change(struct bnx2x *bp,
if (rc) {
o->next_state = BNX2X_F_STATE_MAX;
clear_bit(cmd, pending);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index adbd91b1bdf..718ecd29466 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -1,6 +1,6 @@
/* bnx2x_sp.h: Broadcom Everest network driver.
*
- * Copyright (c) 2011-2012 Broadcom Corporation
+ * Copyright (c) 2011-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -12,7 +12,7 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Vladislav Zolotarov
*
*/
@@ -34,8 +34,7 @@ enum {
RAMROD_RESTORE,
/* Execute the next command now */
RAMROD_EXEC,
- /*
- * Don't add a new command and continue execution of posponed
+ /* Don't add a new command and continue execution of postponed
* commands. If not set a new command will be added to the
* pending commands list.
*/
@@ -54,7 +53,7 @@ typedef enum {
BNX2X_OBJ_TYPE_RX_TX,
} bnx2x_obj_type;
-/* Filtering states */
+/* Public slow path states */
enum {
BNX2X_FILTER_MAC_PENDING,
BNX2X_FILTER_VLAN_PENDING,
@@ -100,6 +99,7 @@ struct bnx2x_raw_obj {
/************************* VLAN-MAC commands related parameters ***************/
struct bnx2x_mac_ramrod_data {
u8 mac[ETH_ALEN];
+ u8 is_inner_mac;
};
struct bnx2x_vlan_ramrod_data {
@@ -108,6 +108,7 @@ struct bnx2x_vlan_ramrod_data {
struct bnx2x_vlan_mac_ramrod_data {
u8 mac[ETH_ALEN];
+ u8 is_inner_mac;
u16 vlan;
};
@@ -127,8 +128,7 @@ enum bnx2x_vlan_mac_cmd {
struct bnx2x_vlan_mac_data {
/* Requested command: BNX2X_VLAN_MAC_XX */
enum bnx2x_vlan_mac_cmd cmd;
- /*
- * used to contain the data related vlan_mac_flags bits from
+ /* used to contain the data related vlan_mac_flags bits from
* ramrod parameters.
*/
unsigned long vlan_mac_flags;
@@ -188,14 +188,10 @@ typedef struct bnx2x_exeq_elem *
struct bnx2x_exeq_elem *elem);
struct bnx2x_exe_queue_obj {
- /*
- * Commands pending for an execution.
- */
+ /* Commands pending for execution. */
struct list_head exe_queue;
- /*
- * Commands pending for an completion.
- */
+ /* Commands pending for completion. */
struct list_head pending_comp;
spinlock_t lock;
@@ -243,14 +239,13 @@ struct bnx2x_exe_queue_obj {
};
/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/*
- * Element in the VLAN_MAC registry list having all currenty configured
+ * Element in the VLAN_MAC registry list having all currently configured
* rules.
*/
struct bnx2x_vlan_mac_registry_elem {
struct list_head link;
- /*
- * Used to store the cam offset used for the mac/vlan/vlan-mac.
+ /* Used to store the cam offset used for the mac/vlan/vlan-mac.
* Relevant for 57710 and 57711 only. VLANs and MACs share the
* same CAM for these chips.
*/
@@ -271,6 +266,13 @@ enum {
BNX2X_DONT_CONSUME_CAM_CREDIT,
BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
};
+/* When looking for matching filters, some flags are not interesting */
+#define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \
+ 1 << BNX2X_ETH_MAC | \
+ 1 << BNX2X_ISCSI_ETH_MAC | \
+ 1 << BNX2X_NETQ_ETH_MAC)
+#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
+ ((flags) & BNX2X_VLAN_MAC_CMP_MASK)
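
BNX2X_VLAN_MAC_CMP_FLAGS() keeps only the classification bits when two filter entries are compared, so entries that differ merely in bookkeeping flags (for example the CAM-credit bits) still match. A self-contained sketch of comparing under such a mask (the flag names are invented):

#include <stdbool.h>
#include <stdio.h>

#define TOY_ETH_MAC	(1u << 0)	/* relevant for matching */
#define TOY_UC_LIST	(1u << 1)	/* relevant for matching */
#define TOY_NO_CREDIT	(1u << 2)	/* bookkeeping only */

#define TOY_CMP_MASK		(TOY_ETH_MAC | TOY_UC_LIST)
#define TOY_CMP_FLAGS(f)	((f) & TOY_CMP_MASK)

static bool toy_match(unsigned a, unsigned b)
{
	return TOY_CMP_FLAGS(a) == TOY_CMP_FLAGS(b);
}

int main(void)
{
	/* Same classification bits, different credit bit: still a match. */
	printf("%d\n", toy_match(TOY_ETH_MAC, TOY_ETH_MAC | TOY_NO_CREDIT));
	printf("%d\n", toy_match(TOY_ETH_MAC, TOY_UC_LIST));
	return 0;
}
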
struct bnx2x_vlan_mac_ramrod_params {
/* Object to run the command from */
@@ -290,6 +292,12 @@ struct bnx2x_vlan_mac_obj {
* entries.
*/
struct list_head head;
+ /* Implement a simple reader/writer lock on the head list.
+ * All these fields should only be accessed under the exe_queue lock
+ */
+ u8 head_reader; /* Num. of readers accessing head list */
+ bool head_exe_request; /* Pending execution request. */
+ unsigned long saved_ramrod_flags; /* Ramrods of pending execution */
/* TODO: Add it's initialization in the init functions */
struct bnx2x_exe_queue_obj exe_queue;
@@ -308,13 +316,14 @@ struct bnx2x_vlan_mac_obj {
* @param n number of elements to get
* @param buf buffer preallocated by caller into which elements
* will be copied. Note elements are 4-byte aligned
- * so buffer size must be able to accomodate the
+ * so buffer size must be able to accommodate the
* aligned elements.
*
* @return number of copied bytes
*/
- int (*get_n_elements)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
- int n, u8 *buf);
+ int (*get_n_elements)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o, int n, u8 *base,
+ u8 stride, u8 size);
/**
* Checks if ADD-ramrod with the given params may be performed.
@@ -392,7 +401,7 @@ struct bnx2x_vlan_mac_obj {
* @param bp
* @param p Command parameters (RAMROD_COMP_WAIT bit in
* ramrod_flags is only taken into an account)
- * @param ppos a pointer to the cooky that should be given back in the
+ * @param ppos a pointer to the cookie that should be given back in the
* next call to make function handle the next element. If
* *ppos is set to NULL it will restart the iterator.
* If returned *ppos == NULL this means that the last
@@ -405,7 +414,7 @@ struct bnx2x_vlan_mac_obj {
struct bnx2x_vlan_mac_registry_elem **ppos);
/**
- * Should be called on a completion arival.
+ * Should be called on a completion arrival.
*
* @param bp
* @param o
@@ -439,12 +448,9 @@ enum {
BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
};
-void bnx2x_set_mac_in_nig(struct bnx2x *bp,
- bool add, unsigned char *dev_addr, int index);
-
/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
-/* RX_MODE ramrod spesial flags: set in rx_mode_flags field in
+/* RX_MODE ramrod special flags: set in rx_mode_flags field in
* a bnx2x_rx_mode_ramrod_params.
*/
enum {
@@ -472,8 +478,7 @@ struct bnx2x_rx_mode_ramrod_params {
unsigned long ramrod_flags;
unsigned long rx_mode_flags;
- /*
- * rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to
+ /* rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to
* a tstorm_eth_mac_filter_config (e1x).
*/
void *rdata;
@@ -524,7 +529,7 @@ struct bnx2x_mcast_ramrod_params {
int mcast_list_len;
};
-enum {
+enum bnx2x_mcast_cmd {
BNX2X_MCAST_CMD_ADD,
BNX2X_MCAST_CMD_CONT,
BNX2X_MCAST_CMD_DEL,
@@ -573,7 +578,8 @@ struct bnx2x_mcast_obj {
* @param cmd command to execute (BNX2X_MCAST_CMD_X, see above)
*/
int (*config_mcast)(struct bnx2x *bp,
- struct bnx2x_mcast_ramrod_params *p, int cmd);
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd);
/**
* Fills the ramrod data during the RESTORE flow.
@@ -590,11 +596,13 @@ struct bnx2x_mcast_obj {
int start_bin, int *rdata_idx);
int (*enqueue_cmd)(struct bnx2x *bp, struct bnx2x_mcast_obj *o,
- struct bnx2x_mcast_ramrod_params *p, int cmd);
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd);
void (*set_one_rule)(struct bnx2x *bp,
struct bnx2x_mcast_obj *o, int idx,
- union bnx2x_mcast_config_data *cfg_data, int cmd);
+ union bnx2x_mcast_config_data *cfg_data,
+ enum bnx2x_mcast_cmd cmd);
/** Checks if there are more mcast MACs to be set or a previous
* command is still pending.
@@ -617,7 +625,8 @@ struct bnx2x_mcast_obj {
* feasible.
*/
int (*validate)(struct bnx2x *bp,
- struct bnx2x_mcast_ramrod_params *p, int cmd);
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd);
/**
* Restore the values of internal counters in case of a failure.
@@ -639,12 +648,11 @@ struct bnx2x_credit_pool_obj {
/* Maximum allowed credit. put() will check against it. */
int pool_sz;
- /*
- * Allocate a pool table statically.
+ /* Allocate a pool table statically.
*
- * Currently the mamimum allowed size is MAX_MAC_CREDIT_E2(272)
+ * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272)
*
- * The set bit in the table will mean that the entry is available.
+ * The set bit in the table will mean that the entry is available.
*/
#define BNX2X_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64)
u64 pool_mirror[BNX2X_POOL_VEC_SIZE];
@@ -759,7 +767,9 @@ enum {
BNX2X_Q_UPDATE_DEF_VLAN_EN,
BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
- BNX2X_Q_UPDATE_SILENT_VLAN_REM
+ BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+ BNX2X_Q_UPDATE_TX_SWITCHING
};
/* Allowed Queue states */
@@ -776,6 +786,12 @@ enum bnx2x_q_state {
BNX2X_Q_STATE_MAX,
};
+/* Allowed Queue logical states */
+enum bnx2x_q_logical_state {
+ BNX2X_Q_LOGICAL_STATE_ACTIVE,
+ BNX2X_Q_LOGICAL_STATE_STOPPED,
+};
+
/* Allowed commands */
enum bnx2x_queue_cmd {
BNX2X_Q_CMD_INIT,
@@ -814,10 +830,12 @@ enum {
BNX2X_Q_FLG_TX_SEC,
BNX2X_Q_FLG_ANTI_SPOOF,
BNX2X_Q_FLG_SILENT_VLAN_REM,
- BNX2X_Q_FLG_FORCE_DEFAULT_PRI
+ BNX2X_Q_FLG_FORCE_DEFAULT_PRI,
+ BNX2X_Q_FLG_PCSUM_ON_PKT,
+ BNX2X_Q_FLG_TUN_INC_INNER_IP_ID
};
-/* Queue type options: queue type may be a compination of below. */
+/* Queue type options: queue type may be a combination of below. */
enum bnx2x_q_type {
/** TODO: Consider moving both these flags into the init()
* ramrod params.
@@ -832,6 +850,7 @@ enum bnx2x_q_type {
#define BNX2X_MULTI_TX_COS_E3B0 3
#define BNX2X_MULTI_TX_COS 3 /* Maximum possible */
+#define MAC_PAD (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
struct bnx2x_queue_init_params {
struct {
@@ -874,6 +893,24 @@ struct bnx2x_queue_update_params {
u8 cid_index;
};
+struct bnx2x_queue_update_tpa_params {
+ dma_addr_t sge_map;
+ u8 update_ipv4;
+ u8 update_ipv6;
+ u8 max_tpa_queues;
+ u8 max_sges_pkt;
+ u8 complete_on_both_clients;
+ u8 dont_verify_thr;
+ u8 tpa_mode;
+ u8 _pad;
+
+ u16 sge_buff_sz;
+ u16 max_agg_sz;
+
+ u16 sge_pause_thr_low;
+ u16 sge_pause_thr_high;
+};
+
struct rxq_pause_params {
u16 bd_th_lo;
u16 bd_th_hi;
@@ -968,6 +1005,7 @@ struct bnx2x_queue_state_params {
/* Params according to the current command */
union {
struct bnx2x_queue_update_params update;
+ struct bnx2x_queue_update_tpa_params update_tpa;
struct bnx2x_queue_setup_params setup;
struct bnx2x_queue_init_params init;
struct bnx2x_queue_setup_tx_only_params tx_only;
@@ -986,10 +1024,9 @@ struct bnx2x_queue_sp_obj {
u8 cl_id;
u8 func_id;
- /*
- * number of traffic classes supported by queue.
- * The primary connection of the queue suppotrs the first traffic
- * class. Any further traffic class is suppoted by a tx-only
+ /* number of traffic classes supported by queue.
+ * The primary connection of the queue supports the first traffic
+ * class. Any further traffic class is supported by a tx-only
* connection.
*
* Therefore max_cos is also a number of valid entries in the cids
@@ -1005,7 +1042,7 @@ struct bnx2x_queue_sp_obj {
/* BNX2X_Q_CMD_XX bits. This object implements "one
* pending" paradigm but for debug and tracing purposes it's
- * more convinient to have different bits for different
+ * more convenient to have different bits for different
* commands.
*/
unsigned long pending;
@@ -1108,6 +1145,15 @@ struct bnx2x_func_start_params {
/* Function cos mode */
u8 network_cos_mode;
+
+ /* NVGRE classification enablement */
+ u8 nvgre_clss_en;
+
+ /* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
+ u8 gre_tunnel_mode;
+
+ /* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */
+ u8 gre_tunnel_rss;
};
struct bnx2x_func_switch_update_params {
@@ -1185,7 +1231,7 @@ struct bnx2x_func_sp_obj {
/* BNX2X_FUNC_CMD_XX bits. This object implements "one
* pending" paradigm but for debug and tracing purposes it's
- * more convinient to have different bits for different
+ * more convenient to have different bits for different
* commands.
*/
unsigned long pending;
@@ -1261,6 +1307,9 @@ void bnx2x_init_queue_obj(struct bnx2x *bp,
int bnx2x_queue_state_change(struct bnx2x *bp,
struct bnx2x_queue_state_params *params);
+int bnx2x_get_q_logical_state(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj);
+
/********************* VLAN-MAC ****************/
void bnx2x_init_mac_obj(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *mac_obj,
@@ -1276,16 +1325,14 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
unsigned long *pstate, bnx2x_obj_type type,
struct bnx2x_credit_pool_obj *vlans_pool);
-void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *vlan_mac_obj,
- u8 cl_id, u32 cid, u8 func_id, void *rdata,
- dma_addr_t rdata_mapping, int state,
- unsigned long *pstate, bnx2x_obj_type type,
- struct bnx2x_credit_pool_obj *macs_pool,
- struct bnx2x_credit_pool_obj *vlans_pool);
-
+int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o);
+void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o);
+int bnx2x_vlan_mac_h_write_lock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o);
int bnx2x_config_vlan_mac(struct bnx2x *bp,
- struct bnx2x_vlan_mac_ramrod_params *p);
+ struct bnx2x_vlan_mac_ramrod_params *p);
int bnx2x_vlan_mac_move(struct bnx2x *bp,
struct bnx2x_vlan_mac_ramrod_params *p,
@@ -1301,7 +1348,7 @@ void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
*
* @p: Command parameters
*
- * Return: 0 - if operation was successfull and there is no pending completions,
+ * Return: 0 - if operation was successful and there are no pending completions,
* positive number - if there are pending completions,
* negative - if there were errors
*/
@@ -1333,12 +1380,13 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp,
* the current command will be enqueued to the tail of the
* pending commands list.
*
- * Return: 0 is operation was successfull and there are no pending completions,
+ * Return: 0 if operation was successful and there are no pending completions,
* negative if there were errors, positive if there are pending
* completions.
*/
int bnx2x_config_mcast(struct bnx2x *bp,
- struct bnx2x_mcast_ramrod_params *p, int cmd);
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd);
/****************** CREDIT POOL ****************/
void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
@@ -1348,7 +1396,6 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
struct bnx2x_credit_pool_obj *p, u8 func_id,
u8 func_num);
-
/****************** RSS CONFIGURATION ****************/
void bnx2x_init_rss_config_obj(struct bnx2x *bp,
struct bnx2x_rss_config_obj *rss_obj,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
new file mode 100644
index 00000000000..eda8583f6fc
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -0,0 +1,3001 @@
+/* bnx2x_sriov.c: Broadcom Everest network driver.
+ *
+ * Copyright 2009-2013 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ * Ariel Elior <ariel.elior@qlogic.com>
+ *
+ */
+#include "bnx2x.h"
+#include "bnx2x_init.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_sp.h"
+#include <linux/crc32.h>
+#include <linux/if_vlan.h>
+
+/* General service functions */
+static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
+ u16 pf_id)
+{
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+}
+
+static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
+ u8 enable)
+{
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+}
+
+int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
+{
+ int idx;
+
+ for_each_vf(bp, idx)
+ if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
+ break;
+ return idx;
+}
+
+static
+struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
+{
+ u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
+ return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
+}
+
+static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ u8 igu_sb_id, u8 segment, u16 index, u8 op,
+ u8 update)
+{
+ /* acking a VF sb through the PF - use the GRC */
+ u32 ctl;
+ u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
+ u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
+ u32 func_encode = vf->abs_vfid;
+ u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
+ struct igu_regular cmd_data = {0};
+
+ cmd_data.sb_id_and_flags =
+ ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
+ (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
+ (update << IGU_REGULAR_BUPDATE_SHIFT) |
+ (op << IGU_REGULAR_ENABLE_INT_SHIFT));
+
+ ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
+ func_encode << IGU_CTRL_REG_FID_SHIFT |
+ IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
+
+ DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+ cmd_data.sb_id_and_flags, igu_addr_data);
+ REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
+ mmiowb();
+ barrier();
+
+ DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+ ctl, igu_addr_ctl);
+ REG_WR(bp, igu_addr_ctl, ctl);
+ mmiowb();
+ barrier();
+}
+
+static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ bool print_err)
+{
+ if (!bnx2x_leading_vfq(vf, sp_initialized)) {
+ if (print_err)
+ BNX2X_ERR("Slowpath objects not yet initialized!\n");
+ else
+ DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
+ return false;
+ }
+ return true;
+}
+
+/* VFOP operations states */
+void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_queue_init_params *init_params,
+ struct bnx2x_queue_setup_params *setup_params,
+ u16 q_idx, u16 sb_idx)
+{
+ DP(BNX2X_MSG_IOV,
+ "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
+ vf->abs_vfid,
+ q_idx,
+ sb_idx,
+ init_params->tx.sb_cq_index,
+ init_params->tx.hc_rate,
+ setup_params->flags,
+ setup_params->txq_params.traffic_type);
+}
+
+void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_queue_init_params *init_params,
+ struct bnx2x_queue_setup_params *setup_params,
+ u16 q_idx, u16 sb_idx)
+{
+ struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
+ "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
+ vf->abs_vfid,
+ q_idx,
+ sb_idx,
+ init_params->rx.sb_cq_index,
+ init_params->rx.hc_rate,
+ setup_params->gen_params.mtu,
+ rxq_params->buf_sz,
+ rxq_params->sge_buf_sz,
+ rxq_params->max_sges_pkt,
+ rxq_params->tpa_agg_sz,
+ setup_params->flags,
+ rxq_params->drop_flags,
+ rxq_params->cache_line_log);
+}
+
+void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vf_queue *q,
+ struct bnx2x_vf_queue_construct_params *p,
+ unsigned long q_type)
+{
+ struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
+ struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;
+
+ /* INIT */
+
+ /* Enable host coalescing in the transition to INIT state */
+ if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
+ __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);
+
+ if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
+ __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);
+
+ /* FW SB ID */
+ init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+ init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+
+ /* context */
+ init_p->cxts[0] = q->cxt;
+
+ /* SETUP */
+
+ /* Setup-op general parameters */
+ setup_p->gen_params.spcl_id = vf->sp_cl_id;
+ setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
+
+ /* Setup-op pause params:
+ * Nothing to do, the pause thresholds are set by default to 0 which
+ * effectively turns off the feature for this queue. We don't want
+ * one queue (VF) to interfere with another queue (another VF)
+ */
+ if (vf->cfg_flags & VF_CFG_FW_FC)
+ BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
+ vf->abs_vfid);
+ /* Setup-op flags:
+ * collect statistics, zero statistics, local-switching, security,
+ * OV for Flex10, RSS and MCAST for leading
+ */
+ if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
+ __set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);
+
+ /* for VFs, enable tx switching, bd coherency, and mac address
+ * anti-spoofing
+ */
+ __set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
+ __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
+ __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
+
+ /* Setup-op rx parameters */
+ if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
+ struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
+
+ rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
+ rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+ rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);
+
+ if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
+ rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
+ }
+
+ /* Setup-op tx parameters */
+ if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
+ setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
+ setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+ }
+}
+
+static int bnx2x_vf_queue_create(struct bnx2x *bp,
+ struct bnx2x_virtf *vf, int qid,
+ struct bnx2x_vf_queue_construct_params *qctor)
+{
+ struct bnx2x_queue_state_params *q_params;
+ int rc = 0;
+
+ DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
+
+ /* Prepare ramrod information */
+ q_params = &qctor->qstate;
+ q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);
+
+ if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
+ BNX2X_Q_LOGICAL_STATE_ACTIVE) {
+ DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");
+ goto out;
+ }
+
+ /* Run Queue 'construction' ramrods */
+ q_params->cmd = BNX2X_Q_CMD_INIT;
+ rc = bnx2x_queue_state_change(bp, q_params);
+ if (rc)
+ goto out;
+
+ memcpy(&q_params->params.setup, &qctor->prep_qsetup,
+ sizeof(struct bnx2x_queue_setup_params));
+ q_params->cmd = BNX2X_Q_CMD_SETUP;
+ rc = bnx2x_queue_state_change(bp, q_params);
+ if (rc)
+ goto out;
+
+ /* enable interrupts */
+ bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
+ USTORM_ID, 0, IGU_INT_ENABLE, 0);
+out:
+ return rc;
+}
+
+static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ int qid)
+{
+ enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
+ BNX2X_Q_CMD_TERMINATE,
+ BNX2X_Q_CMD_CFC_DEL};
+ struct bnx2x_queue_state_params q_params;
+ int rc, i;
+
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+ /* Prepare ramrod information */
+ memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
+ q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+
+ if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
+ BNX2X_Q_LOGICAL_STATE_STOPPED) {
+ DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");
+ goto out;
+ }
+
+ /* Run Queue 'destruction' ramrods */
+ for (i = 0; i < ARRAY_SIZE(cmds); i++) {
+ q_params.cmd = cmds[i];
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);
+ return rc;
+ }
+ }
+out:
+ /* Clean Context */
+ if (bnx2x_vfq(vf, qid, cxt)) {
+ bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
+ bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
+ }
+
+ return 0;
+}
+
+static void
+bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
+{
+ struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+ if (vf) {
+ /* the first igu entry belonging to VFs of this PF */
+ if (!BP_VFDB(bp)->first_vf_igu_entry)
+ BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;
+
+ /* the first igu entry belonging to this VF */
+ if (!vf_sb_count(vf))
+ vf->igu_base_id = igu_sb_id;
+
+ ++vf_sb_count(vf);
+ ++vf->sb_count;
+ }
+ BP_VFDB(bp)->vf_sbs_pool++;
+}
+
+static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *obj,
+ atomic_t *counter)
+{
+ struct list_head *pos;
+ int read_lock;
+ int cnt = 0;
+
+ read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
+ if (read_lock)
+ DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
+
+ list_for_each(pos, &obj->head)
+ cnt++;
+
+ if (!read_lock)
+ bnx2x_vlan_mac_h_read_unlock(bp, obj);
+
+ atomic_set(counter, cnt);
+}
+
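
bnx2x_vf_vlan_credit() takes the registry's read lock only to walk the object's head list and count its entries; the credit counter is then set to that snapshot. The counting walk itself is ordinary list iteration, shown here over a plain singly linked list (no locking, since nothing is concurrent in the sketch):

#include <stdio.h>

struct toy_node {
	struct toy_node *next;
};

static int toy_list_count(const struct toy_node *head)
{
	int cnt = 0;

	for (const struct toy_node *p = head; p; p = p->next)
		cnt++;	/* one per registry element */
	return cnt;
}

int main(void)
{
	struct toy_node c = { NULL }, b = { &c }, a = { &b };

	printf("%d\n", toy_list_count(&a));	/* prints 3 */
	return 0;
}
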
+static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ int qid, bool drv_only, bool mac)
+{
+ struct bnx2x_vlan_mac_ramrod_params ramrod;
+ int rc;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
+ mac ? "MACs" : "VLANs");
+
+ /* Prepare ramrod params */
+ memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
+ if (mac) {
+ set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
+ } else {
+ set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &ramrod.user_req.vlan_mac_flags);
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+ }
+ ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+
+ set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
+ if (drv_only)
+ set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
+ else
+ set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
+
+ /* Start deleting */
+ rc = ramrod.vlan_mac_obj->delete_all(bp,
+ ramrod.vlan_mac_obj,
+ &ramrod.user_req.vlan_mac_flags,
+ &ramrod.ramrod_flags);
+ if (rc) {
+ BNX2X_ERR("Failed to delete all %s\n",
+ mac ? "MACs" : "VLANs");
+ return rc;
+ }
+
+ /* Clear the vlan counters */
+ if (!mac)
+ atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0);
+
+ return 0;
+}
+
+static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
+ struct bnx2x_virtf *vf, int qid,
+ struct bnx2x_vf_mac_vlan_filter *filter,
+ bool drv_only)
+{
+ struct bnx2x_vlan_mac_ramrod_params ramrod;
+ int rc;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
+ vf->abs_vfid, filter->add ? "Adding" : "Deleting",
+ filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN");
+
+ /* Prepare ramrod params */
+ memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
+ if (filter->type == BNX2X_VF_FILTER_VLAN) {
+ set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &ramrod.user_req.vlan_mac_flags);
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+ ramrod.user_req.u.vlan.vlan = filter->vid;
+ } else {
+ set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
+ memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
+ }
+ ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
+ BNX2X_VLAN_MAC_DEL;
+
+ /* Verify there are available vlan credits */
+ if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
+ (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
+ vf_vlan_rules_cnt(vf))) {
+ BNX2X_ERR("No credits for vlan [%d >= %d]\n",
+ atomic_read(&bnx2x_vfq(vf, qid, vlan_count)),
+ vf_vlan_rules_cnt(vf));
+ return -ENOMEM;
+ }
+
+ set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
+ if (drv_only)
+ set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
+ else
+ set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
+
+ /* Add/Remove the filter */
+ rc = bnx2x_config_vlan_mac(bp, &ramrod);
+ if (rc && rc != -EEXIST) {
+ BNX2X_ERR("Failed to %s %s\n",
+ filter->add ? "add" : "delete",
+ filter->type == BNX2X_VF_FILTER_MAC ? "MAC" :
+ "VLAN");
+ return rc;
+ }
+
+ /* Update the vlan counters */
+ if (filter->type == BNX2X_VF_FILTER_VLAN)
+ bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj,
+ &bnx2x_vfq(vf, qid, vlan_count));
+
+ return 0;
+}
+
+int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mac_vlan_filters *filters,
+ int qid, bool drv_only)
+{
+ int rc = 0, i;
+
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
+
+ /* Prepare ramrod params */
+ for (i = 0; i < filters->count; i++) {
+ rc = bnx2x_vf_mac_vlan_config(bp, vf, qid,
+ &filters->filters[i], drv_only);
+ if (rc)
+ break;
+ }
+
+ /* Rollback if needed */
+ if (i != filters->count) {
+ BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
+ i, filters->count + 1);
+ while (--i >= 0) {
+ filters->filters[i].add = !filters->filters[i].add;
+ bnx2x_vf_mac_vlan_config(bp, vf, qid,
+ &filters->filters[i],
+ drv_only);
+ }
+ }
+
+ /* It's our responsibility to free the filters */
+ kfree(filters);
+
+ return rc;
+}
+
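
On the first failure, the loop above undoes every filter it already applied by inverting each add flag and replaying the configuration in reverse order. The compensate-in-reverse pattern, reduced to a runnable sketch (toy_apply() stands in for bnx2x_vf_mac_vlan_config()):

#include <stdbool.h>
#include <stdio.h>

struct toy_filter { bool add; int id; };

/* Stand-in for the real config call: adding id 2 fails. */
static int toy_apply(struct toy_filter *f)
{
	if (f->add && f->id == 2)
		return -1;
	printf("%s filter %d\n", f->add ? "add" : "del", f->id);
	return 0;
}

static int toy_apply_list(struct toy_filter *filters, int count)
{
	int i, rc = 0;

	for (i = 0; i < count; i++) {
		rc = toy_apply(&filters[i]);
		if (rc)
			break;
	}
	if (i != count) {	/* roll back whatever succeeded */
		while (--i >= 0) {
			filters[i].add = !filters[i].add;
			toy_apply(&filters[i]);
		}
	}
	return rc;
}

int main(void)
{
	struct toy_filter f[] = { { true, 0 }, { true, 1 }, { true, 2 } };

	return toy_apply_list(f, 3) ? 1 : 0;
}
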
+int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
+ struct bnx2x_vf_queue_construct_params *qctor)
+{
+ int rc;
+
+ DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
+
+ rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);
+ if (rc)
+ goto op_err;
+
+ /* Configure vlan0 for leading queue */
+ if (!qid) {
+ struct bnx2x_vf_mac_vlan_filter filter;
+
+ memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter));
+ filter.type = BNX2X_VF_FILTER_VLAN;
+ filter.add = true;
+ filter.vid = 0;
+ rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false);
+ if (rc)
+ goto op_err;
+ }
+
+ /* Schedule the configuration of any pending vlan filters */
+ vf->cfg_flags |= VF_CFG_VLAN;
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ BNX2X_MSG_IOV);
+ return 0;
+op_err:
+ BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
+ return rc;
+}
+
+static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ int qid)
+{
+ int rc;
+
+ DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
+
+ /* If needed, clean the filtering data base */
+ if ((qid == LEADING_IDX) &&
+ bnx2x_validate_vf_sp_objs(bp, vf, false)) {
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false);
+ if (rc)
+ goto op_err;
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true);
+ if (rc)
+ goto op_err;
+ }
+
+ /* Terminate queue */
+ if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) {
+ struct bnx2x_queue_state_params qstate;
+
+ memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
+ qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ qstate.q_obj->state = BNX2X_Q_STATE_STOPPED;
+ qstate.cmd = BNX2X_Q_CMD_TERMINATE;
+ set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
+ rc = bnx2x_queue_state_change(bp, &qstate);
+ if (rc)
+ goto op_err;
+ }
+
+ return 0;
+op_err:
+ BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
+ return rc;
+}
+
+int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only)
+{
+ struct bnx2x_mcast_list_elem *mc = NULL;
+ struct bnx2x_mcast_ramrod_params mcast;
+ int rc, i;
+
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+ /* Prepare Multicast command */
+ memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
+ mcast.mcast_obj = &vf->mcast_obj;
+ if (drv_only)
+ set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
+ else
+ set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
+ if (mc_num) {
+ mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem),
+ GFP_KERNEL);
+ if (!mc) {
+ BNX2X_ERR("Cannot Configure mulicasts due to lack of memory\n");
+ return -ENOMEM;
+ }
+ }
+
+ /* clear existing mcasts */
+ mcast.mcast_list_len = vf->mcast_list_len;
+ vf->mcast_list_len = mc_num;
+ rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
+ if (rc) {
+ BNX2X_ERR("Failed to remove multicasts\n");
+ kfree(mc);
+ return rc;
+ }
+
+ /* update mcast list on the ramrod params */
+ if (mc_num) {
+ INIT_LIST_HEAD(&mcast.mcast_list);
+ for (i = 0; i < mc_num; i++) {
+ mc[i].mac = mcasts[i];
+ list_add_tail(&mc[i].link,
+ &mcast.mcast_list);
+ }
+
+ /* add new mcasts */
+ mcast.mcast_list_len = mc_num;
+ rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
+ if (rc)
+ BNX2X_ERR("Faled to add multicasts\n");
+ kfree(mc);
+ }
+
+ return rc;
+}
+
+static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
+ struct bnx2x_rx_mode_ramrod_params *ramrod,
+ struct bnx2x_virtf *vf,
+ unsigned long accept_flags)
+{
+ struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
+
+ memset(ramrod, 0, sizeof(*ramrod));
+ ramrod->cid = vfq->cid;
+ ramrod->cl_id = vfq_cl_id(vf, vfq);
+ ramrod->rx_mode_obj = &bp->rx_mode_obj;
+ ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
+ ramrod->rx_accept_flags = accept_flags;
+ ramrod->tx_accept_flags = accept_flags;
+ ramrod->pstate = &vf->filter_state;
+ ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
+
+ set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
+ set_bit(RAMROD_RX, &ramrod->ramrod_flags);
+ set_bit(RAMROD_TX, &ramrod->ramrod_flags);
+
+ ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
+ ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+}
+
+int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ int qid, unsigned long accept_flags)
+{
+ struct bnx2x_rx_mode_ramrod_params ramrod;
+
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+ bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
+ set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
+ vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
+ return bnx2x_config_rx_mode(bp, &ramrod);
+}
+
+int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
+{
+ int rc;
+
+ DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
+
+ /* Remove all classification configuration for leading queue */
+ if (qid == LEADING_IDX) {
+ rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
+ if (rc)
+ goto op_err;
+
+ /* Remove filtering if feasible */
+ if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+ false, false);
+ if (rc)
+ goto op_err;
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+ false, true);
+ if (rc)
+ goto op_err;
+ rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
+ if (rc)
+ goto op_err;
+ }
+ }
+
+ /* Destroy queue */
+ rc = bnx2x_vf_queue_destroy(bp, vf, qid);
+ if (rc)
+ goto op_err;
+ return rc;
+op_err:
+ BNX2X_ERR("vf[%d:%d] error: rc %d\n",
+ vf->abs_vfid, qid, rc);
+ return rc;
+}
+
+/* VF enable primitives
+ * When pretend is required, the caller is responsible
+ * for calling pretend prior to calling these routines
+ */
+
+/* internal vf enable - until vf is enabled internally all transactions
+ * are blocked. This routine should always be called last with pretend.
+ */
+static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
+{
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
+}
+
+/* clears vf error in all semi blocks */
+static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
+{
+ REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
+ REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
+ REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
+ REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
+}
+
+static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
+{
+ u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
+ u32 was_err_reg = 0;
+
+ switch (was_err_group) {
+ case 0:
+ was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
+ break;
+ case 1:
+ was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
+ break;
+ case 2:
+ was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
+ break;
+ case 3:
+ was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
+ break;
+ }
+ REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
+}
+
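
The 'was error' state for up to 128 VFs is spread across four 32-bit clear registers, so the routine derives a register group from the high bits of the id and a bit position from the low five bits (the real code also folds in 2 * BP_PATH(bp) before shifting; this sketch drops the path offset):

#include <stdio.h>

int main(void)
{
	for (unsigned vfid = 0; vfid < 128; vfid += 37) {
		unsigned group = vfid >> 5;	/* which 32-bit register */
		unsigned bit   = vfid & 0x1f;	/* which bit inside it */

		printf("vf %3u -> register group %u, mask 0x%08x\n",
		       vfid, group, 1u << bit);
	}
	return 0;
}
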
+static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int i;
+ u32 val;
+
+ /* Set VF masks and configuration - pretend */
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+
+ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
+ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
+ REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
+ REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
+ REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
+ REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
+
+ val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
+ val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
+ if (vf->cfg_flags & VF_CFG_INT_SIMD)
+ val |= IGU_VF_CONF_SINGLE_ISR_EN;
+ val &= ~IGU_VF_CONF_PARENT_MASK;
+ val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
+ REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
+
+ DP(BNX2X_MSG_IOV,
+ "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
+ vf->abs_vfid, val);
+
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ /* iterate over all queues, clear sb consumer */
+ for (i = 0; i < vf_sb_count(vf); i++) {
+ u8 igu_sb_id = vf_igu_sb(vf, i);
+
+ /* zero prod memory */
+ REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);
+
+ /* clear sb state machine */
+ bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
+ false /* VF */);
+
+ /* disable + update */
+ bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
+ IGU_INT_DISABLE, 1);
+ }
+}
+
+void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
+{
+ /* set the VF-PF association in the FW */
+ storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
+ storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
+
+ /* clear vf errors*/
+ bnx2x_vf_semi_clear_err(bp, abs_vfid);
+ bnx2x_vf_pglue_clear_err(bp, abs_vfid);
+
+ /* internal vf-enable - pretend */
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
+ DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
+ bnx2x_vf_enable_internal(bp, true);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+}
+
+static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ /* Reset vf in IGU - interrupts are still disabled */
+ bnx2x_vf_igu_reset(bp, vf);
+
+ /* pretend to enable the vf with the PBF */
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+ REG_WR(bp, PBF_REG_DISABLE_VF, 0);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+}
+
+static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
+{
+ struct pci_dev *dev;
+ struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+
+ if (!vf)
+ return false;
+
+ dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
+ if (dev)
+ return bnx2x_is_pcie_pending(dev);
+ return false;
+}
+
+int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
+{
+ /* Verify no pending pci transactions */
+ if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
+ BNX2X_ERR("PCIE Transactions still pending\n");
+
+ return 0;
+}
+
+static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ int new)
+{
+ int num = vf_vlan_rules_cnt(vf);
+ int diff = new - num;
+ bool rc = true;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n",
+ vf->abs_vfid, new, num);
+
+ if (diff > 0)
+ rc = bp->vlans_pool.get(&bp->vlans_pool, diff);
+ else if (diff < 0)
+ rc = bp->vlans_pool.put(&bp->vlans_pool, -diff);
+
+ if (rc)
+ vf_vlan_rules_cnt(vf) = new;
+ else
+ DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n",
+ vf->abs_vfid);
+}
+
+/* must be called after the number of PF queues and the number of VFs are
+ * both known
+ */
+static void
+bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct vf_pf_resc_request *resc = &vf->alloc_resc;
+ u16 vlan_count = 0;
+
+ /* will be set only during VF-ACQUIRE */
+ resc->num_rxqs = 0;
+ resc->num_txqs = 0;
+
+ /* no credit calculations for macs (just yet) */
+ resc->num_mac_filters = 1;
+
+ /* divvy up vlan rules */
+ bnx2x_iov_re_set_vlan_filters(bp, vf, 0);
+ vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
+ vlan_count = 1 << ilog2(vlan_count);
+ bnx2x_iov_re_set_vlan_filters(bp, vf,
+ vlan_count / BNX2X_NR_VIRTFN(bp));
+
+ /* no real limitation */
+ resc->num_mc_filters = 0;
+
+ /* num_sbs already set */
+ resc->num_sbs = vf->sb_count;
+}
+
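
The vlan credits are divided by first rounding the global pool down to a power of two (1 << ilog2(n) is the largest power of two that is <= n) and then splitting it evenly across the VFs. The arithmetic checked in userspace, with ilog2 open-coded (the pool and VF counts are example values):

#include <stdio.h>

static unsigned toy_ilog2(unsigned v)	/* floor(log2(v)), v > 0 */
{
	unsigned r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned pool = 200, num_vfs = 16;
	unsigned rounded = 1u << toy_ilog2(pool);

	/* prints: pool=200 rounded=128 per-vf=8 */
	printf("pool=%u rounded=%u per-vf=%u\n",
	       pool, rounded, rounded / num_vfs);
	return 0;
}
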
+/* FLR routines: */
+static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ /* reset the state variables */
+ bnx2x_iov_static_resc(bp, vf);
+ vf->state = VF_FREE;
+}
+
+static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
+
+ /* DQ usage counter */
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+ bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
+ "DQ VF usage counter timed out",
+ poll_cnt);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ /* FW cleanup command - poll for the results */
+ if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
+ poll_cnt))
+ BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);
+
+ /* verify TX hw is flushed */
+ bnx2x_tx_hw_flushed(bp, poll_cnt);
+}
+
+static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int rc, i;
+
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+ /* the cleanup operations are valid if and only if the VF
+ * was first acquired.
+ */
+ for (i = 0; i < vf_rxq_count(vf); i++) {
+ rc = bnx2x_vf_queue_flr(bp, vf, i);
+ if (rc)
+ goto out;
+ }
+
+ /* remove multicasts */
+ bnx2x_vf_mcast(bp, vf, NULL, 0, true);
+
+ /* dispatch final cleanup and wait for HW queues to flush */
+ bnx2x_vf_flr_clnup_hw(bp, vf);
+
+ /* release VF resources */
+ bnx2x_vf_free_resc(bp, vf);
+
+ /* re-open the mailbox */
+ bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
+ return;
+out:
+ BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
+ vf->abs_vfid, i, rc);
+}
+
+static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
+{
+ struct bnx2x_virtf *vf;
+ int i;
+
+ for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
+ /* VF should be RESET & in FLR cleanup states */
+ if (bnx2x_vf(bp, i, state) != VF_RESET ||
+ !bnx2x_vf(bp, i, flr_clnup_stage))
+ continue;
+
+ DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
+ i, BNX2X_NR_VIRTFN(bp));
+
+ vf = BP_VF(bp, i);
+
+ /* lock the vf pf channel */
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
+
+ /* invoke the VF FLR SM */
+ bnx2x_vf_flr(bp, vf);
+
+ /* mark the VF to be ACKED and continue */
+ vf->flr_clnup_stage = false;
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
+ }
+
+ /* Acknowledge the handled VFs.
+ * We acknowledge all the VFs for which an FLR was requested, even
+ * those that we never opened, since the MCP will interrupt us
+ * immediately again if we only ack some of the bits, resulting in an
+ * endless loop. This can happen for example in KVM where an
+ * 'all ones' FLR request is sometimes given by the hypervisor
+ */
+ DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
+ bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
+ for (i = 0; i < FLRD_VFS_DWORDS; i++)
+ SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
+ bp->vfdb->flrd_vfs[i]);
+
+ bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);
+
+	/* clear the acked bits - better yet would be if the MCP implemented
+	 * write-to-clear semantics
+ */
+ for (i = 0; i < FLRD_VFS_DWORDS; i++)
+ SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
+}
+
+void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
+{
+ int i;
+
+ /* Read FLR'd VFs */
+ for (i = 0; i < FLRD_VFS_DWORDS; i++)
+ bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);
+
+ DP(BNX2X_MSG_MCP,
+ "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
+ bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
+
+ for_each_vf(bp, i) {
+ struct bnx2x_virtf *vf = BP_VF(bp, i);
+ u32 reset = 0;
+
+ if (vf->abs_vfid < 32)
+ reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
+ else
+ reset = bp->vfdb->flrd_vfs[1] &
+ (1 << (vf->abs_vfid - 32));
+
+ if (reset) {
+ /* set as reset and ready for cleanup */
+ vf->state = VF_RESET;
+ vf->flr_clnup_stage = true;
+
+ DP(BNX2X_MSG_IOV,
+ "Initiating Final cleanup for VF %d\n",
+ vf->abs_vfid);
+ }
+ }
+
+	/* do the FLR cleanup for all marked VFs */
+ bnx2x_vf_flr_clnup(bp);
+}
+
+/* IOV global initialization routines */
+void bnx2x_iov_init_dq(struct bnx2x *bp)
+{
+ if (!IS_SRIOV(bp))
+ return;
+
+	/* Set the DQ so that the CID reflects the abs_vfid */
+ REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
+ REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));
+
+	/* Set the VFs' starting CID. If it is > 0, the preceding CIDs belong
+	 * to the PF L2 queues.
+ */
+ REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);
+
+ /* The VF window size is the log2 of the max number of CIDs per VF */
+ REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);
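+	/* With this window a VF CID decomposes as
+	 *   abs_vfid = cid >> BNX2X_VF_CID_WND
+	 *   queue    = cid & ((1 << BNX2X_VF_CID_WND) - 1)
+	 * which is how EQ completions are demuxed back to VFs below.
+	 */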
+
+	/* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
+	 * the PF doorbell size, although the two are independent.
+ */
+ REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);
+
+ /* No security checks for now -
+ * configure single rule (out of 16) mask = 0x1, value = 0x0,
+ * CID range 0 - 0x1ffff
+ */
+ REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
+ REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
+ REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
+ REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
+
+	/* set the VF doorbell threshold. This threshold represents the number
+ * of doorbells allowed in the main DORQ fifo for a specific VF.
+ */
+ REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64);
+}
+
+void bnx2x_iov_init_dmae(struct bnx2x *bp)
+{
+ if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
+ REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
+}
+
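+/* Per the PCIe SR-IOV spec, a VF's 16-bit routing ID is derived from the
+ * PF's: RID = PF RID + VF offset + VF stride * vfid. The bus number is the
+ * high byte of that RID and the devfn is the low byte, hence the two
+ * helpers below.
+ */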
+static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
+{
+ struct pci_dev *dev = bp->pdev;
+ struct bnx2x_sriov *iov = &bp->vfdb->sriov;
+
+ return dev->bus->number + ((dev->devfn + iov->offset +
+ iov->stride * vfid) >> 8);
+}
+
+static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
+{
+ struct pci_dev *dev = bp->pdev;
+ struct bnx2x_sriov *iov = &bp->vfdb->sriov;
+
+ return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
+}
+
+static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int i, n;
+ struct pci_dev *dev = bp->pdev;
+ struct bnx2x_sriov *iov = &bp->vfdb->sriov;
+
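+	/* Each SR-IOV BAR region is divided equally between the total VFs;
+	 * the stride of 2 resource slots assumes 64-bit VF BARs.
+	 */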
+ for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
+ u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
+ u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);
+
+ size /= iov->total;
+ vf->bars[n].bar = start + size * vf->abs_vfid;
+ vf->bars[n].size = size;
+ }
+}
+
+static int bnx2x_ari_enabled(struct pci_dev *dev)
+{
+ return dev->bus->self && dev->bus->self->ari_enabled;
+}
+
+static void
+bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
+{
+ int sb_id;
+ u32 val;
+ u8 fid, current_pf = 0;
+
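+	/* CAM entries of a PF precede those of its VFs; track the PF most
+	 * recently seen so that each VF entry can be attributed to its
+	 * parent PF.
+	 */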
+ /* IGU in normal mode - read CAM */
+ for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
+ val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
+ if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
+ continue;
+ fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
+ if (fid & IGU_FID_ENCODE_IS_PF)
+ current_pf = fid & IGU_FID_PF_NUM_MASK;
+ else if (current_pf == BP_FUNC(bp))
+ bnx2x_vf_set_igu_info(bp, sb_id,
+ (fid & IGU_FID_VF_NUM_MASK));
+ DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
+ ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
+ ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
+ (fid & IGU_FID_VF_NUM_MASK)), sb_id,
+ GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
+ }
+ DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
+}
+
+static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
+{
+ if (bp->vfdb) {
+ kfree(bp->vfdb->vfqs);
+ kfree(bp->vfdb->vfs);
+ kfree(bp->vfdb);
+ }
+ bp->vfdb = NULL;
+}
+
+static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
+{
+ int pos;
+ struct pci_dev *dev = bp->pdev;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos) {
+ BNX2X_ERR("failed to find SRIOV capability in device\n");
+ return -ENODEV;
+ }
+
+ iov->pos = pos;
+ DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
+ pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
+ pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
+ pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
+ pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
+ pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
+ pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
+
+ return 0;
+}
+
+static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
+{
+ u32 val;
+
+ /* read the SRIOV capability structure
+ * The fields can be read via configuration read or
+ * directly from the device (starting at offset PCICFG_OFFSET)
+ */
+ if (bnx2x_sriov_pci_cfg_info(bp, iov))
+ return -ENODEV;
+
+ /* get the number of SRIOV bars */
+ iov->nres = 0;
+
+ /* read the first_vfid */
+ val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
+ iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
+ * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));
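+	/* The GRC field appears to count VFs in units of 8, and the per-path
+	 * offset is removed so that first_vf_in_pf is relative to this path
+	 * (inferred from the arithmetic above, not from documentation).
+	 */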
+
+ DP(BNX2X_MSG_IOV,
+ "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
+ BP_FUNC(bp),
+ iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
+ iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
+
+ return 0;
+}
+
+/* must be called after PF bars are mapped */
+int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
+ int num_vfs_param)
+{
+ int err, i;
+ struct bnx2x_sriov *iov;
+ struct pci_dev *dev = bp->pdev;
+
+ bp->vfdb = NULL;
+
+	/* verify this is a PF */
+ if (IS_VF(bp))
+ return 0;
+
+ /* verify sriov capability is present in configuration space */
+ if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
+ return 0;
+
+ /* verify chip revision */
+ if (CHIP_IS_E1x(bp))
+ return 0;
+
+ /* check if SRIOV support is turned off */
+ if (!num_vfs_param)
+ return 0;
+
+ /* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
+ if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
+		BNX2X_ERR("PF cids %d spill over into the vf space (which starts at %d). Abort SRIOV\n",
+ BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
+ return 0;
+ }
+
+ /* SRIOV can be enabled only with MSIX */
+ if (int_mode_param == BNX2X_INT_MODE_MSI ||
+ int_mode_param == BNX2X_INT_MODE_INTX) {
+ BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
+ return 0;
+ }
+
+ err = -EIO;
+ /* verify ari is enabled */
+ if (!bnx2x_ari_enabled(bp->pdev)) {
+ BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
+ return 0;
+ }
+
+ /* verify igu is in normal mode */
+ if (CHIP_INT_MODE_IS_BC(bp)) {
+ BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
+ return 0;
+ }
+
+ /* allocate the vfs database */
+ bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
+ if (!bp->vfdb) {
+ BNX2X_ERR("failed to allocate vf database\n");
+ err = -ENOMEM;
+ goto failed;
+ }
+
+	/* get the sriov info - Linux has already collected all the pertinent
+	 * information, however the sriov structure is for the private use
+	 * of the pci module. Also, we want this information regardless
+	 * of the hypervisor.
+ */
+ iov = &(bp->vfdb->sriov);
+ err = bnx2x_sriov_info(bp, iov);
+ if (err)
+ goto failed;
+
+	/* SR-IOV capability was enabled but there are no VFs */
+ if (iov->total == 0)
+ goto failed;
+
+ iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
+
+ DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
+ num_vfs_param, iov->nr_virtfn);
+
+ /* allocate the vf array */
+ bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
+ BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
+ if (!bp->vfdb->vfs) {
+ BNX2X_ERR("failed to allocate vf array\n");
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ /* Initial VF init - index and abs_vfid - nr_virtfn must be set */
+ for_each_vf(bp, i) {
+ bnx2x_vf(bp, i, index) = i;
+ bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
+ bnx2x_vf(bp, i, state) = VF_FREE;
+ mutex_init(&bnx2x_vf(bp, i, op_mutex));
+ bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
+ }
+
+ /* re-read the IGU CAM for VFs - index and abs_vfid must be set */
+ bnx2x_get_vf_igu_cam_info(bp);
+
+ /* allocate the queue arrays for all VFs */
+ bp->vfdb->vfqs = kzalloc(
+ BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
+ GFP_KERNEL);
+
+ DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);
+
+ if (!bp->vfdb->vfqs) {
+ BNX2X_ERR("failed to allocate vf queue array\n");
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ /* Prepare the VFs event synchronization mechanism */
+ mutex_init(&bp->vfdb->event_mutex);
+
+ return 0;
+failed:
+ DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
+ __bnx2x_iov_free_vfdb(bp);
+ return err;
+}
+
+void bnx2x_iov_remove_one(struct bnx2x *bp)
+{
+ int vf_idx;
+
+ /* if SRIOV is not enabled there's nothing to do */
+ if (!IS_SRIOV(bp))
+ return;
+
+ DP(BNX2X_MSG_IOV, "about to call disable sriov\n");
+ pci_disable_sriov(bp->pdev);
+ DP(BNX2X_MSG_IOV, "sriov disabled\n");
+
+ /* disable access to all VFs */
+ for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
+ bnx2x_pretend_func(bp,
+ HW_VF_HANDLE(bp,
+ bp->vfdb->sriov.first_vf_in_pf +
+ vf_idx));
+ DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
+ bp->vfdb->sriov.first_vf_in_pf + vf_idx);
+ bnx2x_vf_enable_internal(bp, 0);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+ }
+
+ /* free vf database */
+ __bnx2x_iov_free_vfdb(bp);
+}
+
+void bnx2x_iov_free_mem(struct bnx2x *bp)
+{
+ int i;
+
+ if (!IS_SRIOV(bp))
+ return;
+
+ /* free vfs hw contexts */
+ for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
+ struct hw_dma *cxt = &bp->vfdb->context[i];
+ BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
+ }
+
+ BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
+ BP_VFDB(bp)->sp_dma.mapping,
+ BP_VFDB(bp)->sp_dma.size);
+
+ BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
+ BP_VF_MBX_DMA(bp)->mapping,
+ BP_VF_MBX_DMA(bp)->size);
+
+ BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
+ BP_VF_BULLETIN_DMA(bp)->mapping,
+ BP_VF_BULLETIN_DMA(bp)->size);
+}
+
+int bnx2x_iov_alloc_mem(struct bnx2x *bp)
+{
+ size_t tot_size;
+ int i, rc = 0;
+
+ if (!IS_SRIOV(bp))
+ return rc;
+
+ /* allocate vfs hw contexts */
+ tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
+ BNX2X_CIDS_PER_VF * sizeof(union cdu_context);
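+	/* The context space covers every VF CID up to and including this
+	 * PF's last VF; it is carved into ILT-page-sized chunks below.
+	 */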
+
+ for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
+ struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
+ cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
+
+ if (cxt->size) {
+ cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
+ if (!cxt->addr)
+ goto alloc_mem_err;
+ } else {
+ cxt->addr = NULL;
+ cxt->mapping = 0;
+ }
+ tot_size -= cxt->size;
+ }
+
+ /* allocate vfs ramrods dma memory - client_init and set_mac */
+ tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
+ BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
+ tot_size);
+ if (!BP_VFDB(bp)->sp_dma.addr)
+ goto alloc_mem_err;
+ BP_VFDB(bp)->sp_dma.size = tot_size;
+
+ /* allocate mailboxes */
+ tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
+ BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
+ tot_size);
+ if (!BP_VF_MBX_DMA(bp)->addr)
+ goto alloc_mem_err;
+
+ BP_VF_MBX_DMA(bp)->size = tot_size;
+
+ /* allocate local bulletin boards */
+ tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
+ BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
+ tot_size);
+ if (!BP_VF_BULLETIN_DMA(bp)->addr)
+ goto alloc_mem_err;
+
+ BP_VF_BULLETIN_DMA(bp)->size = tot_size;
+
+ return 0;
+
+alloc_mem_err:
+ return -ENOMEM;
+}
+
+static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_queue *q)
+{
+ u8 cl_id = vfq_cl_id(vf, q);
+ u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
+ unsigned long q_type = 0;
+
+ set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+ set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+
+ /* Queue State object */
+ bnx2x_init_queue_obj(bp, &q->sp_obj,
+ cl_id, &q->cid, 1, func_id,
+ bnx2x_vf_sp(bp, vf, q_data),
+ bnx2x_vf_sp_map(bp, vf, q_data),
+ q_type);
+
+ /* sp indication is set only when vlan/mac/etc. are initialized */
+ q->sp_initialized = false;
+
+ DP(BNX2X_MSG_IOV,
+ "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
+ vf->abs_vfid, q->sp_obj.func_id, q->cid);
+}
+
+/* called by bnx2x_nic_load */
+int bnx2x_iov_nic_init(struct bnx2x *bp)
+{
+ int vfid;
+
+ if (!IS_SRIOV(bp)) {
+ DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
+ return 0;
+ }
+
+ DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
+
+ /* let FLR complete ... */
+ msleep(100);
+
+ /* initialize vf database */
+ for_each_vf(bp, vfid) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vfid);
+
+ int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
+ BNX2X_CIDS_PER_VF;
+
+ union cdu_context *base_cxt = (union cdu_context *)
+ BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
+ (base_vf_cid & (ILT_PAGE_CIDS-1));
+
+ DP(BNX2X_MSG_IOV,
+ "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
+ vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
+ BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
+
+ /* init statically provisioned resources */
+ bnx2x_iov_static_resc(bp, vf);
+
+ /* queues are initialized during VF-ACQUIRE */
+ vf->filter_state = 0;
+ vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
+
+ /* init mcast object - This object will be re-initialized
+ * during VF-ACQUIRE with the proper cl_id and cid.
+ * It needs to be initialized here so that it can be safely
+ * handled by a subsequent FLR flow.
+ */
+ vf->mcast_list_len = 0;
+ bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
+ 0xFF, 0xFF, 0xFF,
+ bnx2x_vf_sp(bp, vf, mcast_rdata),
+ bnx2x_vf_sp_map(bp, vf, mcast_rdata),
+ BNX2X_FILTER_MCAST_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX);
+
+ /* set the mailbox message addresses */
+ BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
+ (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
+ MBX_MSG_ALIGNED_SIZE);
+
+ BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
+ vfid * MBX_MSG_ALIGNED_SIZE;
+
+ /* Enable vf mailbox */
+ bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
+ }
+
+ /* Final VF init */
+ for_each_vf(bp, vfid) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vfid);
+
+ /* fill in the BDF and bars */
+ vf->bus = bnx2x_vf_bus(bp, vfid);
+ vf->devfn = bnx2x_vf_devfn(bp, vfid);
+ bnx2x_vf_set_bars(bp, vf);
+
+ DP(BNX2X_MSG_IOV,
+ "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
+ vf->abs_vfid, vf->bus, vf->devfn,
+ (unsigned)vf->bars[0].bar, vf->bars[0].size,
+ (unsigned)vf->bars[1].bar, vf->bars[1].size,
+ (unsigned)vf->bars[2].bar, vf->bars[2].size);
+ }
+
+ return 0;
+}
+
+/* called by bnx2x_chip_cleanup */
+int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
+{
+ int i;
+
+ if (!IS_SRIOV(bp))
+ return 0;
+
+ /* release all the VFs */
+ for_each_vf(bp, i)
+ bnx2x_vf_release(bp, BP_VF(bp, i));
+
+ return 0;
+}
+
+/* called by bnx2x_init_hw_func, returns the next ilt line */
+int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
+{
+ int i;
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+
+ if (!IS_SRIOV(bp))
+ return line;
+
+ /* set vfs ilt lines */
+ for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
+ struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
+
+ ilt->lines[line+i].page = hw_cxt->addr;
+ ilt->lines[line+i].page_mapping = hw_cxt->mapping;
+ ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
+ }
+ return line + i;
+}
+
+static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
+{
+ return ((cid >= BNX2X_FIRST_VF_CID) &&
+ ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
+}
+
+static
+void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
+ struct bnx2x_vf_queue *vfq,
+ union event_ring_elem *elem)
+{
+ unsigned long ramrod_flags = 0;
+ int rc = 0;
+
+ /* Always push next commands out, don't wait here */
+ set_bit(RAMROD_CONT, &ramrod_flags);
+
+ switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
+ case BNX2X_FILTER_MAC_PENDING:
+ rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
+ &ramrod_flags);
+ break;
+ case BNX2X_FILTER_VLAN_PENDING:
+ rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
+ &ramrod_flags);
+ break;
+ default:
+ BNX2X_ERR("Unsupported classification command: %d\n",
+ elem->message.data.eth_event.echo);
+ return;
+ }
+ if (rc < 0)
+ BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
+ else if (rc > 0)
+ DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
+}
+
+static
+void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ struct bnx2x_mcast_ramrod_params rparam = {NULL};
+ int rc;
+
+ rparam.mcast_obj = &vf->mcast_obj;
+ vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
+
+ /* If there are pending mcast commands - send them */
+ if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+ if (rc < 0)
+ BNX2X_ERR("Failed to send pending mcast commands: %d\n",
+ rc);
+ }
+}
+
+static
+void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ smp_mb__before_atomic();
+ clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
+ smp_mb__after_atomic();
+}
+
+static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);
+}
+
+int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
+{
+ struct bnx2x_virtf *vf;
+ int qidx = 0, abs_vfid;
+ u8 opcode;
+ u16 cid = 0xffff;
+
+ if (!IS_SRIOV(bp))
+ return 1;
+
+ /* first get the cid - the only events we handle here are cfc-delete
+ * and set-mac completion
+ */
+ opcode = elem->message.opcode;
+
+ switch (opcode) {
+ case EVENT_RING_OPCODE_CFC_DEL:
+ cid = SW_CID((__force __le32)
+ elem->message.data.cfc_del_event.cid);
+ DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
+ break;
+ case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
+ case EVENT_RING_OPCODE_MULTICAST_RULES:
+ case EVENT_RING_OPCODE_FILTERS_RULES:
+ case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
+ cid = (elem->message.data.eth_event.echo &
+ BNX2X_SWCID_MASK);
+ DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
+ break;
+ case EVENT_RING_OPCODE_VF_FLR:
+ abs_vfid = elem->message.data.vf_flr_event.vf_id;
+ DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
+ abs_vfid);
+ goto get_vf;
+ case EVENT_RING_OPCODE_MALICIOUS_VF:
+ abs_vfid = elem->message.data.malicious_vf_event.vf_id;
+ BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
+ abs_vfid,
+ elem->message.data.malicious_vf_event.err_id);
+ goto get_vf;
+ default:
+ return 1;
+ }
+
+	/* check if the cid is in the VF range */
+ if (!bnx2x_iov_is_vf_cid(bp, cid)) {
+ DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
+ return 1;
+ }
+
+ /* extract vf and rxq index from vf_cid - relies on the following:
+ * 1. vfid on cid reflects the true abs_vfid
+ * 2. The max number of VFs (per path) is 64
+ */
+ qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
+ abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
+get_vf:
+ vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+
+ if (!vf) {
+ BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
+ cid, abs_vfid);
+ return 0;
+ }
+
+ switch (opcode) {
+ case EVENT_RING_OPCODE_CFC_DEL:
+ DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
+ vf->abs_vfid, qidx);
+ vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
+ &vfq_get(vf,
+ qidx)->sp_obj,
+ BNX2X_Q_CMD_CFC_DEL);
+ break;
+ case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
+ DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
+ vf->abs_vfid, qidx);
+ bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
+ break;
+ case EVENT_RING_OPCODE_MULTICAST_RULES:
+ DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
+ vf->abs_vfid, qidx);
+ bnx2x_vf_handle_mcast_eqe(bp, vf);
+ break;
+ case EVENT_RING_OPCODE_FILTERS_RULES:
+ DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
+ vf->abs_vfid, qidx);
+ bnx2x_vf_handle_filters_eqe(bp, vf);
+ break;
+ case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
+ DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
+ vf->abs_vfid, qidx);
+		bnx2x_vf_handle_rss_update_eqe(bp, vf);
+		break;
+ case EVENT_RING_OPCODE_VF_FLR:
+ case EVENT_RING_OPCODE_MALICIOUS_VF:
+ /* Do nothing for now */
+ return 0;
+ }
+
+ return 0;
+}
+
+static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
+{
+ /* extract the vf from vf_cid - relies on the following:
+ * 1. vfid on cid reflects the true abs_vfid
+ * 2. The max number of VFs (per path) is 64
+ */
+ int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
+ return bnx2x_vf_by_abs_fid(bp, abs_vfid);
+}
+
+void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
+ struct bnx2x_queue_sp_obj **q_obj)
+{
+ struct bnx2x_virtf *vf;
+
+ if (!IS_SRIOV(bp))
+ return;
+
+ vf = bnx2x_vf_by_cid(bp, vf_cid);
+
+ if (vf) {
+ /* extract queue index from vf_cid - relies on the following:
+ * 1. vfid on cid reflects the true abs_vfid
+ * 2. The max number of VFs (per path) is 64
+ */
+ int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
+ *q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
+ } else {
+ BNX2X_ERR("No vf matching cid %d\n", vf_cid);
+ }
+}
+
+void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
+{
+ int i;
+ int first_queue_query_index, num_queues_req;
+ dma_addr_t cur_data_offset;
+ struct stats_query_entry *cur_query_entry;
+ u8 stats_count = 0;
+ bool is_fcoe = false;
+
+ if (!IS_SRIOV(bp))
+ return;
+
+ if (!NO_FCOE(bp))
+ is_fcoe = true;
+
+ /* fcoe adds one global request and one queue request */
+ num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
+ first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
+ (is_fcoe ? 0 : 1);
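+	/* i.e. VF queries are appended right after the PF's last queue query;
+	 * when FCoE is absent its reserved slot is reclaimed by starting one
+	 * index earlier.
+	 */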
+
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+ "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
+ BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
+ first_queue_query_index + num_queues_req);
+
+ cur_data_offset = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, queue_stats) +
+ num_queues_req * sizeof(struct per_queue_stats);
+
+ cur_query_entry = &bp->fw_stats_req->
+ query[first_queue_query_index + num_queues_req];
+
+ for_each_vf(bp, i) {
+ int j;
+ struct bnx2x_virtf *vf = BP_VF(bp, i);
+
+ if (vf->state != VF_ENABLED) {
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+ "vf %d not enabled so no stats for it\n",
+ vf->abs_vfid);
+ continue;
+ }
+
+ DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
+ for_each_vfq(vf, j) {
+ struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
+
+ dma_addr_t q_stats_addr =
+ vf->fw_stat_map + j * vf->stats_stride;
+
+			/* collect stats for active queues only */
+ if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
+ BNX2X_Q_LOGICAL_STATE_STOPPED)
+ continue;
+
+ /* create stats query entry for this queue */
+ cur_query_entry->kind = STATS_TYPE_QUEUE;
+ cur_query_entry->index = vfq_stat_id(vf, rxq);
+ cur_query_entry->funcID =
+ cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
+ cur_query_entry->address.hi =
+ cpu_to_le32(U64_HI(q_stats_addr));
+ cur_query_entry->address.lo =
+ cpu_to_le32(U64_LO(q_stats_addr));
+ DP(BNX2X_MSG_IOV,
+ "added address %x %x for vf %d queue %d client %d\n",
+ cur_query_entry->address.hi,
+ cur_query_entry->address.lo, cur_query_entry->funcID,
+ j, cur_query_entry->index);
+ cur_query_entry++;
+ cur_data_offset += sizeof(struct per_queue_stats);
+ stats_count++;
+
+ /* all stats are coalesced to the leading queue */
+ if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
+ break;
+ }
+ }
+ bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
+}
+
+static inline
+struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
+{
+ int i;
+ struct bnx2x_virtf *vf = NULL;
+
+ for_each_vf(bp, i) {
+ vf = BP_VF(bp, i);
+ if (stat_id >= vf->igu_base_id &&
+ stat_id < vf->igu_base_id + vf_sb_count(vf))
+ break;
+ }
+ return vf;
+}
+
+/* VF API helpers */
+static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
+ u8 enable)
+{
+ u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
+ u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
+
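+	/* A non-zero entry grants the VF access to the queue zone; bit 6
+	 * appears to mark the entry as valid (inferred from usage here).
+	 */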
+ REG_WR(bp, reg, val);
+}
+
+static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int i;
+
+ for_each_vfq(vf, i)
+ bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
+ vfq_qzone_id(vf, vfq_get(vf, i)), false);
+}
+
+static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ u32 val;
+
+ /* clear the VF configuration - pretend */
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+ val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
+ val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
+ IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
+ REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+}
+
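+/* A VF can drive at most one queue per status block, no more queues than it
+ * has CIDs, and never more than the HW maximum.
+ */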
+u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
+ BNX2X_VF_MAX_QUEUES);
+}
+
+static
+int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct vf_pf_resc_request *req_resc)
+{
+ u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
+ u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
+
+ /* Save a vlan filter for the Hypervisor */
+ return ((req_resc->num_rxqs <= rxq_cnt) &&
+ (req_resc->num_txqs <= txq_cnt) &&
+ (req_resc->num_sbs <= vf_sb_count(vf)) &&
+ (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
+ (req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));
+}
+
+/* CORE VF API */
+int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct vf_pf_resc_request *resc)
+{
+ int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
+ BNX2X_CIDS_PER_VF;
+
+ union cdu_context *base_cxt = (union cdu_context *)
+ BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
+ (base_vf_cid & (ILT_PAGE_CIDS-1));
+ int i;
+
+	/* if the state is 'acquired' the VF was not released or FLR'd; in
+	 * this case the returned resources match the already acquired
+	 * resources. Verify that the requested numbers do
+	 * not exceed the already acquired numbers.
+ */
+ if (vf->state == VF_ACQUIRED) {
+ DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
+ vf->abs_vfid);
+
+ if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
+			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must not exceed the previously acquired numbers\n",
+ vf->abs_vfid);
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ /* Otherwise vf state must be 'free' or 'reset' */
+ if (vf->state != VF_FREE && vf->state != VF_RESET) {
+ BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
+ vf->abs_vfid, vf->state);
+ return -EINVAL;
+ }
+
+ /* static allocation:
+	 * the global maximum numbers are fixed per VF. Fail the request if
+	 * the requested numbers exceed these globals.
+ */
+ if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
+ DP(BNX2X_MSG_IOV,
+ "cannot fulfill vf resource request. Placing maximal available values in response\n");
+ /* set the max resource in the vf */
+ return -ENOMEM;
+ }
+
+ /* Set resources counters - 0 request means max available */
+ vf_sb_count(vf) = resc->num_sbs;
+ vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
+ vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
+ if (resc->num_mac_filters)
+ vf_mac_rules_cnt(vf) = resc->num_mac_filters;
+ /* Add an additional vlan filter credit for the hypervisor */
+ bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1);
+
+ DP(BNX2X_MSG_IOV,
+ "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
+ vf_sb_count(vf), vf_rxq_count(vf),
+ vf_txq_count(vf), vf_mac_rules_cnt(vf),
+ vf_vlan_rules_visible_cnt(vf));
+
+ /* Initialize the queues */
+ if (!vf->vfqs) {
+ DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
+ return -EINVAL;
+ }
+
+ for_each_vfq(vf, i) {
+ struct bnx2x_vf_queue *q = vfq_get(vf, i);
+
+ if (!q) {
+ BNX2X_ERR("q number %d was not allocated\n", i);
+ return -EINVAL;
+ }
+
+ q->index = i;
+ q->cxt = &((base_cxt + i)->eth);
+ q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
+
+ DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
+ vf->abs_vfid, i, q->index, q->cid, q->cxt);
+
+ /* init SP objects */
+ bnx2x_vfq_init(bp, vf, q);
+ }
+ vf->state = VF_ACQUIRED;
+ return 0;
+}
+
+int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
+{
+ struct bnx2x_func_init_params func_init = {0};
+ u16 flags = 0;
+ int i;
+
+ /* the sb resources are initialized at this point, do the
+ * FW/HW initializations
+ */
+ for_each_vf_sb(vf, i)
+ bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
+ vf_igu_sb(vf, i), vf_igu_sb(vf, i));
+
+ /* Sanity checks */
+ if (vf->state != VF_ACQUIRED) {
+ DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
+ vf->abs_vfid, vf->state);
+ return -EINVAL;
+ }
+
+ /* let FLR complete ... */
+ msleep(100);
+
+ /* FLR cleanup epilogue */
+ if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
+ return -EBUSY;
+
+ /* reset IGU VF statistics: MSIX */
+	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
+
+ /* vf init */
+ if (vf->cfg_flags & VF_CFG_STATS)
+ flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
+
+ if (vf->cfg_flags & VF_CFG_TPA)
+ flags |= FUNC_FLG_TPA;
+
+ if (is_vf_multi(vf))
+ flags |= FUNC_FLG_RSS;
+
+ /* function setup */
+ func_init.func_flgs = flags;
+ func_init.pf_id = BP_FUNC(bp);
+ func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
+ func_init.fw_stat_map = vf->fw_stat_map;
+ func_init.spq_map = vf->spq_map;
+ func_init.spq_prod = 0;
+ bnx2x_func_init(bp, &func_init);
+
+ /* Enable the vf */
+ bnx2x_vf_enable_access(bp, vf->abs_vfid);
+ bnx2x_vf_enable_traffic(bp, vf);
+
+ /* queue protection table */
+ for_each_vfq(vf, i)
+ bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
+ vfq_qzone_id(vf, vfq_get(vf, i)), true);
+
+ vf->state = VF_ENABLED;
+
+ /* update vf bulletin board */
+ bnx2x_post_vf_bulletin(bp, vf->index);
+
+ return 0;
+}
+
+struct set_vf_state_cookie {
+ struct bnx2x_virtf *vf;
+ u8 state;
+};
+
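+/* Runs via bnx2x_stats_safe_exec() so that the state flip cannot race with
+ * an in-flight statistics ramrod (see the call site in bnx2x_vf_close).
+ */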
+static void bnx2x_set_vf_state(void *cookie)
+{
+ struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
+
+ p->vf->state = p->state;
+}
+
+int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int rc = 0, i;
+
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+ /* Close all queues */
+ for (i = 0; i < vf_rxq_count(vf); i++) {
+ rc = bnx2x_vf_queue_teardown(bp, vf, i);
+ if (rc)
+ goto op_err;
+ }
+
+ /* disable the interrupts */
+ DP(BNX2X_MSG_IOV, "disabling igu\n");
+ bnx2x_vf_igu_disable(bp, vf);
+
+ /* disable the VF */
+ DP(BNX2X_MSG_IOV, "clearing qtbl\n");
+ bnx2x_vf_clr_qtbl(bp, vf);
+
+	/* need to make sure there are no outstanding stats ramrods which may
+	 * cause the device to access the VF's stats buffer, since that buffer
+	 * is freed as soon as we return from the close flow.
+	 */
+ {
+ struct set_vf_state_cookie cookie;
+
+ cookie.vf = vf;
+ cookie.state = VF_ACQUIRED;
+ bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
+ }
+
+ DP(BNX2X_MSG_IOV, "set state to acquired\n");
+
+ return 0;
+op_err:
+ BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc);
+ return rc;
+}
+
+/* VF release can be called when either: 1. the VF was acquired but
+ * not enabled, or 2. the VF was enabled or in the process of being
+ * enabled
+ */
+int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int rc;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
+ vf->state == VF_FREE ? "Free" :
+ vf->state == VF_ACQUIRED ? "Acquired" :
+ vf->state == VF_ENABLED ? "Enabled" :
+ vf->state == VF_RESET ? "Reset" :
+ "Unknown");
+
+ switch (vf->state) {
+ case VF_ENABLED:
+ rc = bnx2x_vf_close(bp, vf);
+ if (rc)
+ goto op_err;
+ /* Fallthrough to release resources */
+ case VF_ACQUIRED:
+ DP(BNX2X_MSG_IOV, "about to free resources\n");
+ bnx2x_vf_free_resc(bp, vf);
+ break;
+
+ case VF_FREE:
+ case VF_RESET:
+ default:
+ break;
+ }
+ return 0;
+op_err:
+ BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc);
+ return rc;
+}
+
+int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_config_rss_params *rss)
+{
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+ set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags);
+ return bnx2x_config_rss(bp, rss);
+}
+
+int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct vfpf_tpa_tlv *tlv,
+ struct bnx2x_queue_update_tpa_params *params)
+{
+ aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr;
+ struct bnx2x_queue_state_params qstate;
+ int qid, rc = 0;
+
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+ /* Set ramrod params */
+ memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
+ memcpy(&qstate.params.update_tpa, params,
+ sizeof(struct bnx2x_queue_update_tpa_params));
+ qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
+ set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
+
+ for (qid = 0; qid < vf_rxq_count(vf); qid++) {
+ qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ qstate.params.update_tpa.sge_map = sge_addr[qid];
+ DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n",
+ vf->abs_vfid, qid, U64_HI(sge_addr[qid]),
+ U64_LO(sge_addr[qid]));
+ rc = bnx2x_queue_state_change(bp, &qstate);
+ if (rc) {
+ BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n",
+ U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]),
+ vf->abs_vfid, qid);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+/* VF release ~ VF close + VF release-resources
+ * Release is the ultimate SW shutdown and is called whenever an
+ * irrecoverable error is encountered.
+ */
+int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int rc;
+
+ DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
+
+ rc = bnx2x_vf_free(bp, vf);
+ if (rc)
+ WARN(rc,
+		     "VF[%d] Failed to release resources, rc=%d\n",
+ vf->abs_vfid, rc);
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
+ return rc;
+}
+
+static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
+ struct bnx2x_virtf *vf, u32 *sbdf)
+{
+ *sbdf = vf->devfn | (vf->bus << 8);
+}
+
+void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ enum channel_tlvs tlv)
+{
+ /* we don't lock the channel for unsupported tlvs */
+ if (!bnx2x_tlv_supported(tlv)) {
+ BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
+ return;
+ }
+
+ /* lock the channel */
+ mutex_lock(&vf->op_mutex);
+
+ /* record the locking op */
+ vf->op_current = tlv;
+
+ /* log the lock */
+ DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
+ vf->abs_vfid, tlv);
+}
+
+void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ enum channel_tlvs expected_tlv)
+{
+ enum channel_tlvs current_tlv;
+
+ if (!vf) {
+ BNX2X_ERR("VF was %p\n", vf);
+ return;
+ }
+
+ current_tlv = vf->op_current;
+
+ /* we don't unlock the channel for unsupported tlvs */
+ if (!bnx2x_tlv_supported(expected_tlv))
+ return;
+
+ WARN(expected_tlv != vf->op_current,
+ "lock mismatch: expected %d found %d", expected_tlv,
+ vf->op_current);
+
+	/* clear the locking op */
+	vf->op_current = CHANNEL_TLV_NONE;
+
+	/* unlock the channel */
+	mutex_unlock(&vf->op_mutex);
+
+	/* log the unlock (use the saved tlv - op_current was just cleared) */
+	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
+	   vf->abs_vfid, current_tlv);
+}
+
+static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
+{
+ struct bnx2x_queue_state_params q_params;
+ u32 prev_flags;
+ int i, rc;
+
+ /* Verify changes are needed and record current Tx switching state */
+ prev_flags = bp->flags;
+ if (enable)
+ bp->flags |= TX_SWITCHING;
+ else
+ bp->flags &= ~TX_SWITCHING;
+ if (prev_flags == bp->flags)
+ return 0;
+
+ /* Verify state enables the sending of queue ramrods */
+ if ((bp->state != BNX2X_STATE_OPEN) ||
+ (bnx2x_get_q_logical_state(bp,
+ &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
+ BNX2X_Q_LOGICAL_STATE_ACTIVE))
+ return 0;
+
+ /* send q. update ramrod to configure Tx switching */
+ memset(&q_params, 0, sizeof(q_params));
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+ &q_params.params.update.update_flags);
+ if (enable)
+ __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
+ &q_params.params.update.update_flags);
+ else
+ __clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
+ &q_params.params.update.update_flags);
+
+ /* send the ramrod on all the queues of the PF */
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ /* Set the appropriate Queue object */
+ q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure Tx switching\n");
+ return rc;
+ }
+ }
+
+ DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
+ return 0;
+}
+
+int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
+{
+ struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
+
+ if (!IS_SRIOV(bp)) {
+ BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
+ return -EINVAL;
+ }
+
+ DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
+ num_vfs_param, BNX2X_NR_VIRTFN(bp));
+
+ /* HW channel is only operational when PF is up */
+ if (bp->state != BNX2X_STATE_OPEN) {
+ BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
+ return -EINVAL;
+ }
+
+ /* we are always bound by the total_vfs in the configuration space */
+ if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
+ BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
+ num_vfs_param, BNX2X_NR_VIRTFN(bp));
+ num_vfs_param = BNX2X_NR_VIRTFN(bp);
+ }
+
+ bp->requested_nr_virtfn = num_vfs_param;
+ if (num_vfs_param == 0) {
+ bnx2x_set_pf_tx_switching(bp, false);
+ pci_disable_sriov(dev);
+ return 0;
+ } else {
+ return bnx2x_enable_sriov(bp);
+ }
+}
+
+#define IGU_ENTRY_SIZE 4
+
+int bnx2x_enable_sriov(struct bnx2x *bp)
+{
+ int rc = 0, req_vfs = bp->requested_nr_virtfn;
+ int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
+ u32 igu_entry, address;
+ u16 num_vf_queues;
+
+ if (req_vfs == 0)
+ return 0;
+
+ first_vf = bp->vfdb->sriov.first_vf_in_pf;
+
+ /* statically distribute vf sb pool between VFs */
+ num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
+ BP_VFDB(bp)->vf_sbs_pool / req_vfs);
+
+ /* zero previous values learned from igu cam */
+ for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+ vf->sb_count = 0;
+ vf_sb_count(BP_VF(bp, vf_idx)) = 0;
+ }
+ bp->vfdb->vf_sbs_pool = 0;
+
+ /* prepare IGU cam */
+ sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
+ address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
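+	/* Each IGU CAM entry maps one status block to a (function, vector)
+	 * pair; program num_vf_queues consecutive entries per VF and mark
+	 * them valid.
+	 */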
+ for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
+ for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
+ igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
+ vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
+ IGU_REG_MAPPING_MEMORY_VALID;
+ DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
+ sb_idx, vf_idx);
+ REG_WR(bp, address, igu_entry);
+ sb_idx++;
+ address += IGU_ENTRY_SIZE;
+ }
+ }
+
+ /* Reinitialize vf database according to igu cam */
+ bnx2x_get_vf_igu_cam_info(bp);
+
+ DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
+ BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);
+
+ qcount = 0;
+ for_each_vf(bp, vf_idx) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+ /* set local queue arrays */
+ vf->vfqs = &bp->vfdb->vfqs[qcount];
+ qcount += vf_sb_count(vf);
+ bnx2x_iov_static_resc(bp, vf);
+ }
+
+ /* prepare msix vectors in VF configuration space - the value in the
+ * PCI configuration space should be the index of the last entry,
+ * namely one less than the actual size of the table
+ */
+ for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
+ REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
+ num_vf_queues - 1);
+ DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
+ vf_idx, num_vf_queues - 1);
+ }
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+	/* enable sriov. This will probe all the VFs, and consequentially cause
+	 * the "acquire" messages to appear on the VF PF channel. Disable
+	 * first so that we start from a clean state.
+	 */
+ DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
+ bnx2x_disable_sriov(bp);
+
+ rc = bnx2x_set_pf_tx_switching(bp, true);
+ if (rc)
+ return rc;
+
+ rc = pci_enable_sriov(bp->pdev, req_vfs);
+ if (rc) {
+ BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
+ return rc;
+ }
+ DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
+ return req_vfs;
+}
+
+void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
+{
+ int vfidx;
+ struct pf_vf_bulletin_content *bulletin;
+
+ DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
+ for_each_vf(bp, vfidx) {
+ bulletin = BP_VF_BULLETIN(bp, vfidx);
+ if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
+ bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
+ }
+}
+
+void bnx2x_disable_sriov(struct bnx2x *bp)
+{
+ pci_disable_sriov(bp->pdev);
+}
+
+static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
+ struct bnx2x_virtf **vf,
+ struct pf_vf_bulletin_content **bulletin)
+{
+ if (bp->state != BNX2X_STATE_OPEN) {
+ BNX2X_ERR("vf ndo called though PF is down\n");
+ return -EINVAL;
+ }
+
+ if (!IS_SRIOV(bp)) {
+ BNX2X_ERR("vf ndo called though sriov is disabled\n");
+ return -EINVAL;
+ }
+
+ if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
+ BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
+ vfidx, BNX2X_NR_VIRTFN(bp));
+ return -EINVAL;
+ }
+
+ /* init members */
+ *vf = BP_VF(bp, vfidx);
+ *bulletin = BP_VF_BULLETIN(bp, vfidx);
+
+ if (!*vf) {
+ BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n",
+ vfidx);
+ return -EINVAL;
+ }
+
+ if (!(*vf)->vfqs) {
+ BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n",
+ vfidx);
+ return -EINVAL;
+ }
+
+ if (!*bulletin) {
+ BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n",
+ vfidx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
+ struct ifla_vf_info *ivi)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct bnx2x_virtf *vf = NULL;
+ struct pf_vf_bulletin_content *bulletin = NULL;
+ struct bnx2x_vlan_mac_obj *mac_obj;
+ struct bnx2x_vlan_mac_obj *vlan_obj;
+ int rc;
+
+ /* sanity and init */
+ rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
+ if (rc)
+ return rc;
+ mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
+ vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+ if (!mac_obj || !vlan_obj) {
+ BNX2X_ERR("VF partially initialized\n");
+ return -EINVAL;
+ }
+
+ ivi->vf = vfidx;
+ ivi->qos = 0;
+ ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */
+ ivi->min_tx_rate = 0;
+	ivi->spoofchk = 1; /* always enabled */
+ if (vf->state == VF_ENABLED) {
+ /* mac and vlan are in vlan_mac objects */
+ if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
+ mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
+ 0, ETH_ALEN);
+ vlan_obj->get_n_elements(bp, vlan_obj, 1,
+ (u8 *)&ivi->vlan, 0,
+ VLAN_HLEN);
+ }
+ } else {
+ /* mac */
+ if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
+			/* mac configured by ndo so it's in the bulletin board */
+ memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
+ else
+ /* function has not been loaded yet. Show mac as 0s */
+ memset(&ivi->mac, 0, ETH_ALEN);
+
+ /* vlan */
+ if (bulletin->valid_bitmap & (1 << VLAN_VALID))
+			/* vlan configured by ndo so it's in the bulletin board */
+ memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
+ else
+ /* function has not been loaded yet. Show vlans as 0s */
+ memset(&ivi->vlan, 0, VLAN_HLEN);
+ }
+
+ return 0;
+}
+
+/* New mac for VF. Consider these cases:
+ * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
+ * supply at acquire.
+ * 2. VF has already been acquired but has not yet initialized - store in local
+ * bulletin board. mac will be posted on VF bulletin board after VF init. VF
+ * will configure this mac when it is ready.
+ * 3. VF has already initialized but has not yet setup a queue - post the new
+ * mac on VF's bulletin board right now. VF will configure this mac when it
+ * is ready.
+ * 4. VF has already set a queue - delete any macs already configured for this
+ *    queue and manually configure the new mac.
+ * In any event, once this function has been called, refuse any attempts by the
+ * VF to configure any mac for itself except for this mac. In case of a race
+ * where the VF fails to see the new post on its bulletin board before sending a
+ * mac configuration request, the PF will simply fail the request and VF can try
+ * again after consulting its bulletin board.
+ */
+int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc, q_logical_state;
+ struct bnx2x_virtf *vf = NULL;
+ struct pf_vf_bulletin_content *bulletin = NULL;
+
+ /* sanity and init */
+ rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
+ if (rc)
+ return rc;
+ if (!is_valid_ether_addr(mac)) {
+ BNX2X_ERR("mac address invalid\n");
+ return -EINVAL;
+ }
+
+	/* update PF's copy of the VF's bulletin. The PF will no longer accept
+	 * mac configuration requests from the VF unless they match this mac
+ */
+ bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
+ memcpy(bulletin->mac, mac, ETH_ALEN);
+
+ /* Post update on VF's bulletin board */
+ rc = bnx2x_post_vf_bulletin(bp, vfidx);
+ if (rc) {
+ BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
+ return rc;
+ }
+
+ q_logical_state =
+ bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
+ if (vf->state == VF_ENABLED &&
+ q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
+ /* configure the mac in device on this vf's queue */
+ unsigned long ramrod_flags = 0;
+ struct bnx2x_vlan_mac_obj *mac_obj;
+
+ /* User should be able to see failure reason in system logs */
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
+
+ /* must lock vfpf channel to protect against vf flows */
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
+
+ /* remove existing eth macs */
+ mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
+ rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
+ if (rc) {
+ BNX2X_ERR("failed to delete eth macs\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* remove existing uc list macs */
+ rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
+ if (rc) {
+ BNX2X_ERR("failed to delete uc_list macs\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* configure the new mac to device */
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
+ BNX2X_ETH_MAC, &ramrod_flags);
+
+out:
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
+ }
+
+ return rc;
+}
+
+int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
+{
+ struct bnx2x_queue_state_params q_params = {NULL};
+ struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+ struct bnx2x_queue_update_params *update_params;
+ struct pf_vf_bulletin_content *bulletin = NULL;
+ struct bnx2x_rx_mode_ramrod_params rx_ramrod;
+ struct bnx2x *bp = netdev_priv(dev);
+ struct bnx2x_vlan_mac_obj *vlan_obj;
+ unsigned long vlan_mac_flags = 0;
+ unsigned long ramrod_flags = 0;
+ struct bnx2x_virtf *vf = NULL;
+ unsigned long accept_flags;
+ int rc;
+
+ /* sanity and init */
+ rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
+ if (rc)
+ return rc;
+
+ if (vlan > 4095) {
+ BNX2X_ERR("illegal vlan value %d\n", vlan);
+ return -EINVAL;
+ }
+
+ DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
+ vfidx, vlan, 0);
+
+	/* update PF's copy of the VF's bulletin. No point in posting the vlan
+	 * to the VF since it doesn't have anything to do with it. But it is
+	 * useful to store it here in case the VF is not up yet, so we can
+	 * configure the vlan later when it comes up. Treat vlan id 0 as
+	 * "remove the host tag".
+	 */
+ if (vlan > 0)
+ bulletin->valid_bitmap |= 1 << VLAN_VALID;
+ else
+ bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
+ bulletin->vlan = vlan;
+
+ /* is vf initialized and queue set up? */
+ if (vf->state != VF_ENABLED ||
+ bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
+ BNX2X_Q_LOGICAL_STATE_ACTIVE)
+ return rc;
+
+ /* User should be able to see error in system logs */
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
+
+ /* must lock vfpf channel to protect against vf flows */
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+
+ /* remove existing vlans */
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+ rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
+ &ramrod_flags);
+ if (rc) {
+ BNX2X_ERR("failed to delete vlans\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* need to remove/add the VF's accept_any_vlan bit */
+ accept_flags = bnx2x_leading_vfq(vf, accept_flags);
+ if (vlan)
+ clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+ else
+ set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+ bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
+ accept_flags);
+ bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
+ bnx2x_config_rx_mode(bp, &rx_ramrod);
+
+ /* configure the new vlan to device */
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ ramrod_param.vlan_mac_obj = vlan_obj;
+ ramrod_param.ramrod_flags = ramrod_flags;
+ set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &ramrod_param.user_req.vlan_mac_flags);
+ ramrod_param.user_req.u.vlan.vlan = vlan;
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+ if (rc) {
+ BNX2X_ERR("failed to configure vlan\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* send queue update ramrod to configure default vlan and silent
+ * vlan removal
+ */
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
+ update_params = &q_params.params.update;
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ &update_params->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ &update_params->update_flags);
+ if (vlan == 0) {
+ /* if vlan is 0 then we want to leave the VF traffic
+ * untagged, and leave the incoming traffic untouched
+ * (i.e. do not remove any vlan tags).
+ */
+ __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ &update_params->update_flags);
+ __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &update_params->update_flags);
+ } else {
+ /* configure default vlan to vf queue and set silent
+ * vlan removal (the vf remains unaware of this vlan).
+ */
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ &update_params->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &update_params->update_flags);
+ update_params->def_vlan = vlan;
+ update_params->silent_removal_value =
+ vlan & VLAN_VID_MASK;
+ update_params->silent_removal_mask = VLAN_VID_MASK;
+ }
+
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure default VLAN\n");
+ goto out;
+ }
+
+	/* clear the flag indicating that this VF needs its vlan
+	 * (it will only be set if the HV configured the vlan before the VF
+	 * was up and we were called because the VF came up later).
+	 */
+out:
+ vf->cfg_flags &= ~VF_CFG_VLAN;
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+
+ return rc;
+}
+
+/* crc is the first field in the bulletin board. Compute the crc over the
+ * entire bulletin board excluding the crc field itself. Use the length field
+ * as the bulletin board may have been posted by a PF with a different version
+ * from the VF that samples it. Therefore, the length is computed by the
+ * PF and then used blindly by the VF.
+ */
+u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
+ struct pf_vf_bulletin_content *bulletin)
+{
+ return crc32(BULLETIN_CRC_SEED,
+ ((u8 *)bulletin) + sizeof(bulletin->crc),
+ bulletin->length - sizeof(bulletin->crc));
+}
+
+/* Check for new posts on the bulletin board */
+enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
+{
+ struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
+ int attempts;
+
+ /* bulletin board hasn't changed since last sample */
+ if (bp->old_bulletin.version == bulletin.version)
+ return PFVF_BULLETIN_UNCHANGED;
+
+ /* validate crc of new bulletin board */
+ if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
+		/* sampling the structure mid-post may result in corrupted
+		 * data; validate the crc to ensure coherency.
+ */
+ for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
+ bulletin = bp->pf2vf_bulletin->content;
+ if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
+ &bulletin))
+ break;
+ BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
+ bulletin.crc,
+ bnx2x_crc_vf_bulletin(bp, &bulletin));
+ }
+ if (attempts >= BULLETIN_ATTEMPTS) {
+ BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
+ attempts);
+ return PFVF_BULLETIN_CRC_ERR;
+ }
+ }
+
+ /* the mac address in bulletin board is valid and is new */
+ if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
+ !ether_addr_equal(bulletin.mac, bp->old_bulletin.mac)) {
+ /* update new mac to net device */
+ memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
+ }
+
+ /* the vlan in bulletin board is valid and is new */
+ if (bulletin.valid_bitmap & 1 << VLAN_VALID)
+ memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);
+
+ /* copy new bulletin board to bp */
+ bp->old_bulletin = bulletin;
+
+ return PFVF_BULLETIN_UPDATED;
+}
+
+void bnx2x_timer_sriov(struct bnx2x *bp)
+{
+ bnx2x_sample_bulletin(bp);
+
+	/* if the channel is down we need to self-destruct */
+ if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
+ BNX2X_MSG_IOV);
+}
+
+void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
+{
+ /* vf doorbells are embedded within the regview */
+ return bp->regview + PXP_VF_ADDR_DB_START;
+}
+
+void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
+{
+ BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
+ sizeof(struct bnx2x_vf_mbx_msg));
+	BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
+ sizeof(union pf_vf_bulletin));
+}
+
+int bnx2x_vf_pci_alloc(struct bnx2x *bp)
+{
+ mutex_init(&bp->vf2pf_mutex);
+
+ /* allocate vf2pf mailbox for vf to pf channel */
+ bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping,
+ sizeof(struct bnx2x_vf_mbx_msg));
+ if (!bp->vf2pf_mbox)
+ goto alloc_mem_err;
+
+ /* allocate pf 2 vf bulletin board */
+ bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping,
+ sizeof(union pf_vf_bulletin));
+ if (!bp->pf2vf_bulletin)
+ goto alloc_mem_err;
+
+ return 0;
+
+alloc_mem_err:
+ bnx2x_vf_pci_dealloc(bp);
+ return -ENOMEM;
+}
+
+void bnx2x_iov_channel_down(struct bnx2x *bp)
+{
+ int vf_idx;
+ struct pf_vf_bulletin_content *bulletin;
+
+ if (!IS_SRIOV(bp))
+ return;
+
+ for_each_vf(bp, vf_idx) {
+ /* locate this VFs bulletin board and update the channel down
+ * bit
+ */
+ bulletin = BP_VF_BULLETIN(bp, vf_idx);
+ bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;
+
+ /* update vf bulletin board */
+ bnx2x_post_vf_bulletin(bp, vf_idx);
+ }
+}
+
+void bnx2x_iov_task(struct work_struct *work)
+{
+ struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);
+
+ if (!netif_running(bp->dev))
+ return;
+
+ if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
+ &bp->iov_task_state))
+ bnx2x_vf_handle_flr_event(bp);
+
+ if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
+ &bp->iov_task_state))
+ bnx2x_vf_mbx(bp);
+}
+
+void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
+{
+ smp_mb__before_atomic();
+ set_bit(flag, &bp->iov_task_state);
+ smp_mb__after_atomic();
+ DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
+ queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
+}
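+
+/* Illustrative sketch: PF-side event handlers defer work to the IOV
+ * workqueue instead of processing it in place, e.g. on a VF message
+ * event:
+ *
+ *	bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG);
+ *
+ * bnx2x_iov_task() later consumes the flag in process context.
+ */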
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
new file mode 100644
index 00000000000..96c575e147a
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -0,0 +1,584 @@
+/* bnx2x_sriov.h: Broadcom Everest network driver.
+ *
+ * Copyright 2009-2013 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ * Ariel Elior <ariel.elior@qlogic.com>
+ */
+#ifndef BNX2X_SRIOV_H
+#define BNX2X_SRIOV_H
+
+#include "bnx2x_vfpf.h"
+#include "bnx2x.h"
+
+enum sample_bulletin_result {
+ PFVF_BULLETIN_UNCHANGED,
+ PFVF_BULLETIN_UPDATED,
+ PFVF_BULLETIN_CRC_ERR
+};
+
+#ifdef CONFIG_BNX2X_SRIOV
+
+extern struct workqueue_struct *bnx2x_iov_wq;
+
+/* The bnx2x device structure holds vfdb structure described below.
+ * The VF array is indexed by the relative vfid.
+ */
+#define BNX2X_VF_MAX_QUEUES 16
+#define BNX2X_VF_MAX_TPA_AGG_QUEUES 8
+
+struct bnx2x_sriov {
+ u32 first_vf_in_pf;
+
+ /* standard SRIOV capability fields, mostly for debugging */
+ int pos; /* capability position */
+ int nres; /* number of resources */
+ u32 cap; /* SR-IOV Capabilities */
+ u16 ctrl; /* SR-IOV Control */
+ u16 total; /* total VFs associated with the PF */
+ u16 initial; /* initial VFs associated with the PF */
+ u16 nr_virtfn; /* number of VFs available */
+ u16 offset; /* first VF Routing ID offset */
+ u16 stride; /* following VF stride */
+ u32 pgsz; /* page size for BAR alignment */
+ u8 link; /* Function Dependency Link */
+};
+
+/* bars */
+struct bnx2x_vf_bar {
+ u64 bar;
+ u32 size;
+};
+
+struct bnx2x_vf_bar_info {
+ struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS];
+ u8 nr_bars;
+};
+
+/* vf queue (used both for rx or tx) */
+struct bnx2x_vf_queue {
+ struct eth_context *cxt;
+
+ /* MACs object */
+ struct bnx2x_vlan_mac_obj mac_obj;
+
+ /* VLANs object */
+ struct bnx2x_vlan_mac_obj vlan_obj;
+ atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */
+ unsigned long accept_flags; /* last accept flags configured */
+
+ /* Queue Slow-path State object */
+ struct bnx2x_queue_sp_obj sp_obj;
+
+ u32 cid;
+ u16 index;
+ u16 sb_idx;
+ bool is_leading;
+ bool sp_initialized;
+};
+
+/* struct bnx2x_vf_queue_construct_params - prepare queue construction
+ * parameters: q-init, q-setup and SB index
+ */
+struct bnx2x_vf_queue_construct_params {
+ struct bnx2x_queue_state_params qstate;
+ struct bnx2x_queue_setup_params prep_qsetup;
+};
+
+/* forward */
+struct bnx2x_virtf;
+
+/* VFOP definitions */
+
+struct bnx2x_vf_mac_vlan_filter {
+ int type;
+#define BNX2X_VF_FILTER_MAC 1
+#define BNX2X_VF_FILTER_VLAN 2
+
+ bool add;
+ u8 *mac;
+ u16 vid;
+};
+
+struct bnx2x_vf_mac_vlan_filters {
+ int count;
+ struct bnx2x_vf_mac_vlan_filter filters[];
+};
+
+/* vf context */
+struct bnx2x_virtf {
+ u16 cfg_flags;
+#define VF_CFG_STATS 0x0001
+#define VF_CFG_FW_FC 0x0002
+#define VF_CFG_TPA 0x0004
+#define VF_CFG_INT_SIMD 0x0008
+#define VF_CACHE_LINE 0x0010
+#define VF_CFG_VLAN 0x0020
+#define VF_CFG_STATS_COALESCE 0x0040
+
+ u8 state;
+#define VF_FREE 0 /* VF ready to be acquired holds no resc */
+#define VF_ACQUIRED 1 /* VF acquired, but not initialized */
+#define VF_ENABLED 2 /* VF Enabled */
+#define VF_RESET 3 /* VF FLR'd, pending cleanup */
+
+ bool flr_clnup_stage; /* true during flr cleanup */
+
+ /* dma */
+ dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */
+ u16 stats_stride;
+ dma_addr_t spq_map;
+ dma_addr_t bulletin_map;
+
+ /* Allocated resources counters. Before the VF is acquired, the
+ * counters hold the following values:
+ *
+ * - xxq_count = 0 as the queues memory is not allocated yet.
+ *
+ * - sb_count = The number of status blocks configured for this VF in
+ * the IGU CAM. Initially read during probe.
+ *
+ * - xx_rules_count = The number of rules statically and equally
+ * allocated for each VF, during PF load.
+ */
+ struct vf_pf_resc_request alloc_resc;
+#define vf_rxq_count(vf) ((vf)->alloc_resc.num_rxqs)
+#define vf_txq_count(vf) ((vf)->alloc_resc.num_txqs)
+#define vf_sb_count(vf) ((vf)->alloc_resc.num_sbs)
+#define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters)
+#define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters)
+#define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters)
+ /* Hide a single vlan filter credit for the hypervisor */
+#define vf_vlan_rules_visible_cnt(vf) (vf_vlan_rules_cnt(vf) - 1)
+
+ u8 sb_count; /* actual number of SBs */
+ u8 igu_base_id; /* base igu status block id */
+
+ struct bnx2x_vf_queue *vfqs;
+#define LEADING_IDX 0
+#define bnx2x_vfq_is_leading(vfq) ((vfq)->index == LEADING_IDX)
+#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var)
+#define bnx2x_leading_vfq(vf, var) ((vf)->vfqs[LEADING_IDX].var)
+
+ u8 index; /* index in the vf array */
+ u8 abs_vfid;
+ u8 sp_cl_id;
+ u32 error; /* 0 means all's-well */
+
+ /* BDF */
+ unsigned int bus;
+ unsigned int devfn;
+
+ /* bars */
+ struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS];
+
+ /* set-mac ramrod state 1-pending, 0-done */
+ unsigned long filter_state;
+
+ /* leading rss client id ~~ the client id of the first rxq, must be
+ * set for each txq.
+ */
+ int leading_rss;
+
+ /* MCAST object */
+ int mcast_list_len;
+ struct bnx2x_mcast_obj mcast_obj;
+
+ /* RSS configuration object */
+ struct bnx2x_rss_config_obj rss_conf_obj;
+
+ /* slow-path operations */
+ struct mutex op_mutex; /* one vfop at a time mutex */
+ enum channel_tlvs op_current;
+};
+
+#define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn)
+
+#define for_each_vf(bp, var) \
+ for ((var) = 0; (var) < BNX2X_NR_VIRTFN(bp); (var)++)
+
+#define for_each_vfq(vf, var) \
+ for ((var) = 0; (var) < vf_rxq_count(vf); (var)++)
+
+#define for_each_vf_sb(vf, var) \
+ for ((var) = 0; (var) < vf_sb_count(vf); (var)++)
+
+#define is_vf_multi(vf) (vf_rxq_count(vf) > 1)
+
+#define HW_VF_HANDLE(bp, abs_vfid) \
+ (u16)(BP_ABS_FUNC((bp)) | (1<<3) | ((u16)(abs_vfid) << 4))
+
+#define FW_PF_MAX_HANDLE 8
+
+#define FW_VF_HANDLE(abs_vfid) \
+ (abs_vfid + FW_PF_MAX_HANDLE)
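+
+/* Worked example (illustrative): for PF absolute function 1 and
+ * abs_vfid 5, HW_VF_HANDLE() yields 0x1 | 0x8 | (5 << 4) = 0x59,
+ * while FW_VF_HANDLE(5) = 5 + 8 = 13, keeping VF handles above the
+ * FW_PF_MAX_HANDLE range reserved for PFs.
+ */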
+
+/* locking and unlocking the channel mutex */
+void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ enum channel_tlvs tlv);
+
+void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ enum channel_tlvs expected_tlv);
+
+/* VF mail box (aka vf-pf channel) */
+
+/* a container for the bi-directional vf<-->pf messages.
+ * The actual response will be placed according to the offset parameter
+ * provided in the request
+ */
+
+#define MBX_MSG_ALIGN 8
+#define MBX_MSG_ALIGNED_SIZE (roundup(sizeof(struct bnx2x_vf_mbx_msg), \
+ MBX_MSG_ALIGN))
+
+struct bnx2x_vf_mbx_msg {
+ union vfpf_tlvs req;
+ union pfvf_tlvs resp;
+};
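+
+/* Layout note: the request and response live in one DMA-coherent
+ * buffer; the VF sets first_tlv.resp_msg_offset to sizeof(req) when
+ * preparing a request (see bnx2x_vfpf_prep()), and the PF places the
+ * response at that offset.
+ */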
+
+struct bnx2x_vf_mbx {
+ struct bnx2x_vf_mbx_msg *msg;
+ dma_addr_t msg_mapping;
+
+ /* VF GPA address */
+ u32 vf_addr_lo;
+ u32 vf_addr_hi;
+
+ struct vfpf_first_tlv first_tlv; /* saved VF request header */
+};
+
+struct bnx2x_vf_sp {
+ union {
+ struct eth_classify_rules_ramrod_data e2;
+ } mac_rdata;
+
+ union {
+ struct eth_classify_rules_ramrod_data e2;
+ } vlan_rdata;
+
+ union {
+ struct eth_filter_rules_ramrod_data e2;
+ } rx_mode_rdata;
+
+ union {
+ struct eth_multicast_rules_ramrod_data e2;
+ } mcast_rdata;
+
+ union {
+ struct client_init_ramrod_data init_data;
+ struct client_update_ramrod_data update_data;
+ } q_data;
+
+ union {
+ struct eth_rss_update_ramrod_data e2;
+ } rss_rdata;
+};
+
+struct hw_dma {
+ void *addr;
+ dma_addr_t mapping;
+ size_t size;
+};
+
+struct bnx2x_vfdb {
+#define BP_VFDB(bp) ((bp)->vfdb)
+ /* vf array */
+ struct bnx2x_virtf *vfs;
+#define BP_VF(bp, idx) (&((bp)->vfdb->vfs[(idx)]))
+#define bnx2x_vf(bp, idx, var) ((bp)->vfdb->vfs[(idx)].var)
+
+ /* queue array - for all vfs */
+ struct bnx2x_vf_queue *vfqs;
+
+ /* vf HW contexts */
+ struct hw_dma context[BNX2X_VF_CIDS/ILT_PAGE_CIDS];
+#define BP_VF_CXT_PAGE(bp, i) (&(bp)->vfdb->context[(i)])
+
+ /* SR-IOV information */
+ struct bnx2x_sriov sriov;
+ struct hw_dma mbx_dma;
+#define BP_VF_MBX_DMA(bp) (&((bp)->vfdb->mbx_dma))
+ struct bnx2x_vf_mbx mbxs[BNX2X_MAX_NUM_OF_VFS];
+#define BP_VF_MBX(bp, vfid) (&((bp)->vfdb->mbxs[(vfid)]))
+
+ struct hw_dma bulletin_dma;
+#define BP_VF_BULLETIN_DMA(bp) (&((bp)->vfdb->bulletin_dma))
+#define BP_VF_BULLETIN(bp, vf) \
+ (((struct pf_vf_bulletin_content *)(BP_VF_BULLETIN_DMA(bp)->addr)) \
+ + (vf))
+
+ struct hw_dma sp_dma;
+#define bnx2x_vf_sp(bp, vf, field) ((bp)->vfdb->sp_dma.addr + \
+ (vf)->index * sizeof(struct bnx2x_vf_sp) + \
+ offsetof(struct bnx2x_vf_sp, field))
+#define bnx2x_vf_sp_map(bp, vf, field) ((bp)->vfdb->sp_dma.mapping + \
+ (vf)->index * sizeof(struct bnx2x_vf_sp) + \
+ offsetof(struct bnx2x_vf_sp, field))
+
+#define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32)
+ u32 flrd_vfs[FLRD_VFS_DWORDS];
+
+ /* the number of msix vectors belonging to this PF designated for VFs */
+ u16 vf_sbs_pool;
+ u16 first_vf_igu_entry;
+
+ /* sp_rtnl synchronization */
+ struct mutex event_mutex;
+ u64 event_occur;
+};
+
+/* queue access */
+static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index)
+{
+ return &(vf->vfqs[index]);
+}
+
+/* FW ids */
+static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx)
+{
+ return vf->igu_base_id + sb_idx;
+}
+
+static inline u8 vf_hc_qzone(struct bnx2x_virtf *vf, u16 sb_idx)
+{
+ return vf_igu_sb(vf, sb_idx);
+}
+
+static inline u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
+{
+ return vf->igu_base_id + q->index;
+}
+
+static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
+{
+ if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
+ return vf->leading_rss;
+ else
+ return vfq_cl_id(vf, q);
+}
+
+static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
+{
+ return vfq_cl_id(vf, q);
+}
+
+/* global iov routines */
+int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line);
+int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, int num_vfs_param);
+void bnx2x_iov_remove_one(struct bnx2x *bp);
+void bnx2x_iov_free_mem(struct bnx2x *bp);
+int bnx2x_iov_alloc_mem(struct bnx2x *bp);
+int bnx2x_iov_nic_init(struct bnx2x *bp);
+int bnx2x_iov_chip_cleanup(struct bnx2x *bp);
+void bnx2x_iov_init_dq(struct bnx2x *bp);
+void bnx2x_iov_init_dmae(struct bnx2x *bp);
+void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
+ struct bnx2x_queue_sp_obj **q_obj);
+int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem);
+void bnx2x_iov_adjust_stats_req(struct bnx2x *bp);
+void bnx2x_iov_storm_stats_update(struct bnx2x *bp);
+/* global vf mailbox routines */
+void bnx2x_vf_mbx(struct bnx2x *bp);
+void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
+ struct vf_pf_event_data *vfpf_event);
+void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid);
+
+/* CORE VF API */
+typedef u8 bnx2x_mac_addr_t[ETH_ALEN];
+
+/* acquire */
+int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct vf_pf_resc_request *resc);
+/* init */
+int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ dma_addr_t *sb_map);
+
+/* VFOP queue construction helpers */
+void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_queue_init_params *init_params,
+ struct bnx2x_queue_setup_params *setup_params,
+ u16 q_idx, u16 sb_idx);
+
+void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_queue_init_params *init_params,
+ struct bnx2x_queue_setup_params *setup_params,
+ u16 q_idx, u16 sb_idx);
+
+void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vf_queue *q,
+ struct bnx2x_vf_queue_construct_params *p,
+ unsigned long q_type);
+
+int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mac_vlan_filters *filters,
+ int qid, bool drv_only);
+
+int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
+ struct bnx2x_vf_queue_construct_params *qctor);
+
+int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid);
+
+int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only);
+
+int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ int qid, unsigned long accept_flags);
+
+int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf);
+
+int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf);
+
+int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_config_rss_params *rss);
+
+int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct vfpf_tpa_tlv *tlv,
+ struct bnx2x_queue_update_tpa_params *params);
+
+/* VF release ~ VF close + VF release-resources
+ *
+ * Release is the ultimate SW shutdown and is called whenever an
+ * irrecoverable error is encountered.
+ */
+int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf);
+int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid);
+u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf);
+
+/* FLR routines */
+
+/* VF FLR helpers */
+int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid);
+void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid);
+
+/* Handles an FLR (or VF_DISABLE) notification form the MCP */
+void bnx2x_vf_handle_flr_event(struct bnx2x *bp);
+
+bool bnx2x_tlv_supported(u16 tlvtype);
+
+u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
+ struct pf_vf_bulletin_content *bulletin);
+int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf);
+
+enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
+
+/* VF side vfpf channel functions */
+int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count);
+int bnx2x_vfpf_release(struct bnx2x *bp);
+int bnx2x_vfpf_init(struct bnx2x *bp);
+void bnx2x_vfpf_close_vf(struct bnx2x *bp);
+int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ bool is_leading);
+int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set);
+int bnx2x_vfpf_config_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *params);
+int bnx2x_vfpf_set_mcast(struct net_device *dev);
+int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
+
+static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf,
+ size_t buf_len)
+{
+ strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
+}
+
+static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp)
+{
+ return PXP_VF_ADDR_USDM_QUEUES_START +
+ bp->acquire_resp.resc.hw_qid[fp->index] *
+ sizeof(struct ustorm_queue_zone_data);
+}
+
+void bnx2x_timer_sriov(struct bnx2x *bp);
+void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp);
+void bnx2x_vf_pci_dealloc(struct bnx2x *bp);
+int bnx2x_vf_pci_alloc(struct bnx2x *bp);
+int bnx2x_enable_sriov(struct bnx2x *bp);
+void bnx2x_disable_sriov(struct bnx2x *bp);
+static inline int bnx2x_vf_headroom(struct bnx2x *bp)
+{
+ return bp->vfdb->sriov.nr_virtfn * BNX2X_CIDS_PER_VF;
+}
+void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
+int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
+void bnx2x_iov_channel_down(struct bnx2x *bp);
+
+void bnx2x_iov_task(struct work_struct *work);
+
+void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag);
+
+#else /* CONFIG_BNX2X_SRIOV */
+
+static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
+ struct bnx2x_queue_sp_obj **q_obj) {}
+static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
+static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp,
+ union event_ring_elem *elem) {return 1; }
+static inline void bnx2x_vf_mbx(struct bnx2x *bp) {}
+static inline void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
+ struct vf_pf_event_data *vfpf_event) {}
+static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; }
+static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {}
+static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_iov_free_mem(struct bnx2x *bp) {}
+static inline int bnx2x_iov_chip_cleanup(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {}
+static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
+ int num_vfs_param) {return 0; }
+static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {}
+static inline int bnx2x_enable_sriov(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_disable_sriov(struct bnx2x *bp) {}
+static inline int bnx2x_vfpf_acquire(struct bnx2x *bp,
+ u8 tx_count, u8 rx_count) {return 0; }
+static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
+static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; }
+static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
+ u8 vf_qid, bool set) {return 0; }
+static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *params) {return 0; }
+static inline int bnx2x_vfpf_set_mcast(struct net_device *dev) {return 0; }
+static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_vf_headroom(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) {}
+static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf,
+ size_t buf_len) {}
+static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp) {return 0; }
+static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
+{
+ return PFVF_BULLETIN_UNCHANGED;
+}
+static inline void bnx2x_timer_sriov(struct bnx2x *bp) {}
+
+static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
+{
+ return NULL;
+}
+
+static inline void bnx2x_vf_pci_dealloc(struct bnx2x *bp) {}
+static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
+static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
+static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {}
+
+static inline void bnx2x_iov_task(struct work_struct *work) {}
+static inline void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) {}
+
+#endif /* CONFIG_BNX2X_SRIOV */
+#endif /* BNX2X_SRIOV_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 89ec0667140..ca47665f94b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1,12 +1,12 @@
/* bnx2x_stats.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
@@ -19,7 +19,7 @@
#include "bnx2x_stats.h"
#include "bnx2x_cmn.h"
-
+#include "bnx2x_sriov.h"
/* Statistics */
@@ -79,6 +79,42 @@ static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
* Init service functions
*/
+static void bnx2x_dp_stats(struct bnx2x *bp)
+{
+ int i;
+
+ DP(BNX2X_MSG_STATS, "dumping stats:\n"
+ "fw_stats_req\n"
+ " hdr\n"
+ " cmd_num %d\n"
+ " reserved0 %d\n"
+ " drv_stats_counter %d\n"
+ " reserved1 %d\n"
+ " stats_counters_addrs %x %x\n",
+ bp->fw_stats_req->hdr.cmd_num,
+ bp->fw_stats_req->hdr.reserved0,
+ bp->fw_stats_req->hdr.drv_stats_counter,
+ bp->fw_stats_req->hdr.reserved1,
+ bp->fw_stats_req->hdr.stats_counters_addrs.hi,
+ bp->fw_stats_req->hdr.stats_counters_addrs.lo);
+
+ for (i = 0; i < bp->fw_stats_req->hdr.cmd_num; i++) {
+ DP(BNX2X_MSG_STATS,
+ "query[%d]\n"
+ " kind %d\n"
+ " index %d\n"
+ " funcID %d\n"
+ " reserved %d\n"
+ " address %x %x\n",
+ i, bp->fw_stats_req->query[i].kind,
+ bp->fw_stats_req->query[i].index,
+ bp->fw_stats_req->query[i].funcID,
+ bp->fw_stats_req->query[i].reserved,
+ bp->fw_stats_req->query[i].address.hi,
+ bp->fw_stats_req->query[i].address.lo);
+ }
+}
+
/* Post the next statistics ramrod. Protect it with the spin in
* order to ensure the strict order between statistics ramrods
* (each ramrod has a sequence number passed in a
@@ -103,7 +139,9 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
bp->fw_stats_req->hdr.drv_stats_counter);
-
+	/* adjust the ramrod to include VF queues' statistics */
+ bnx2x_iov_adjust_stats_req(bp);
+ bnx2x_dp_stats(bp);
/* send FW stats ramrod */
rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
@@ -158,7 +196,7 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp)
} else if (bp->func_stx) {
*stats_comp = 0;
- bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
+ bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp);
}
}
@@ -174,7 +212,7 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
break;
}
cnt--;
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
}
return 1;
}
@@ -183,7 +221,8 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
* Statistics service functions
*/
-static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+/* should be called under stats_sema */
+static void __bnx2x_stats_pmf_update(struct bnx2x *bp)
{
struct dmae_command *dmae;
u32 opcode;
@@ -480,29 +519,61 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
*stats_comp = 0;
}
-static void bnx2x_stats_start(struct bnx2x *bp)
+/* should be called under stats_sema */
+static void __bnx2x_stats_start(struct bnx2x *bp)
{
- if (bp->port.pmf)
- bnx2x_port_stats_init(bp);
+ if (IS_PF(bp)) {
+ if (bp->port.pmf)
+ bnx2x_port_stats_init(bp);
- else if (bp->func_stx)
- bnx2x_func_stats_init(bp);
+ else if (bp->func_stx)
+ bnx2x_func_stats_init(bp);
- bnx2x_hw_stats_post(bp);
- bnx2x_storm_stats_post(bp);
+ bnx2x_hw_stats_post(bp);
+ bnx2x_storm_stats_post(bp);
+ }
+
+ bp->stats_started = true;
+}
+
+static void bnx2x_stats_start(struct bnx2x *bp)
+{
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
+ __bnx2x_stats_start(bp);
+ up(&bp->stats_sema);
}
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
bnx2x_stats_comp(bp);
- bnx2x_stats_pmf_update(bp);
- bnx2x_stats_start(bp);
+ __bnx2x_stats_pmf_update(bp);
+ __bnx2x_stats_start(bp);
+ up(&bp->stats_sema);
+}
+
+static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+{
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
+ __bnx2x_stats_pmf_update(bp);
+ up(&bp->stats_sema);
}
static void bnx2x_stats_restart(struct bnx2x *bp)
{
+ /* vfs travel through here as part of the statistics FSM, but no action
+ * is required
+ */
+ if (IS_VF(bp))
+ return;
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
bnx2x_stats_comp(bp);
- bnx2x_stats_start(bp);
+ __bnx2x_stats_start(bp);
+ up(&bp->stats_sema);
}
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
@@ -832,25 +903,14 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
return 0;
}
-static int bnx2x_storm_stats_update(struct bnx2x *bp)
+static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp)
{
- struct tstorm_per_port_stats *tport =
- &bp->fw_stats_data->port.tstorm_port_statistics;
- struct tstorm_per_pf_stats *tfunc =
- &bp->fw_stats_data->pf.tstorm_pf_statistics;
- struct host_func_stats *fstats = &bp->func_stats;
- struct bnx2x_eth_stats *estats = &bp->eth_stats;
- struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
- int i;
u16 cur_stats_counter;
-
/* Make sure we use the value of the counter
* used for sending the last stats ramrod.
*/
- spin_lock_bh(&bp->stats_lock);
cur_stats_counter = bp->stats_counter - 1;
- spin_unlock_bh(&bp->stats_lock);
/* are storm stats valid? */
if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
@@ -880,6 +940,23 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
le16_to_cpu(counters->tstats_counter), bp->stats_counter);
return -EAGAIN;
}
+ return 0;
+}
+
+static int bnx2x_storm_stats_update(struct bnx2x *bp)
+{
+ struct tstorm_per_port_stats *tport =
+ &bp->fw_stats_data->port.tstorm_port_statistics;
+ struct tstorm_per_pf_stats *tfunc =
+ &bp->fw_stats_data->pf.tstorm_pf_statistics;
+ struct host_func_stats *fstats = &bp->func_stats;
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
+ int i;
+
+ /* vfs stat counter is managed by pf */
+ if (IS_PF(bp) && bnx2x_storm_stats_validate_counters(bp))
+ return -EAGAIN;
estats->error_bytes_received_hi = 0;
estats->error_bytes_received_lo = 0;
@@ -945,7 +1022,6 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
qstats->valid_bytes_received_lo =
qstats->total_bytes_received_lo;
-
UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
total_unicast_packets_received);
UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
@@ -953,8 +1029,8 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
total_broadcast_packets_received);
UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
- etherstatsoverrsizepkts);
- UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard);
+ etherstatsoverrsizepkts, 32);
+ UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);
SUB_EXTEND_USTAT(ucast_no_buff_pkts,
total_unicast_packets_received);
@@ -1033,15 +1109,15 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
estats->total_bytes_received_lo,
estats->rx_stat_ifhcinbadoctets_lo);
- ADD_64(estats->total_bytes_received_hi,
- le32_to_cpu(tfunc->rcv_error_bytes.hi),
- estats->total_bytes_received_lo,
- le32_to_cpu(tfunc->rcv_error_bytes.lo));
+ ADD_64_LE(estats->total_bytes_received_hi,
+ tfunc->rcv_error_bytes.hi,
+ estats->total_bytes_received_lo,
+ tfunc->rcv_error_bytes.lo);
- ADD_64(estats->error_bytes_received_hi,
- le32_to_cpu(tfunc->rcv_error_bytes.hi),
- estats->error_bytes_received_lo,
- le32_to_cpu(tfunc->rcv_error_bytes.lo));
+ ADD_64_LE(estats->error_bytes_received_hi,
+ tfunc->rcv_error_bytes.hi,
+ estats->error_bytes_received_lo,
+ tfunc->rcv_error_bytes.lo);
UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);
@@ -1171,26 +1247,43 @@ static void bnx2x_stats_update(struct bnx2x *bp)
{
u32 *stats_comp = bnx2x_sp(bp, stats_comp);
- if (bnx2x_edebug_stats_stopped(bp))
+	/* we run the update from timer context, so give up if somebody
+	 * is in the middle of a transition
+	 */
+ if (down_trylock(&bp->stats_sema))
return;
- if (*stats_comp != DMAE_COMP_VAL)
- return;
+ if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started)
+ goto out;
- if (bp->port.pmf)
- bnx2x_hw_stats_update(bp);
+ if (IS_PF(bp)) {
+ if (*stats_comp != DMAE_COMP_VAL)
+ goto out;
- if (bnx2x_storm_stats_update(bp)) {
- if (bp->stats_pending++ == 3) {
- BNX2X_ERR("storm stats were not updated for 3 times\n");
- bnx2x_panic();
+ if (bp->port.pmf)
+ bnx2x_hw_stats_update(bp);
+
+ if (bnx2x_storm_stats_update(bp)) {
+ if (bp->stats_pending++ == 3) {
+ BNX2X_ERR("storm stats were not updated for 3 times\n");
+ bnx2x_panic();
+ }
+ goto out;
}
- return;
+ } else {
+		/* vf doesn't collect HW statistics and doesn't get
+		 * completions; perform only the update
+		 */
+ bnx2x_storm_stats_update(bp);
}
bnx2x_net_stats_update(bp);
bnx2x_drv_stats_update(bp);
+ /* vf is done */
+ if (IS_VF(bp))
+ goto out;
+
if (netif_msg_timer(bp)) {
struct bnx2x_eth_stats *estats = &bp->eth_stats;
@@ -1200,6 +1293,9 @@ static void bnx2x_stats_update(struct bnx2x *bp)
bnx2x_hw_stats_post(bp);
bnx2x_storm_stats_post(bp);
+
+out:
+ up(&bp->stats_sema);
}
static void bnx2x_port_stats_stop(struct bnx2x *bp)
@@ -1265,6 +1361,11 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
{
int update = 0;
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
+
+ bp->stats_started = false;
+
bnx2x_stats_comp(bp);
if (bp->port.pmf)
@@ -1281,6 +1382,8 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
bnx2x_hw_stats_post(bp);
bnx2x_stats_comp(bp);
}
+
+ up(&bp->stats_sema);
}
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
@@ -1309,15 +1412,17 @@ static const struct {
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
enum bnx2x_stats_state state;
+ void (*action)(struct bnx2x *bp);
if (unlikely(bp->panic))
return;
spin_lock_bh(&bp->stats_lock);
state = bp->stats_state;
bp->stats_state = bnx2x_stats_stm[state][event].next_state;
+ action = bnx2x_stats_stm[state][event].action;
spin_unlock_bh(&bp->stats_lock);
- bnx2x_stats_stm[state][event].action(bp);
+ action(bp);
if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
@@ -1479,11 +1584,51 @@ static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
}
}
+void bnx2x_memset_stats(struct bnx2x *bp)
+{
+ int i;
+
+ /* function stats */
+ for_each_queue(bp, i) {
+ struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
+
+ memset(&fp_stats->old_tclient, 0,
+ sizeof(fp_stats->old_tclient));
+ memset(&fp_stats->old_uclient, 0,
+ sizeof(fp_stats->old_uclient));
+ memset(&fp_stats->old_xclient, 0,
+ sizeof(fp_stats->old_xclient));
+ if (bp->stats_init) {
+ memset(&fp_stats->eth_q_stats, 0,
+ sizeof(fp_stats->eth_q_stats));
+ memset(&fp_stats->eth_q_stats_old, 0,
+ sizeof(fp_stats->eth_q_stats_old));
+ }
+ }
+
+ memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
+
+ if (bp->stats_init) {
+ memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
+ memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
+ memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
+ memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
+ memset(&bp->func_stats, 0, sizeof(bp->func_stats));
+ }
+
+ bp->stats_state = STATS_STATE_DISABLED;
+
+ if (bp->port.pmf && bp->port.port_stx)
+ bnx2x_port_stats_base_init(bp);
+
+	/* mark the end of statistics initialization */
+ bp->stats_init = false;
+}
+
void bnx2x_stats_init(struct bnx2x *bp)
{
int /*abs*/port = BP_PORT(bp);
int mb_idx = BP_FW_MB_IDX(bp);
- int i;
bp->stats_pending = 0;
bp->executer_idx = 0;
@@ -1519,36 +1664,11 @@ void bnx2x_stats_init(struct bnx2x *bp)
&(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
}
- /* function stats */
- for_each_queue(bp, i) {
- struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
-
- memset(&fp_stats->old_tclient, 0,
- sizeof(fp_stats->old_tclient));
- memset(&fp_stats->old_uclient, 0,
- sizeof(fp_stats->old_uclient));
- memset(&fp_stats->old_xclient, 0,
- sizeof(fp_stats->old_xclient));
- if (bp->stats_init) {
- memset(&fp_stats->eth_q_stats, 0,
- sizeof(fp_stats->eth_q_stats));
- memset(&fp_stats->eth_q_stats_old, 0,
- sizeof(fp_stats->eth_q_stats_old));
- }
- }
-
/* Prepare statistics ramrod data */
bnx2x_prep_fw_stats_req(bp);
- memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
+ /* Clean SP from previous statistics */
if (bp->stats_init) {
- memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
- memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
- memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
- memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
- memset(&bp->func_stats, 0, sizeof(bp->func_stats));
-
- /* Clean SP from previous statistics */
if (bp->func_stx) {
memset(bnx2x_sp(bp, func_stats), 0,
sizeof(struct host_func_stats));
@@ -1558,13 +1678,7 @@ void bnx2x_stats_init(struct bnx2x *bp)
}
}
- bp->stats_state = STATS_STATE_DISABLED;
-
- if (bp->port.pmf && bp->port.port_stx)
- bnx2x_port_stats_base_init(bp);
-
- /* mark the end of statistics initializiation */
- bp->stats_init = false;
+ bnx2x_memset_stats(bp);
}
void bnx2x_save_statistics(struct bnx2x *bp)
@@ -1879,3 +1993,14 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
estats->mac_discard);
}
}
+
+void bnx2x_stats_safe_exec(struct bnx2x *bp,
+ void (func_to_exec)(void *cookie),
+			   void *cookie)
+{
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
+ bnx2x_stats_comp(bp);
+ func_to_exec(cookie);
+ __bnx2x_stats_start(bp);
+ up(&bp->stats_sema);
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index b4d7b26c7fe..2beceaefdee 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -1,12 +1,12 @@
/* bnx2x_stats.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
@@ -40,7 +40,6 @@ struct nig_stats {
u32 egress_mac_pkt1_hi;
};
-
enum bnx2x_stats_event {
STATS_EVENT_PMF = 0,
STATS_EVENT_LINK_UP,
@@ -208,7 +207,6 @@ struct bnx2x_eth_stats {
u32 eee_tx_lpi;
};
-
struct bnx2x_eth_q_stats {
u32 total_unicast_bytes_received_hi;
u32 total_unicast_bytes_received_lo;
@@ -331,7 +329,6 @@ struct bnx2x_fw_port_stats_old {
u32 mac_discard;
};
-
/****************************************************************************
* Macros
****************************************************************************/
@@ -421,16 +418,19 @@ struct bnx2x_fw_port_stats_old {
new->s); \
} while (0)
-#define UPDATE_EXTEND_TSTAT(s, t) \
+#define UPDATE_EXTEND_TSTAT_X(s, t, size) \
do { \
- diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
+ diff = le##size##_to_cpu(tclient->s) - \
+ le##size##_to_cpu(old_tclient->s); \
old_tclient->s = tclient->s; \
ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
-#define UPDATE_EXTEND_E_TSTAT(s, t) \
+#define UPDATE_EXTEND_TSTAT(s, t) UPDATE_EXTEND_TSTAT_X(s, t, 32)
+
+#define UPDATE_EXTEND_E_TSTAT(s, t, size) \
do { \
- UPDATE_EXTEND_TSTAT(s, t); \
+ UPDATE_EXTEND_TSTAT_X(s, t, size); \
ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \
} while (0)
@@ -456,8 +456,9 @@ struct bnx2x_fw_port_stats_old {
#define UPDATE_QSTAT(s, t) \
do { \
- qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi); \
qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \
+ qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi) \
+ + ((qstats->t##_lo < qstats_old->t##_lo) ? 1 : 0); \
} while (0)
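+
+/* Worked example of the carry above (illustrative): with old t_lo =
+ * 0xfffffff0 and s.lo = 0x20, the new t_lo wraps to 0x10; since
+ * 0x10 < 0xfffffff0, the missing carry is added into t_hi.
+ */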
#define UPDATE_QSTAT_OLD(f) \
@@ -532,13 +533,15 @@ struct bnx2x_fw_port_stats_old {
SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
-
/* forward */
struct bnx2x;
+void bnx2x_memset_stats(struct bnx2x *bp);
void bnx2x_stats_init(struct bnx2x *bp);
-
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
+void bnx2x_stats_safe_exec(struct bnx2x *bp,
+ void (func_to_exec)(void *cookie),
+ void *cookie);
/**
* bnx2x_save_statistics - save statistics when unloading.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
new file mode 100644
index 00000000000..d712d0ddd71
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -0,0 +1,2034 @@
+/* bnx2x_vfpf.c: Broadcom Everest network driver.
+ *
+ * Copyright 2009-2013 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ * Ariel Elior <ariel.elior@qlogic.com>
+ */
+
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include <linux/crc32.h>
+
+static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
+
+/* place a given tlv on the tlv buffer at a given offset */
+static void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list,
+ u16 offset, u16 type, u16 length)
+{
+ struct channel_tlv *tl =
+ (struct channel_tlv *)(tlvs_list + offset);
+
+ tl->type = type;
+ tl->length = length;
+}
+
+/* Clear the mailbox and init the header of the first tlv */
+static void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
+ u16 type, u16 length)
+{
+ mutex_lock(&bp->vf2pf_mutex);
+
+ DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
+ type);
+
+ /* Clear mailbox */
+ memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));
+
+ /* init type and length */
+ bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);
+
+ /* init first tlv header */
+ first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
+}
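+
+/* Illustrative sketch: a request is built as a chain of tlvs terminated
+ * by a list-end tlv, with offsets relative to the start of the list:
+ *
+ *	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));
+ *	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,
+ *		      CHANNEL_TLV_LIST_END,
+ *		      sizeof(struct channel_list_end_tlv));
+ */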
+
+/* releases the mailbox */
+static void bnx2x_vfpf_finalize(struct bnx2x *bp,
+ struct vfpf_first_tlv *first_tlv)
+{
+ DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
+ first_tlv->tl.type);
+
+ mutex_unlock(&bp->vf2pf_mutex);
+}
+
+/* Finds a TLV by type in a TLV buffer; if found, returns a pointer to it */
+static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list,
+ enum channel_tlvs req_tlv)
+{
+ struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
+
+ do {
+ if (tlv->type == req_tlv)
+ return tlv;
+
+ if (!tlv->length) {
+ BNX2X_ERR("Found TLV with length 0\n");
+ return NULL;
+ }
+
+ tlvs_list += tlv->length;
+ tlv = (struct channel_tlv *)tlvs_list;
+ } while (tlv->type != CHANNEL_TLV_LIST_END);
+
+ DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv);
+
+ return NULL;
+}
+
+/* list the types and lengths of the tlvs on the buffer */
+static void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
+{
+ int i = 1;
+ struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
+
+ while (tlv->type != CHANNEL_TLV_LIST_END) {
+ /* output tlv */
+ DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
+ tlv->type, tlv->length);
+
+ /* advance to next tlv */
+ tlvs_list += tlv->length;
+
+		/* cast general tlv list pointer to channel tlv header */
+ tlv = (struct channel_tlv *)tlvs_list;
+
+ i++;
+
+ /* break condition for this loop */
+ if (i > MAX_TLVS_IN_LIST) {
+ WARN(true, "corrupt tlvs");
+ return;
+ }
+ }
+
+ /* output last tlv */
+ DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
+ tlv->type, tlv->length);
+}
+
+/* test whether we support a tlv type */
+bool bnx2x_tlv_supported(u16 tlvtype)
+{
+ return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
+}
+
+static inline int bnx2x_pfvf_status_codes(int rc)
+{
+ switch (rc) {
+ case 0:
+ return PFVF_STATUS_SUCCESS;
+ case -ENOMEM:
+ return PFVF_STATUS_NO_RESOURCE;
+ default:
+ return PFVF_STATUS_FAILURE;
+ }
+}
+
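+/* Send a prepared request over the vf-pf channel: publish the message
+ * GPA in the CSDM vf zone, make sure the write is visible, ring the
+ * trigger, then poll the caller-supplied 'done' byte which the PF
+ * writes with a PFVF_STATUS_* value once the response is ready.
+ */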
+static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
+{
+ struct cstorm_vf_zone_data __iomem *zone_data =
+ REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
+ int tout = 100, interval = 100; /* wait for 10 seconds */
+
+ if (*done) {
+ BNX2X_ERR("done was non zero before message to pf was sent\n");
+ WARN_ON(true);
+ return -EINVAL;
+ }
+
+	/* if the PF indicated the channel is down, avoid sending a message;
+	 * return success so the calling flow can continue
+	 */
+ bnx2x_sample_bulletin(bp);
+ if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
+ DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n");
+ *done = PFVF_STATUS_SUCCESS;
+ return -EINVAL;
+ }
+
+ /* Write message address */
+ writel(U64_LO(msg_mapping),
+ &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
+ writel(U64_HI(msg_mapping),
+ &zone_data->non_trigger.vf_pf_channel.msg_addr_hi);
+
+ /* make sure the address is written before FW accesses it */
+ wmb();
+
+ /* Trigger the PF FW */
+ writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid);
+
+ /* Wait for PF to complete */
+ while ((tout >= 0) && (!*done)) {
+ msleep(interval);
+ tout -= 1;
+
+ /* progress indicator - HV can take its own sweet time in
+ * answering VFs...
+ */
+ DP_CONT(BNX2X_MSG_IOV, ".");
+ }
+
+ if (!*done) {
+ BNX2X_ERR("PF response has timed out\n");
+ return -EAGAIN;
+ }
+ DP(BNX2X_MSG_SP, "Got a response from PF\n");
+ return 0;
+}
+
+static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
+{
+ u32 me_reg;
+ int tout = 10, interval = 100; /* Wait for 1 sec */
+
+ do {
+ /* pxp traps vf read of doorbells and returns me reg value */
+ me_reg = readl(bp->doorbells);
+ if (GOOD_ME_REG(me_reg))
+ break;
+
+ msleep(interval);
+
+		BNX2X_ERR("Invalid ME register value: 0x%08x. Is pf driver up?\n",
+ me_reg);
+ } while (tout-- > 0);
+
+ if (!GOOD_ME_REG(me_reg)) {
+ BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg);
+ return -EINVAL;
+ }
+
+ DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);
+
+ *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
+
+ return 0;
+}
+
+int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
+{
+ int rc = 0, attempts = 0;
+ struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
+ struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
+ struct vfpf_port_phys_id_resp_tlv *phys_port_resp;
+ u32 vf_id;
+ bool resources_acquired = false;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+
+ if (bnx2x_get_vf_id(bp, &vf_id)) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ req->vfdev_info.vf_id = vf_id;
+ req->vfdev_info.vf_os = 0;
+
+ req->resc_request.num_rxqs = rx_count;
+ req->resc_request.num_txqs = tx_count;
+ req->resc_request.num_sbs = bp->igu_sb_cnt;
+ req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
+ req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;
+
+ /* pf 2 vf bulletin board address */
+ req->bulletin_addr = bp->pf2vf_bulletin_mapping;
+
+ /* Request physical port identifier */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,
+ CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv));
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req,
+ req->first_tlv.tl.length + sizeof(struct channel_tlv),
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ while (!resources_acquired) {
+ DP(BNX2X_MSG_SP, "attempting to acquire resources\n");
+
+ /* send acquire request */
+ rc = bnx2x_send_msg2pf(bp,
+ &resp->hdr.status,
+ bp->vf2pf_mbox_mapping);
+
+ /* PF timeout */
+ if (rc)
+ goto out;
+
+ /* copy acquire response from buffer to bp */
+ memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));
+
+ attempts++;
+
+ /* test whether the PF accepted our request. If not, humble
+ * the request and try again.
+ */
+ if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
+ DP(BNX2X_MSG_SP, "resources acquired\n");
+ resources_acquired = true;
+ } else if (bp->acquire_resp.hdr.status ==
+ PFVF_STATUS_NO_RESOURCE &&
+ attempts < VF_ACQUIRE_THRESH) {
+ DP(BNX2X_MSG_SP,
+ "PF unwilling to fulfill resource request. Try PF recommended amount\n");
+
+ /* humble our request */
+ req->resc_request.num_txqs =
+ min(req->resc_request.num_txqs,
+ bp->acquire_resp.resc.num_txqs);
+ req->resc_request.num_rxqs =
+ min(req->resc_request.num_rxqs,
+ bp->acquire_resp.resc.num_rxqs);
+ req->resc_request.num_sbs =
+ min(req->resc_request.num_sbs,
+ bp->acquire_resp.resc.num_sbs);
+ req->resc_request.num_mac_filters =
+ min(req->resc_request.num_mac_filters,
+ bp->acquire_resp.resc.num_mac_filters);
+ req->resc_request.num_vlan_filters =
+ min(req->resc_request.num_vlan_filters,
+ bp->acquire_resp.resc.num_vlan_filters);
+ req->resc_request.num_mc_filters =
+ min(req->resc_request.num_mc_filters,
+ bp->acquire_resp.resc.num_mc_filters);
+
+ /* Clear response buffer */
+ memset(&bp->vf2pf_mbox->resp, 0,
+ sizeof(union pfvf_tlvs));
+ } else {
+ /* PF reports error */
+ BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
+ bp->acquire_resp.hdr.status);
+ rc = -EAGAIN;
+ goto out;
+ }
+ }
+
+ /* Retrieve physical port id (if possible) */
+ phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *)
+ bnx2x_search_tlv_list(bp, resp,
+ CHANNEL_TLV_PHYS_PORT_ID);
+ if (phys_port_resp) {
+ memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN);
+ bp->flags |= HAS_PHYS_PORT_ID;
+ }
+
+ /* get HW info */
+ bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
+ bp->link_params.chip_id = bp->common.chip_id;
+ bp->db_size = bp->acquire_resp.pfdev_info.db_size;
+ bp->common.int_block = INT_BLOCK_IGU;
+ bp->common.chip_port_mode = CHIP_2_PORT_MODE;
+ bp->igu_dsb_id = -1;
+ bp->mf_ov = 0;
+ bp->mf_mode = 0;
+ bp->common.flash_size = 0;
+ bp->flags |=
+ NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
+ bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
+ bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
+ strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
+ sizeof(bp->fw_ver));
+
+ if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
+ memcpy(bp->dev->dev_addr,
+ bp->acquire_resp.resc.current_mac_addr,
+ ETH_ALEN);
+
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+ return rc;
+}
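+
+/* Note on the acquire handshake above: it is a simple negotiation
+ * loop - the VF asks for its preferred resource counts and, on
+ * PFVF_STATUS_NO_RESOURCE, retries up to VF_ACQUIRE_THRESH times with
+ * each request clamped to the amounts the PF recommended in the
+ * previous response.
+ */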
+
+int bnx2x_vfpf_release(struct bnx2x *bp)
+{
+ struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ u32 rc, vf_id;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));
+
+ if (bnx2x_get_vf_id(bp, &vf_id)) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ req->vf_id = vf_id;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ /* send release request */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+
+ if (rc)
+ /* PF timeout */
+ goto out;
+
+ if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
+ /* PF released us */
+ DP(BNX2X_MSG_SP, "vf released\n");
+ } else {
+ /* PF reports error */
+ BNX2X_ERR("PF failed our release request - are we out of sync? Response status: %d\n",
+ resp->hdr.status);
+ rc = -EAGAIN;
+ goto out;
+ }
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
+}
+
+/* Tell PF about SB addresses */
+int bnx2x_vfpf_init(struct bnx2x *bp)
+{
+ struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc, i;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));
+
+ /* status blocks */
+ for_each_eth_queue(bp, i)
+ req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i,
+ status_blk_mapping);
+
+	/* statistics - the request only supports a single queue for now */
+ req->stats_addr = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, queue_stats);
+
+ req->stats_stride = sizeof(struct per_queue_stats);
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc)
+ goto out;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
+ resp->hdr.status);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
+}
+
+/* CLOSE VF - opposite to INIT_VF */
+void bnx2x_vfpf_close_vf(struct bnx2x *bp)
+{
+ struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int i, rc;
+ u32 vf_id;
+
+	/* If we haven't got a valid VF id, there is no point in sending
+	 * further messages
+	 */
+ if (bnx2x_get_vf_id(bp, &vf_id))
+ goto free_irq;
+
+ /* Close the queues */
+ for_each_queue(bp, i)
+ bnx2x_vfpf_teardown_queue(bp, i);
+
+ /* remove mac */
+ bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false);
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));
+
+ req->vf_id = vf_id;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+
+ if (rc)
+ BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc);
+
+ else if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
+ resp->hdr.status);
+
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+free_irq:
+ /* Disable HW interrupts, NAPI */
+ bnx2x_netif_stop(bp, 0);
+ /* Delete all NAPI objects */
+ bnx2x_del_all_napi(bp);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+}
+
+static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_queue *q)
+{
+ u8 cl_id = vfq_cl_id(vf, q);
+ u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
+
+ /* mac */
+ bnx2x_init_mac_obj(bp, &q->mac_obj,
+ cl_id, q->cid, func_id,
+ bnx2x_vf_sp(bp, vf, mac_rdata),
+ bnx2x_vf_sp_map(bp, vf, mac_rdata),
+ BNX2X_FILTER_MAC_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX,
+ &bp->macs_pool);
+ /* vlan */
+ bnx2x_init_vlan_obj(bp, &q->vlan_obj,
+ cl_id, q->cid, func_id,
+ bnx2x_vf_sp(bp, vf, vlan_rdata),
+ bnx2x_vf_sp_map(bp, vf, vlan_rdata),
+ BNX2X_FILTER_VLAN_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX,
+ &bp->vlans_pool);
+
+ /* mcast */
+ bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
+ q->cid, func_id, func_id,
+ bnx2x_vf_sp(bp, vf, mcast_rdata),
+ bnx2x_vf_sp_map(bp, vf, mcast_rdata),
+ BNX2X_FILTER_MCAST_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX);
+
+ /* rss */
+ bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
+ func_id, func_id,
+ bnx2x_vf_sp(bp, vf, rss_rdata),
+ bnx2x_vf_sp_map(bp, vf, rss_rdata),
+ BNX2X_FILTER_RSS_CONF_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX);
+
+ vf->leading_rss = cl_id;
+ q->is_leading = true;
+ q->sp_initialized = true;
+}
+
+/* ask the pf to open a queue for the vf */
+int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ bool is_leading)
+{
+ struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ u8 fp_idx = fp->index;
+ u16 tpa_agg_size = 0, flags = 0;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));
+
+ /* select tpa mode to request */
+ if (!fp->disable_tpa) {
+ flags |= VFPF_QUEUE_FLG_TPA;
+ flags |= VFPF_QUEUE_FLG_TPA_IPV6;
+ if (fp->mode == TPA_MODE_GRO)
+ flags |= VFPF_QUEUE_FLG_TPA_GRO;
+ tpa_agg_size = TPA_AGG_SIZE;
+ }
+
+ if (is_leading)
+ flags |= VFPF_QUEUE_FLG_LEADING_RSS;
+
+ /* calculate queue flags */
+ flags |= VFPF_QUEUE_FLG_STATS;
+ flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
+ flags |= VFPF_QUEUE_FLG_VLAN;
+ DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
+
+ /* Common */
+ req->vf_qid = fp_idx;
+ req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;
+
+ /* Rx */
+ req->rxq.rcq_addr = fp->rx_comp_mapping;
+ req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
+ req->rxq.rxq_addr = fp->rx_desc_mapping;
+ req->rxq.sge_addr = fp->rx_sge_mapping;
+ req->rxq.vf_sb = fp_idx;
+ req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
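+	/* hc_rate is interrupts/sec; *_ticks is the coalescing interval
+	 * in usec (e.g. rx_ticks = 50 usec -> 20000 ints/sec)
+	 */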
+ req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
+ req->rxq.mtu = bp->dev->mtu;
+ req->rxq.buf_sz = fp->rx_buf_size;
+ req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
+ req->rxq.tpa_agg_sz = tpa_agg_size;
+ req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
+ req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
+ (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
+ req->rxq.flags = flags;
+ req->rxq.drop_flags = 0;
+ req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
+ req->rxq.stat_id = -1; /* No stats at the moment */
+
+ /* Tx */
+ req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping;
+ req->txq.vf_sb = fp_idx;
+ req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
+ req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0;
+ req->txq.flags = flags;
+ req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc)
+ BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n",
+ fp_idx);
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
+ fp_idx, resp->hdr.status);
+ rc = -EINVAL;
+ }
+
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
+}
+
+static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
+{
+ struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
+ sizeof(*req));
+
+ req->vf_qid = qidx;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+
+ if (rc) {
+ BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
+ rc);
+ goto out;
+ }
+
+ /* PF failed the transaction */
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
+ resp->hdr.status);
+ rc = -EINVAL;
+ }
+
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
+}
+
+/* request pf to add a mac for the vf */
+int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
+{
+ struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
+ int rc = 0;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+ sizeof(*req));
+
+ req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
+ req->vf_qid = vf_qid;
+ req->n_mac_vlan_filters = 1;
+
+ req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
+ if (set)
+ req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC;
+
+ /* sample bulletin board for new mac */
+ bnx2x_sample_bulletin(bp);
+
+ /* copy mac from device to request */
+ memcpy(req->filters[0].mac, addr, ETH_ALEN);
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ /* send message to pf */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc) {
+ BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+ goto out;
+ }
+
+ /* failure may mean PF was configured with a new mac for us */
+ while (resp->hdr.status == PFVF_STATUS_FAILURE) {
+ DP(BNX2X_MSG_IOV,
+ "vfpf SET MAC failed. Check bulletin board for new posts\n");
+
+ /* copy mac from bulletin to device */
+ memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
+
+ /* check if bulletin board was updated */
+ if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
+ /* copy mac from device to request */
+ memcpy(req->filters[0].mac, bp->dev->dev_addr,
+ ETH_ALEN);
+
+ /* send message to pf */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status,
+ bp->vf2pf_mbox_mapping);
+ } else {
+ /* no new info in bulletin */
+ break;
+ }
+ }
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
+ rc = -EINVAL;
+ }
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
+}
+
+/* request pf to config rss table for vf queues*/
+int bnx2x_vfpf_config_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *params)
+{
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss;
+ int rc = 0;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS,
+ sizeof(*req));
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
+ memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key));
+ req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
+ req->rss_key_size = T_ETH_RSS_KEY;
+ req->rss_result_mask = params->rss_result_mask;
+
+	/* flags handled individually for backward/forward compatibility */
+ if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED))
+ req->rss_flags |= VFPF_RSS_MODE_DISABLED;
+ if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR))
+ req->rss_flags |= VFPF_RSS_MODE_REGULAR;
+ if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH))
+ req->rss_flags |= VFPF_RSS_SET_SRCH;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV4))
+ req->rss_flags |= VFPF_RSS_IPV4;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP))
+ req->rss_flags |= VFPF_RSS_IPV4_TCP;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP))
+ req->rss_flags |= VFPF_RSS_IPV4_UDP;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV6))
+ req->rss_flags |= VFPF_RSS_IPV6;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP))
+ req->rss_flags |= VFPF_RSS_IPV6_TCP;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP))
+ req->rss_flags |= VFPF_RSS_IPV6_UDP;
+
+ DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags);
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ /* send message to pf */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc) {
+ BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+ goto out;
+ }
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ /* Since older drivers don't support this feature (and VF has
+ * no way of knowing other than failing this), don't propagate
+ * an error in this case.
+ */
+ DP(BNX2X_MSG_IOV,
+ "Failed to send rss message to PF over VF-PF channel [%d]\n",
+ resp->hdr.status);
+ }
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
+}
+
+int bnx2x_vfpf_set_mcast(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc, i = 0;
+ struct netdev_hw_addr *ha;
+
+ if (bp->state != BNX2X_STATE_OPEN) {
+ DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
+ return -EINVAL;
+ }
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+ sizeof(*req));
+
+ /* Get Rx mode requested */
+ DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
+
+	/* We support PFVF_MAX_MULTICAST_PER_VF mcast addresses at most,
+	 * so check the count up front rather than overflowing req->multicast
+	 */
+	if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
+		DP(NETIF_MSG_IFUP,
+		   "VF supports not more than %d multicast MAC addresses\n",
+		   PFVF_MAX_MULTICAST_PER_VF);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	netdev_for_each_mc_addr(ha, dev) {
+		DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
+		   bnx2x_mc_addr(ha));
+		memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN);
+		i++;
+	}
+
+ req->n_multicast = i;
+ req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
+ req->vf_qid = 0;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc) {
+ BNX2X_ERR("Sending a message failed: %d\n", rc);
+ goto out;
+ }
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
+ resp->hdr.status);
+ rc = -EINVAL;
+ }
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+	return rc;
+}
+
+int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
+{
+ int mode = bp->rx_mode;
+ struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+ sizeof(*req));
+
+ DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);
+
+	/* Ignore everything except MODE_NONE */
+ if (mode == BNX2X_RX_MODE_NONE) {
+ req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
+ } else {
+		/* The current PF driver will not look at the specific flags,
+		 * but they are required when working with older drivers on
+		 * the hypervisor.
+		 */
+ req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+ }
+
+ req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
+ req->vf_qid = 0;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc)
+ BNX2X_ERR("Sending a message failed: %d\n", rc);
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
+ rc = -EINVAL;
+ }
+
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
+}
+
+/* General service functions */
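+/* mark the VF-PF channel state in CSTORM as ready to accept a new request */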
+static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
+{
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid);
+
+ REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY);
+}
+
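+/* flag the VF's mailbox as valid in the FW so its messages are processed */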
+static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
+{
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid);
+
+ REG_WR8(bp, addr, 1);
+}
+
+static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_vf(bp, i)
+ storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid));
+}
+
+/* enable vf_pf mailbox (aka vf-pf-channel) */
+void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
+{
+ bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);
+
+ /* enable the mailbox in the FW */
+ storm_memset_vf_mbx_ack(bp, abs_vfid);
+ storm_memset_vf_mbx_valid(bp, abs_vfid);
+
+ /* enable the VF access to the mailbox */
+ bnx2x_vf_enable_access(bp, abs_vfid);
+}
+
+/* this works only on non-E1x chips, since E1x has no support for VFs */
+static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
+ dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
+ u32 vf_addr_lo, u32 len32)
+{
+ struct dmae_command dmae;
+
+ if (CHIP_IS_E1x(bp)) {
+ BNX2X_ERR("Chip revision does not support VFs\n");
+ return DMAE_NOT_RDY;
+ }
+
+ if (!bp->dmae_ready) {
+ BNX2X_ERR("DMAE is not ready, can not copy\n");
+ return DMAE_NOT_RDY;
+ }
+
+ /* set opcode and fixed command fields */
+ bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);
+
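+	/* from_vf: copy from the VF's memory into the PF buffer;
+	 * otherwise copy the PF buffer out to the VF's memory
+	 */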
+ if (from_vf) {
+ dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
+ (DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
+ (DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);
+
+ dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);
+
+ dmae.src_addr_lo = vf_addr_lo;
+ dmae.src_addr_hi = vf_addr_hi;
+ dmae.dst_addr_lo = U64_LO(pf_addr);
+ dmae.dst_addr_hi = U64_HI(pf_addr);
+ } else {
+ dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
+ (DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
+ (DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);
+
+ dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);
+
+ dmae.src_addr_lo = U64_LO(pf_addr);
+ dmae.src_addr_hi = U64_HI(pf_addr);
+ dmae.dst_addr_lo = vf_addr_lo;
+ dmae.dst_addr_hi = vf_addr_hi;
+ }
+ dmae.len = len32;
+
+ /* issue the command and wait for completion */
+ return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
+}
+
+static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
+ u16 length, type;
+
+ /* prepare response */
+ type = mbx->first_tlv.tl.type;
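+	/* only an ACQUIRE request warrants the full resource response;
+	 * any other request gets a plain general response header
+	 */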
+ length = type == CHANNEL_TLV_ACQUIRE ?
+ sizeof(struct pfvf_acquire_resp_tlv) :
+ sizeof(struct pfvf_general_resp_tlv);
+ bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length);
+ bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+}
+
+static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ int vf_rc)
+{
+ struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
+ struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
+ dma_addr_t pf_addr;
+ u64 vf_addr;
+ int rc;
+
+ bnx2x_dp_tlv_list(bp, resp);
+ DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
+ mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
+
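+	/* translate the internal return code into a PF-VF channel status */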
+ resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc);
+
+ /* send response */
+ vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
+ mbx->first_tlv.resp_msg_offset;
+ pf_addr = mbx->msg_mapping +
+ offsetof(struct bnx2x_vf_mbx_msg, resp);
+
+ /* Copy the response buffer. The first u64 is written afterwards, as
+ * the vf is sensitive to the header being written
+ */
+ vf_addr += sizeof(u64);
+ pf_addr += sizeof(u64);
+ rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
+ U64_HI(vf_addr),
+ U64_LO(vf_addr),
+ (sizeof(union pfvf_tlvs) - sizeof(u64))/4);
+ if (rc) {
+ BNX2X_ERR("Failed to copy response body to VF %d\n",
+ vf->abs_vfid);
+ goto mbx_error;
+ }
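+	/* rewind to the u64 header that was skipped above */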
+ vf_addr -= sizeof(u64);
+ pf_addr -= sizeof(u64);
+
+ /* ack the FW */
+ storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
+ mmiowb();
+
+ /* copy the response header including status-done field,
+ * must be last dmae, must be after FW is acked
+ */
+ rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
+ U64_HI(vf_addr),
+ U64_LO(vf_addr),
+ sizeof(u64)/4);
+
+ /* unlock channel mutex */
+ bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
+
+ if (rc) {
+ BNX2X_ERR("Failed to copy response status to VF %d\n",
+ vf->abs_vfid);
+ goto mbx_error;
+ }
+ return;
+
+mbx_error:
+ bnx2x_vf_release(bp, vf);
+}
+
+static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ int rc)
+{
+ bnx2x_vf_mbx_resp_single_tlv(bp, vf);
+ bnx2x_vf_mbx_resp_send_msg(bp, vf, rc);
+}
+
+static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ void *buffer,
+ u16 *offset)
+{
+ struct vfpf_port_phys_id_resp_tlv *port_id;
+
+ if (!(bp->flags & HAS_PHYS_PORT_ID))
+ return;
+
+ bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID,
+ sizeof(struct vfpf_port_phys_id_resp_tlv));
+
+ port_id = (struct vfpf_port_phys_id_resp_tlv *)
+ (((u8 *)buffer) + *offset);
+ memcpy(port_id->id, bp->phys_port_id, ETH_ALEN);
+
+ /* Offset should continue representing the offset to the tail
+ * of TLV data (outside this function scope)
+ */
+ *offset += sizeof(struct vfpf_port_phys_id_resp_tlv);
+}
+
+static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx, int vfop_status)
+{
+ int i;
+ struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
+ struct pf_vf_resc *resc = &resp->resc;
+ u8 status = bnx2x_pfvf_status_codes(vfop_status);
+ u16 length;
+
+ memset(resp, 0, sizeof(*resp));
+
+ /* fill in pfdev info */
+ resp->pfdev_info.chip_num = bp->common.chip_id;
+ resp->pfdev_info.db_size = bp->db_size;
+ resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
+ resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
+ PFVF_CAP_TPA |
+ PFVF_CAP_TPA_UPDATE);
+ bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
+ sizeof(resp->pfdev_info.fw_ver));
+
+ if (status == PFVF_STATUS_NO_RESOURCE ||
+ status == PFVF_STATUS_SUCCESS) {
+		/* set resource numbers; if status equals NO_RESOURCE these
+		 * are the max possible numbers
+		 */
+ resc->num_rxqs = vf_rxq_count(vf) ? :
+ bnx2x_vf_max_queue_cnt(bp, vf);
+ resc->num_txqs = vf_txq_count(vf) ? :
+ bnx2x_vf_max_queue_cnt(bp, vf);
+ resc->num_sbs = vf_sb_count(vf);
+ resc->num_mac_filters = vf_mac_rules_cnt(vf);
+ resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf);
+ resc->num_mc_filters = 0;
+
+ if (status == PFVF_STATUS_SUCCESS) {
+ /* fill in the allocated resources */
+ struct pf_vf_bulletin_content *bulletin =
+ BP_VF_BULLETIN(bp, vf->index);
+
+ for_each_vfq(vf, i)
+ resc->hw_qid[i] =
+ vfq_qzone_id(vf, vfq_get(vf, i));
+
+ for_each_vf_sb(vf, i) {
+ resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i);
+ resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i);
+ }
+
+ /* if a mac has been set for this vf, supply it */
+ if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
+ memcpy(resc->current_mac_addr, bulletin->mac,
+ ETH_ALEN);
+ }
+ }
+ }
+
+ DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n"
+ "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n",
+ vf->abs_vfid,
+ resp->pfdev_info.chip_num,
+ resp->pfdev_info.db_size,
+ resp->pfdev_info.indices_per_sb,
+ resp->pfdev_info.pf_cap,
+ resc->num_rxqs,
+ resc->num_txqs,
+ resc->num_sbs,
+ resc->num_mac_filters,
+ resc->num_vlan_filters,
+ resc->num_mc_filters,
+ resp->pfdev_info.fw_ver);
+
+ DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ ");
+ for (i = 0; i < vf_rxq_count(vf); i++)
+ DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]);
+ DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ ");
+ for (i = 0; i < vf_sb_count(vf); i++)
+ DP_CONT(BNX2X_MSG_IOV, "%d:%d ",
+ resc->hw_sbs[i].hw_sb_id,
+ resc->hw_sbs[i].sb_qid);
+ DP_CONT(BNX2X_MSG_IOV, "]\n");
+
+ /* prepare response */
+ length = sizeof(struct pfvf_acquire_resp_tlv);
+ bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length);
+
+ /* Handle possible VF requests for physical port identifiers.
+ * 'length' should continue to indicate the offset of the first empty
+ * place in the buffer (i.e., where next TLV should be inserted)
+ */
+ if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
+ CHANNEL_TLV_PHYS_PORT_ID))
+ bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length);
+
+ bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* send the response */
+ bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status);
+}
+
+static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ int rc;
+ struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;
+
+ /* log vfdef info */
+ DP(BNX2X_MSG_IOV,
+ "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n",
+ vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,
+ acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,
+ acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,
+ acquire->resc_request.num_vlan_filters,
+ acquire->resc_request.num_mc_filters);
+
+ /* acquire the resources */
+ rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);
+
+ /* store address of vf's bulletin board */
+ vf->bulletin_map = acquire->bulletin_addr;
+
+ /* response */
+ bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
+}
+
+static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct vfpf_init_tlv *init = &mbx->msg->req.init;
+ int rc;
+
+	/* record ghost (guest physical) addresses from the vf message */
+ vf->spq_map = init->spq_addr;
+ vf->fw_stat_map = init->stats_addr;
+ vf->stats_stride = init->stats_stride;
+ rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
+
+ /* set VF multiqueue statistics collection mode */
+ if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
+ vf->cfg_flags |= VF_CFG_STATS_COALESCE;
+
+ /* response */
+ bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+/* convert MBX queue-flags to standard SP queue-flags */
+static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
+ unsigned long *sp_q_flags)
+{
+ if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
+ __set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
+ __set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
+ __set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
+ __set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
+ __set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
+ __set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
+ __set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
+ __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
+ __set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);
+
+ /* outer vlan removal is set according to PF's multi function mode */
+ if (IS_MF_SD(bp))
+ __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
+}
+
+static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
+ struct bnx2x_vf_queue_construct_params qctor;
+ int rc = 0;
+
+ /* verify vf_qid */
+ if (setup_q->vf_qid >= vf_rxq_count(vf)) {
+ BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
+ setup_q->vf_qid, vf_rxq_count(vf));
+ rc = -EINVAL;
+ goto response;
+ }
+
+	/* tx queues must be set up alongside rx queues, thus if the rx queue
+	 * is not marked as valid there's nothing to do.
+	 */
+ if (setup_q->param_valid & (VFPF_RXQ_VALID|VFPF_TXQ_VALID)) {
+ struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
+ unsigned long q_type = 0;
+
+ struct bnx2x_queue_init_params *init_p;
+ struct bnx2x_queue_setup_params *setup_p;
+
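+		/* the leading queue also carries the VF's RSS and multicast
+		 * configuration objects
+		 */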
+ if (bnx2x_vfq_is_leading(q))
+ bnx2x_leading_vfq_init(bp, vf, q);
+
+ /* re-init the VF operation context */
+		memset(&qctor, 0,
+ sizeof(struct bnx2x_vf_queue_construct_params));
+ setup_p = &qctor.prep_qsetup;
+ init_p = &qctor.qstate.params.init;
+
+ /* activate immediately */
+ __set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);
+
+ if (setup_q->param_valid & VFPF_TXQ_VALID) {
+ struct bnx2x_txq_setup_params *txq_params =
+ &setup_p->txq_params;
+
+ __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+
+ /* save sb resource index */
+ q->sb_idx = setup_q->txq.vf_sb;
+
+ /* tx init */
+ init_p->tx.hc_rate = setup_q->txq.hc_rate;
+ init_p->tx.sb_cq_index = setup_q->txq.sb_index;
+
+ bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
+ &init_p->tx.flags);
+
+ /* tx setup - flags */
+ bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
+ &setup_p->flags);
+
+ /* tx setup - general, nothing */
+
+ /* tx setup - tx */
+ txq_params->dscr_map = setup_q->txq.txq_addr;
+ txq_params->sb_cq_index = setup_q->txq.sb_index;
+ txq_params->traffic_type = setup_q->txq.traffic_type;
+
+ bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
+ q->index, q->sb_idx);
+ }
+
+ if (setup_q->param_valid & VFPF_RXQ_VALID) {
+ struct bnx2x_rxq_setup_params *rxq_params =
+ &setup_p->rxq_params;
+
+ __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+
+ /* Note: there is no support for different SBs
+ * for TX and RX
+ */
+ q->sb_idx = setup_q->rxq.vf_sb;
+
+ /* rx init */
+ init_p->rx.hc_rate = setup_q->rxq.hc_rate;
+ init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
+ bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
+ &init_p->rx.flags);
+
+ /* rx setup - flags */
+ bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
+ &setup_p->flags);
+
+ /* rx setup - general */
+ setup_p->gen_params.mtu = setup_q->rxq.mtu;
+
+ /* rx setup - rx */
+ rxq_params->drop_flags = setup_q->rxq.drop_flags;
+ rxq_params->dscr_map = setup_q->rxq.rxq_addr;
+ rxq_params->sge_map = setup_q->rxq.sge_addr;
+ rxq_params->rcq_map = setup_q->rxq.rcq_addr;
+ rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
+ rxq_params->buf_sz = setup_q->rxq.buf_sz;
+ rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
+ rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
+ rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
+ rxq_params->cache_line_log =
+ setup_q->rxq.cache_line_log;
+ rxq_params->sb_cq_index = setup_q->rxq.sb_index;
+
+ /* rx setup - multicast engine */
+ if (bnx2x_vfq_is_leading(q)) {
+ u8 mcast_id = FW_VF_HANDLE(vf->abs_vfid);
+
+ rxq_params->mcast_engine_id = mcast_id;
+ __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
+ }
+
+ bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
+ q->index, q->sb_idx);
+ }
+ /* complete the preparations */
+ bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type);
+
+ rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor);
+ if (rc)
+ goto response;
+ }
+response:
+ bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct vfpf_set_q_filters_tlv *tlv,
+ struct bnx2x_vf_mac_vlan_filters **pfl,
+ u32 type_flag)
+{
+ int i, j;
+ struct bnx2x_vf_mac_vlan_filters *fl = NULL;
+ size_t fsz;
+
+ fsz = tlv->n_mac_vlan_filters *
+ sizeof(struct bnx2x_vf_mac_vlan_filter) +
+ sizeof(struct bnx2x_vf_mac_vlan_filters);
+
+ fl = kzalloc(fsz, GFP_KERNEL);
+ if (!fl)
+ return -ENOMEM;
+
+ for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
+ struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];
+
+ if ((msg_filter->flags & type_flag) != type_flag)
+ continue;
+ if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
+ fl->filters[j].mac = msg_filter->mac;
+ fl->filters[j].type = BNX2X_VF_FILTER_MAC;
+ } else {
+ fl->filters[j].vid = msg_filter->vlan_tag;
+ fl->filters[j].type = BNX2X_VF_FILTER_VLAN;
+ }
+		fl->filters[j].add =
+			(msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
+			true : false;
+		fl->count++;
+		j++;
+	}
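+	/* if no filters of the requested type were found, return no list */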
+ if (!fl->count)
+ kfree(fl);
+ else
+ *pfl = fl;
+
+ return 0;
+}
+
+static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
+ struct vfpf_q_mac_vlan_filter *filter)
+{
+ DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x\n", idx, filter->flags);
+ if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID)
+ DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag);
+ if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID)
+ DP_CONT(msglvl, ", MAC=%pM", filter->mac);
+ DP_CONT(msglvl, "\n");
+}
+
+static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
+ struct vfpf_set_q_filters_tlv *filters)
+{
+ int i;
+
+ if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED)
+ for (i = 0; i < filters->n_mac_vlan_filters; i++)
+ bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i,
+ &filters->filters[i]);
+
+ if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED)
+ DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask);
+
+ if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED)
+ for (i = 0; i < filters->n_multicast; i++)
+ DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]);
+}
+
+#define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID
+#define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID
+
+static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int rc = 0;
+
+ struct vfpf_set_q_filters_tlv *msg =
+ &BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;
+
+ /* check for any mac/vlan changes */
+ if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
+ /* build mac list */
+ struct bnx2x_vf_mac_vlan_filters *fl = NULL;
+
+ rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+ VFPF_MAC_FILTER);
+ if (rc)
+ goto op_err;
+
+		if (fl) {
+ /* set mac list */
+ rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
+ msg->vf_qid,
+ false);
+ if (rc)
+ goto op_err;
+ }
+
+ /* build vlan list */
+ fl = NULL;
+
+ rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+ VFPF_VLAN_FILTER);
+ if (rc)
+ goto op_err;
+
+ if (fl) {
+ /* set vlan list */
+ rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
+ msg->vf_qid,
+ false);
+ if (rc)
+ goto op_err;
+ }
+ }
+
+ if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
+ unsigned long accept = 0;
+ struct pf_vf_bulletin_content *bulletin =
+ BP_VF_BULLETIN(bp, vf->index);
+
+ /* Ignore VF requested mode; instead set a regular mode */
+ if (msg->rx_mask != VFPF_RX_MASK_ACCEPT_NONE) {
+ __set_bit(BNX2X_ACCEPT_UNICAST, &accept);
+ __set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
+ }
+
+		/* A packet arriving at the vf's mac should be accepted
+		 * with any vlan, unless a vlan has already been
+		 * configured.
+		 */
+ if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
+
+ /* set rx-mode */
+ rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept);
+ if (rc)
+ goto op_err;
+ }
+
+ if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
+ /* set mcasts */
+ rc = bnx2x_vf_mcast(bp, vf, msg->multicast,
+ msg->n_multicast, false);
+ if (rc)
+ goto op_err;
+ }
+op_err:
+ if (rc)
+ BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
+ vf->abs_vfid, msg->vf_qid, rc);
+ return rc;
+}
+
+static int bnx2x_filters_validate_mac(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct vfpf_set_q_filters_tlv *filters)
+{
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
+ int rc = 0;
+
+	/* if a mac was already set for this VF via the set vf mac ndo, we only
+	 * accept mac configurations of that mac. Why accept them at all?
+	 * Because the PF may have been unable to configure the mac at the
+	 * time, since the queue was not set up yet.
+	 */
+ if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
+		/* once a mac was set by ndo we can only accept a single mac... */
+ if (filters->n_mac_vlan_filters > 1) {
+ BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
+ vf->abs_vfid);
+ rc = -EPERM;
+ goto response;
+ }
+
+ /* ...and only the mac set by the ndo */
+ if (filters->n_mac_vlan_filters == 1 &&
+ !ether_addr_equal(filters->filters->mac, bulletin->mac)) {
+ BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
+ vf->abs_vfid);
+
+ rc = -EPERM;
+ goto response;
+ }
+ }
+
+response:
+ return rc;
+}
+
+static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct vfpf_set_q_filters_tlv *filters)
+{
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
+ int rc = 0;
+
+	/* if a vlan was set by the hypervisor we don't allow the guest to configure one */
+ if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
+ int i;
+
+ /* search for vlan filters */
+ for (i = 0; i < filters->n_mac_vlan_filters; i++) {
+ if (filters->filters[i].flags &
+ VFPF_Q_FILTER_VLAN_TAG_VALID) {
+ BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
+ vf->abs_vfid);
+ rc = -EPERM;
+ goto response;
+ }
+ }
+ }
+
+ /* verify vf_qid */
+ if (filters->vf_qid > vf_rxq_count(vf)) {
+ rc = -EPERM;
+ goto response;
+ }
+
+response:
+ return rc;
+}
+
+static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
+ int rc;
+
+ rc = bnx2x_filters_validate_mac(bp, vf, filters);
+ if (rc)
+ goto response;
+
+ rc = bnx2x_filters_validate_vlan(bp, vf, filters);
+ if (rc)
+ goto response;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
+ vf->abs_vfid,
+ filters->vf_qid);
+
+ /* print q_filter message */
+ bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);
+
+ rc = bnx2x_vf_mbx_qfilters(bp, vf);
+response:
+ bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ int qid = mbx->msg->req.q_op.vf_qid;
+ int rc;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
+ vf->abs_vfid, qid);
+
+ rc = bnx2x_vf_queue_teardown(bp, vf, qid);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ int rc;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);
+
+ rc = bnx2x_vf_close(bp, vf);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ int rc;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);
+
+ rc = bnx2x_vf_free(bp, vf);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct bnx2x_config_rss_params rss;
+ struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
+ int rc = 0;
+
+ if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
+ rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
+ BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
+ vf->index);
+ rc = -EINVAL;
+ goto mbx_resp;
+ }
+
+ memset(&rss, 0, sizeof(struct bnx2x_config_rss_params));
+
+ /* set vfop params according to rss tlv */
+ memcpy(rss.ind_table, rss_tlv->ind_table,
+ T_ETH_INDIRECTION_TABLE_SIZE);
+ memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key));
+ rss.rss_obj = &vf->rss_conf_obj;
+ rss.rss_result_mask = rss_tlv->rss_result_mask;
+
+	/* flags handled individually for backward/forward compatibility */
+ rss.rss_flags = 0;
+ rss.ramrod_flags = 0;
+
+ if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
+ __set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
+ __set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
+ __set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
+ __set_bit(BNX2X_RSS_IPV4, &rss.rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
+ __set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
+ __set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
+ __set_bit(BNX2X_RSS_IPV6, &rss.rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
+ __set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
+ __set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags);
+
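+	/* the FW asserts if UDP RSS is requested without the matching TCP
+	 * capability for the same IP version, so refuse such a request here
+	 */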
+ if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
+ rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
+ (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
+ rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
+ BNX2X_ERR("about to hit a FW assert. aborting...\n");
+ rc = -EINVAL;
+ goto mbx_resp;
+ }
+
+ rc = bnx2x_vf_rss_update(bp, vf, &rss);
+mbx_resp:
+ bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+static int bnx2x_validate_tpa_params(struct bnx2x *bp,
+ struct vfpf_tpa_tlv *tpa_tlv)
+{
+ int rc = 0;
+
+ if (tpa_tlv->tpa_client_info.max_sges_for_packet >
+ U_ETH_MAX_SGES_FOR_PACKET) {
+ rc = -EINVAL;
+ BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
+ tpa_tlv->tpa_client_info.max_sges_for_packet,
+ U_ETH_MAX_SGES_FOR_PACKET);
+ }
+
+ if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
+ rc = -EINVAL;
+ BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
+ tpa_tlv->tpa_client_info.max_tpa_queues,
+ MAX_AGG_QS(bp));
+ }
+
+ return rc;
+}
+
+static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct bnx2x_queue_update_tpa_params vf_op_params;
+ struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
+ int rc = 0;
+
+ memset(&vf_op_params, 0, sizeof(vf_op_params));
+
+ if (bnx2x_validate_tpa_params(bp, tpa_tlv))
+ goto mbx_resp;
+
+ vf_op_params.complete_on_both_clients =
+ tpa_tlv->tpa_client_info.complete_on_both_clients;
+ vf_op_params.dont_verify_thr =
+ tpa_tlv->tpa_client_info.dont_verify_thr;
+ vf_op_params.max_agg_sz =
+ tpa_tlv->tpa_client_info.max_agg_size;
+ vf_op_params.max_sges_pkt =
+ tpa_tlv->tpa_client_info.max_sges_for_packet;
+ vf_op_params.max_tpa_queues =
+ tpa_tlv->tpa_client_info.max_tpa_queues;
+ vf_op_params.sge_buff_sz =
+ tpa_tlv->tpa_client_info.sge_buff_size;
+ vf_op_params.sge_pause_thr_high =
+ tpa_tlv->tpa_client_info.sge_pause_thr_high;
+ vf_op_params.sge_pause_thr_low =
+ tpa_tlv->tpa_client_info.sge_pause_thr_low;
+ vf_op_params.tpa_mode =
+ tpa_tlv->tpa_client_info.tpa_mode;
+ vf_op_params.update_ipv4 =
+ tpa_tlv->tpa_client_info.update_ipv4;
+ vf_op_params.update_ipv6 =
+ tpa_tlv->tpa_client_info.update_ipv6;
+
+ rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params);
+
+mbx_resp:
+ bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+/* dispatch request */
+static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ int i;
+
+ /* check if tlv type is known */
+ if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
+ /* Lock the per vf op mutex and note the locker's identity.
+ * The unlock will take place in mbx response.
+ */
+ bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
+
+ /* switch on the opcode */
+ switch (mbx->first_tlv.tl.type) {
+ case CHANNEL_TLV_ACQUIRE:
+ bnx2x_vf_mbx_acquire(bp, vf, mbx);
+ return;
+ case CHANNEL_TLV_INIT:
+ bnx2x_vf_mbx_init_vf(bp, vf, mbx);
+ return;
+ case CHANNEL_TLV_SETUP_Q:
+ bnx2x_vf_mbx_setup_q(bp, vf, mbx);
+ return;
+ case CHANNEL_TLV_SET_Q_FILTERS:
+ bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
+ return;
+ case CHANNEL_TLV_TEARDOWN_Q:
+ bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
+ return;
+ case CHANNEL_TLV_CLOSE:
+ bnx2x_vf_mbx_close_vf(bp, vf, mbx);
+ return;
+ case CHANNEL_TLV_RELEASE:
+ bnx2x_vf_mbx_release_vf(bp, vf, mbx);
+ return;
+ case CHANNEL_TLV_UPDATE_RSS:
+ bnx2x_vf_mbx_update_rss(bp, vf, mbx);
+ return;
+ case CHANNEL_TLV_UPDATE_TPA:
+ bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
+ return;
+ }
+
+ } else {
+ /* unknown TLV - this may belong to a VF driver from the future
+ * - a version written after this PF driver was written, which
+ * supports features unknown as of yet. Too bad since we don't
+ * support them. Or this may be because someone wrote a crappy
+ * VF driver and is sending garbage over the channel.
+ */
+ BNX2X_ERR("unknown TLV. type %d length %d vf->state was %d. first 20 bytes of mailbox buffer:\n",
+ mbx->first_tlv.tl.type, mbx->first_tlv.tl.length,
+ vf->state);
+ for (i = 0; i < 20; i++)
+ DP_CONT(BNX2X_MSG_IOV, "%x ",
+ mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
+ }
+
+ /* can we respond to VF (do we have an address for it?) */
+ if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
+ /* notify the VF that we do not support this request */
+ bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED);
+ } else {
+ /* can't send a response since this VF is unknown to us
+ * just ack the FW to release the mailbox and unlock
+ * the channel.
+ */
+ storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
+ /* Firmware ack should be written before unlocking channel */
+ mmiowb();
+ bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
+ }
+}
+
+void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
+ struct vf_pf_event_data *vfpf_event)
+{
+ u8 vf_idx;
+
+ DP(BNX2X_MSG_IOV,
+ "vf pf event received: vfid %d, address_hi %x, address lo %x",
+ vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);
+	/* Sanity checks; consider removing later */
+
+ /* check if the vf_id is valid */
+ if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >
+ BNX2X_NR_VIRTFN(bp)) {
+ BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
+ vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
+ return;
+ }
+
+ vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
+
+ /* Update VFDB with current message and schedule its handling */
+ mutex_lock(&BP_VFDB(bp)->event_mutex);
+ BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi;
+ BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo;
+ BP_VFDB(bp)->event_occur |= (1ULL << vf_idx);
+ mutex_unlock(&BP_VFDB(bp)->event_mutex);
+
+ bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG);
+}
+
+/* handle new vf-pf messages */
+void bnx2x_vf_mbx(struct bnx2x *bp)
+{
+ struct bnx2x_vfdb *vfdb = BP_VFDB(bp);
+ u64 events;
+ u8 vf_idx;
+ int rc;
+
+ if (!vfdb)
+ return;
+
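+	/* snapshot and clear the pending-event bitmap under the lock so
+	 * events arriving while we process are not lost
+	 */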
+ mutex_lock(&vfdb->event_mutex);
+ events = vfdb->event_occur;
+ vfdb->event_occur = 0;
+ mutex_unlock(&vfdb->event_mutex);
+
+ for_each_vf(bp, vf_idx) {
+ struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx);
+ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+ /* Handle VFs which have pending events */
+ if (!(events & (1ULL << vf_idx)))
+ continue;
+
+ DP(BNX2X_MSG_IOV,
+ "Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n",
+ vf_idx, mbx->vf_addr_hi, mbx->vf_addr_lo,
+ mbx->first_tlv.resp_msg_offset);
+
+ /* dmae to get the VF request */
+ rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping,
+ vf->abs_vfid, mbx->vf_addr_hi,
+ mbx->vf_addr_lo,
+ sizeof(union vfpf_tlvs)/4);
+ if (rc) {
+ BNX2X_ERR("Failed to copy request VF %d\n",
+ vf->abs_vfid);
+ bnx2x_vf_release(bp, vf);
+ return;
+ }
+
+ /* process the VF message header */
+ mbx->first_tlv = mbx->msg->req.first_tlv;
+
+ /* Clean response buffer to refrain from falsely
+ * seeing chains.
+ */
+ memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
+
+ /* dispatch the request (will prepare the response) */
+ bnx2x_vf_mbx_request(bp, vf, mbx);
+ }
+}
+
+/* propagate local bulletin board to vf */
+int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
+{
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf);
+ dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping +
+ vf * BULLETIN_CONTENT_SIZE;
+ dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map);
+ int rc;
+
+ /* can only update vf after init took place */
+ if (bnx2x_vf(bp, vf, state) != VF_ENABLED &&
+ bnx2x_vf(bp, vf, state) != VF_ACQUIRED)
+ return 0;
+
+ /* increment bulletin board version and compute crc */
+ bulletin->version++;
+ bulletin->length = BULLETIN_CONTENT_SIZE;
+ bulletin->crc = bnx2x_crc_vf_bulletin(bp, bulletin);
+
+ /* propagate bulletin board via dmae to vm memory */
+ rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr,
+ bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr),
+ U64_LO(vf_addr), bulletin->length / 4);
+ return rc;
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
new file mode 100644
index 00000000000..e21e706762c
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -0,0 +1,434 @@
+/* bnx2x_vfpf.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2011-2013 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Ariel Elior <ariel.elior@qlogic.com>
+ */
+#ifndef VF_PF_IF_H
+#define VF_PF_IF_H
+
+#ifdef CONFIG_BNX2X_SRIOV
+
+/* Common definitions for all HVs */
+struct vf_pf_resc_request {
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters; /* No limit so superfluous */
+};
+
+struct hw_sb_info {
+ u8 hw_sb_id; /* aka absolute igu id, used to ack the sb */
+ u8 sb_qid; /* used to update DHC for sb */
+};
+
+/* HW VF-PF channel definitions
+ * A.K.A VF-PF mailbox
+ */
+#define TLV_BUFFER_SIZE 1024
+#define PF_VF_BULLETIN_SIZE 512
+
+#define VFPF_QUEUE_FLG_TPA 0x0001
+#define VFPF_QUEUE_FLG_TPA_IPV6 0x0002
+#define VFPF_QUEUE_FLG_TPA_GRO 0x0004
+#define VFPF_QUEUE_FLG_CACHE_ALIGN 0x0008
+#define VFPF_QUEUE_FLG_STATS 0x0010
+#define VFPF_QUEUE_FLG_OV 0x0020
+#define VFPF_QUEUE_FLG_VLAN 0x0040
+#define VFPF_QUEUE_FLG_COS 0x0080
+#define VFPF_QUEUE_FLG_HC 0x0100
+#define VFPF_QUEUE_FLG_DHC 0x0200
+#define VFPF_QUEUE_FLG_LEADING_RSS 0x0400
+
+#define VFPF_QUEUE_DROP_IP_CS_ERR (1 << 0)
+#define VFPF_QUEUE_DROP_TCP_CS_ERR (1 << 1)
+#define VFPF_QUEUE_DROP_TTL0 (1 << 2)
+#define VFPF_QUEUE_DROP_UDP_CS_ERR (1 << 3)
+
+#define VFPF_RX_MASK_ACCEPT_NONE 0x00000000
+#define VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST 0x00000001
+#define VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST 0x00000002
+#define VFPF_RX_MASK_ACCEPT_ALL_UNICAST 0x00000004
+#define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST 0x00000008
+#define VFPF_RX_MASK_ACCEPT_BROADCAST 0x00000010
+#define BULLETIN_CONTENT_SIZE (sizeof(struct pf_vf_bulletin_content))
+#define BULLETIN_ATTEMPTS 5 /* crc failures before throwing towel */
+#define BULLETIN_CRC_SEED 0
+
+enum {
+ PFVF_STATUS_WAITING = 0,
+ PFVF_STATUS_SUCCESS,
+ PFVF_STATUS_FAILURE,
+ PFVF_STATUS_NOT_SUPPORTED,
+ PFVF_STATUS_NO_RESOURCE
+};
+
+/* vf pf channel tlvs */
+/* general tlv header (used for both vf->pf request and pf->vf response) */
+struct channel_tlv {
+ u16 type;
+ u16 length;
+};
+
+/* header of first vf->pf tlv carries the offset used to calculate response
+ * buffer address
+ */
+struct vfpf_first_tlv {
+ struct channel_tlv tl;
+ u32 resp_msg_offset;
+};
+
+/* header of pf->vf tlvs, carries the status of handling the request */
+struct pfvf_tlv {
+ struct channel_tlv tl;
+ u8 status;
+ u8 padding[3];
+};
+
+/* response tlv used for most tlvs */
+struct pfvf_general_resp_tlv {
+ struct pfvf_tlv hdr;
+};
+
+/* used to terminate and pad a tlv list */
+struct channel_list_end_tlv {
+ struct channel_tlv tl;
+ u8 padding[4];
+};
+
+/* Acquire */
+struct vfpf_acquire_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ struct vf_pf_vfdev_info {
+ /* the following fields are for debug purposes */
+ u8 vf_id; /* ME register value */
+ u8 vf_os; /* e.g. Linux, W2K8 */
+ u8 padding[2];
+ } vfdev_info;
+
+ struct vf_pf_resc_request resc_request;
+
+ aligned_u64 bulletin_addr;
+};
+
+/* simple operation request on queue */
+struct vfpf_q_op_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u8 vf_qid;
+ u8 padding[3];
+};
+
+/* receive side scaling tlv */
+struct vfpf_rss_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u32 rss_flags;
+#define VFPF_RSS_MODE_DISABLED (1 << 0)
+#define VFPF_RSS_MODE_REGULAR (1 << 1)
+#define VFPF_RSS_SET_SRCH (1 << 2)
+#define VFPF_RSS_IPV4 (1 << 3)
+#define VFPF_RSS_IPV4_TCP (1 << 4)
+#define VFPF_RSS_IPV4_UDP (1 << 5)
+#define VFPF_RSS_IPV6 (1 << 6)
+#define VFPF_RSS_IPV6_TCP (1 << 7)
+#define VFPF_RSS_IPV6_UDP (1 << 8)
+ u8 rss_result_mask;
+ u8 ind_table_size;
+ u8 rss_key_size;
+ u8 padding;
+ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ u32 rss_key[T_ETH_RSS_KEY]; /* hash values */
+};
+
+/* acquire response tlv - carries the allocated resources */
+struct pfvf_acquire_resp_tlv {
+ struct pfvf_tlv hdr;
+ struct pf_vf_pfdev_info {
+ u32 chip_num;
+ u32 pf_cap;
+#define PFVF_CAP_RSS 0x00000001
+#define PFVF_CAP_DHC 0x00000002
+#define PFVF_CAP_TPA 0x00000004
+#define PFVF_CAP_TPA_UPDATE 0x00000008
+ char fw_ver[32];
+ u16 db_size;
+ u8 indices_per_sb;
+ u8 padding;
+ } pfdev_info;
+ struct pf_vf_resc {
+ /* in case of status NO_RESOURCE in message hdr, pf will fill
+ * this struct with suggested amount of resources for next
+ * acquire request
+ */
+#define PFVF_MAX_QUEUES_PER_VF 16
+#define PFVF_MAX_SBS_PER_VF 16
+ struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
+ u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters;
+ u8 permanent_mac_addr[ETH_ALEN];
+ u8 current_mac_addr[ETH_ALEN];
+ u8 padding[2];
+ } resc;
+};
+
+struct vfpf_port_phys_id_resp_tlv {
+ struct channel_tlv tl;
+ u8 id[ETH_ALEN];
+ u8 padding[2];
+};
+
+#define VFPF_INIT_FLG_STATS_COALESCE (1 << 0) /* when set the VFs queues
+ * stats will be coalesced on
+ * the leading RSS queue
+ */
+
+/* Init VF */
+struct vfpf_init_tlv {
+ struct vfpf_first_tlv first_tlv;
+ aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; /* vf_sb based */
+ aligned_u64 spq_addr;
+ aligned_u64 stats_addr;
+ u16 stats_stride;
+ u32 flags;
+ u32 padding[2];
+};
+
+/* Setup Queue */
+struct vfpf_setup_q_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ struct vf_pf_rxq_params {
+ /* physical addresses */
+ aligned_u64 rcq_addr;
+ aligned_u64 rcq_np_addr;
+ aligned_u64 rxq_addr;
+ aligned_u64 sge_addr;
+
+ /* sb + hc info */
+ u8 vf_sb; /* index in hw_sbs[] */
+ u8 sb_index; /* Index in the SB */
+ u16 hc_rate; /* desired interrupts per sec. */
+ /* valid iff VFPF_QUEUE_FLG_HC */
+ /* rx buffer info */
+ u16 mtu;
+ u16 buf_sz;
+ u16 flags; /* VFPF_QUEUE_FLG_X flags */
+ u16 stat_id; /* valid iff VFPF_QUEUE_FLG_STATS */
+
+ /* valid iff VFPF_QUEUE_FLG_TPA */
+ u16 sge_buf_sz;
+ u16 tpa_agg_sz;
+ u8 max_sge_pkt;
+
+ u8 drop_flags; /* VFPF_QUEUE_DROP_X, for Linux VMs
+ * all the flags are turned off
+ */
+
+ u8 cache_line_log; /* VFPF_QUEUE_FLG_CACHE_ALIGN */
+ u8 padding;
+ } rxq;
+
+ struct vf_pf_txq_params {
+ /* physical addresses */
+ aligned_u64 txq_addr;
+
+ /* sb + hc info */
+ u8 vf_sb; /* index in hw_sbs[] */
+ u8 sb_index; /* Index in the SB */
+ u16 hc_rate; /* desired interrupts per sec. */
+ /* valid iff VFPF_QUEUE_FLG_HC */
+ u32 flags; /* VFPF_QUEUE_FLG_X flags */
+ u16 stat_id; /* valid iff VFPF_QUEUE_FLG_STATS */
+ u8 traffic_type; /* see in setup_context() */
+ u8 padding;
+ } txq;
+
+ u8 vf_qid; /* index in hw_qid[] */
+ u8 param_valid;
+#define VFPF_RXQ_VALID 0x01
+#define VFPF_TXQ_VALID 0x02
+ u8 padding[2];
+};
+
+/* Set Queue Filters */
+struct vfpf_q_mac_vlan_filter {
+ u32 flags;
+#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
+#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
+#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */
+ u8 mac[ETH_ALEN];
+ u16 vlan_tag;
+};
+
+/* configure queue filters */
+struct vfpf_set_q_filters_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u32 flags;
+#define VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED 0x01
+#define VFPF_SET_Q_FILTERS_MULTICAST_CHANGED 0x02
+#define VFPF_SET_Q_FILTERS_RX_MASK_CHANGED 0x04
+
+ u8 vf_qid; /* index in hw_qid[] */
+ u8 n_mac_vlan_filters;
+ u8 n_multicast;
+ u8 padding;
+
+#define PFVF_MAX_MAC_FILTERS 16
+#define PFVF_MAX_VLAN_FILTERS 16
+#define PFVF_MAX_FILTERS (PFVF_MAX_MAC_FILTERS +\
+ PFVF_MAX_VLAN_FILTERS)
+ struct vfpf_q_mac_vlan_filter filters[PFVF_MAX_FILTERS];
+
+#define PFVF_MAX_MULTICAST_PER_VF 32
+ u8 multicast[PFVF_MAX_MULTICAST_PER_VF][ETH_ALEN];
+
+ u32 rx_mask; /* see mask constants at the top of the file */
+};
+
+struct vfpf_tpa_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ struct vf_pf_tpa_client_info {
+ aligned_u64 sge_addr[PFVF_MAX_QUEUES_PER_VF];
+ u8 update_ipv4;
+ u8 update_ipv6;
+ u8 max_tpa_queues;
+ u8 max_sges_for_packet;
+ u8 complete_on_both_clients;
+ u8 dont_verify_thr;
+ u8 tpa_mode;
+ u16 sge_buff_size;
+ u16 max_agg_size;
+ u16 sge_pause_thr_low;
+ u16 sge_pause_thr_high;
+ } tpa_client_info;
+};
+
+/* close VF (disable VF) */
+struct vfpf_close_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u16 vf_id; /* for debug */
+ u8 padding[2];
+};
+
+/* release the VF's acquired resources */
+struct vfpf_release_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u16 vf_id;
+ u8 padding[2];
+};
+
+struct tlv_buffer_size {
+ u8 tlv_buffer[TLV_BUFFER_SIZE];
+};
+
+union vfpf_tlvs {
+ struct vfpf_first_tlv first_tlv;
+ struct vfpf_acquire_tlv acquire;
+ struct vfpf_init_tlv init;
+ struct vfpf_close_tlv close;
+ struct vfpf_q_op_tlv q_op;
+ struct vfpf_setup_q_tlv setup_q;
+ struct vfpf_set_q_filters_tlv set_q_filters;
+ struct vfpf_release_tlv release;
+ struct vfpf_rss_tlv update_rss;
+ struct vfpf_tpa_tlv update_tpa;
+ struct channel_list_end_tlv list_end;
+ struct tlv_buffer_size tlv_buf_size;
+};
+
+union pfvf_tlvs {
+ struct pfvf_general_resp_tlv general_resp;
+ struct pfvf_acquire_resp_tlv acquire_resp;
+ struct channel_list_end_tlv list_end;
+ struct tlv_buffer_size tlv_buf_size;
+};
+
+/* This is a structure which is allocated in the VF, which the PF may update
+ * when it deems it necessary to do so. The bulletin board is sampled
+ * periodically by the VF. A copy per VF is maintained in the PF to prevent
+ * loss of data upon multiple updates and the need for read-modify-write.
+ */
+struct pf_vf_bulletin_size {
+ u8 size[PF_VF_BULLETIN_SIZE];
+};
+
+struct pf_vf_bulletin_content {
+ u32 crc; /* crc of structure to ensure is not in
+ * mid-update
+ */
+ u16 version;
+ u16 length;
+
+ aligned_u64 valid_bitmap; /* bitmap indicating which fields
+ * hold valid values
+ */
+
+#define MAC_ADDR_VALID 0 /* alert the vf that a new mac address
+ * is available for it
+ */
+#define VLAN_VALID 1 /* when set, the vf should not access
+ * the vfpf channel
+ */
+#define CHANNEL_DOWN 2 /* vfpf channel is disabled. VFs are not
+ * to attempt to send messages on the
+ * channel after this bit is set
+ */
+ u8 mac[ETH_ALEN];
+ u8 mac_padding[2];
+
+ u16 vlan;
+ u8 vlan_padding[6];
+};
+
+union pf_vf_bulletin {
+ struct pf_vf_bulletin_content content;
+ struct pf_vf_bulletin_size size;
+};
+
+#define MAX_TLVS_IN_LIST 50
+
+enum channel_tlvs {
+ CHANNEL_TLV_NONE,
+ CHANNEL_TLV_ACQUIRE,
+ CHANNEL_TLV_INIT,
+ CHANNEL_TLV_SETUP_Q,
+ CHANNEL_TLV_SET_Q_FILTERS,
+ CHANNEL_TLV_ACTIVATE_Q,
+ CHANNEL_TLV_DEACTIVATE_Q,
+ CHANNEL_TLV_TEARDOWN_Q,
+ CHANNEL_TLV_CLOSE,
+ CHANNEL_TLV_RELEASE,
+ CHANNEL_TLV_UPDATE_RSS_DEPRECATED,
+ CHANNEL_TLV_PF_RELEASE_VF,
+ CHANNEL_TLV_LIST_END,
+ CHANNEL_TLV_FLR,
+ CHANNEL_TLV_PF_SET_MAC,
+ CHANNEL_TLV_PF_SET_VLAN,
+ CHANNEL_TLV_UPDATE_RSS,
+ CHANNEL_TLV_PHYS_PORT_ID,
+ CHANNEL_TLV_UPDATE_TPA,
+ CHANNEL_TLV_MAX
+};
+
+#endif /* CONFIG_BNX2X_SRIOV */
+#endif /* VF_PF_IF_H */