Diffstat (limited to 'drivers/net/ethernet/broadcom/bnx2x')
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 221
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 521
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 189
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 52
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 189
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 37
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h | 42
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 406
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 1440
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 14
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 721
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 57
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 2535
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 431
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 99
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 1018
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h | 74
23 files changed, 4217 insertions, 3858 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index dedbd76c033..8206a293e6b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
*/
@@ -26,8 +26,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.78.17-0"
-#define DRV_MODULE_RELDATE "2013/04/11"
+#define DRV_MODULE_VERSION "1.78.19-0"
+#define DRV_MODULE_RELDATE "2014/02/10"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
@@ -75,13 +75,22 @@ enum bnx2x_int_mode {
#define BNX2X_MSG_DCB 0x8000000
/* regular debug print */
+#define DP_INNER(fmt, ...) \
+ pr_notice("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ bp->dev ? (bp->dev->name) : "?", \
+ ##__VA_ARGS__);
+
#define DP(__mask, fmt, ...) \
do { \
if (unlikely(bp->msg_enable & (__mask))) \
- pr_notice("[%s:%d(%s)]" fmt, \
- __func__, __LINE__, \
- bp->dev ? (bp->dev->name) : "?", \
- ##__VA_ARGS__); \
+ DP_INNER(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define DP_AND(__mask, fmt, ...) \
+do { \
+ if (unlikely((bp->msg_enable & (__mask)) == __mask)) \
+ DP_INNER(fmt, ##__VA_ARGS__); \
} while (0)
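
The difference between DP and DP_AND above is any-bit versus all-bits matching against bp->msg_enable. A minimal userspace sketch of just the mask test (MSG_A, MSG_B and msg_enable are hypothetical stand-ins):

#include <stdio.h>

#define MSG_A 0x1
#define MSG_B 0x2

int main(void)
{
	unsigned int msg_enable = MSG_A;	/* only A enabled */
	unsigned int mask = MSG_A | MSG_B;

	if (msg_enable & mask)			/* DP: any bit of the mask */
		printf("DP fires\n");		/* printed: A is set */

	if ((msg_enable & mask) == mask)	/* DP_AND: all bits of the mask */
		printf("DP_AND fires\n");	/* not printed: B is clear */
	return 0;
}
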
#define DP_CONT(__mask, fmt, ...) \
@@ -246,8 +255,37 @@ enum {
BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
};
-#define BNX2X_CNIC_START_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) *\
+/* Use a value high enough to be above all the PFs, with a least significant
+ * nibble of 8, so that when cnic needs to come up with a CID for UIO to use
+ * to calculate a doorbell address according to the old doorbell configuration
+ * scheme (db_msg_sz 1 << 7 * cid + 0x40 DPM offset), it comes up with a valid
+ * number. We must avoid cid 8 for iSCSI since, under this method, the
+ * designated UIO cid would come out 0, which gets special handling that
+ * doesn't suit us. Therefore we ceil to the closest cid whose least
+ * significant nibble is 8, and if that is 8 itself we move forward to 0x18.
+ */
+
+#define BNX2X_1st_NON_L2_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * \
(bp)->max_cos)
+/* number of cids traversed by UIO's DPM addition to the doorbell */
+#define UIO_DPM 8
+/* roundup to DPM offset */
+#define UIO_ROUNDUP(bp) (roundup(BNX2X_1st_NON_L2_ETH_CID(bp), \
+ UIO_DPM))
+/* offset to nearest value which has lsb nibble matching DPM */
+#define UIO_CID_OFFSET(bp) ((UIO_ROUNDUP(bp) + UIO_DPM) % \
+ (UIO_DPM * 2))
+/* add offset to rounded-up cid to get a value which could be used with UIO */
+#define UIO_DPM_ALIGN(bp) (UIO_ROUNDUP(bp) + UIO_CID_OFFSET(bp))
+/* but wait - avoid UIO special case for cid 0 */
+#define UIO_DPM_CID0_OFFSET(bp) ((UIO_DPM * 2) * \
+ (UIO_DPM_ALIGN(bp) == UIO_DPM))
+/* Properly DPM-aligned CID, adjusted for the cid 0 special case */
+#define BNX2X_CNIC_START_ETH_CID(bp) (UIO_DPM_ALIGN(bp) + \
+ (UIO_DPM_CID0_OFFSET(bp)))
+/* how many cids were wasted - need this value for cid allocation */
+#define UIO_CID_PAD(bp) (BNX2X_CNIC_START_ETH_CID(bp) - \
+ BNX2X_1st_NON_L2_ETH_CID(bp))
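
A worked example of the CID alignment chain above, as a standalone sketch; the inputs 12 and 0 are hypothetical stand-ins for BNX2X_1st_NON_L2_ETH_CID(bp), and roundup() mirrors the kernel helper:

#include <stdio.h>

#define UIO_DPM 8
#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

static int cnic_start_eth_cid(int first_non_l2_cid)
{
	int up = roundup(first_non_l2_cid, UIO_DPM);
	int off = (up + UIO_DPM) % (UIO_DPM * 2);
	int align = up + off;

	/* avoid cid 8, whose designated UIO cid would come out 0 */
	return align + (UIO_DPM * 2) * (align == UIO_DPM);
}

int main(void)
{
	printf("%#x\n", cnic_start_eth_cid(12));	/* 0x18: lsb nibble is 8 */
	printf("%#x\n", cnic_start_eth_cid(0));		/* 0x18: bumped past cid 8 */
	return 0;
}
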
/* iSCSI L2 */
#define BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp))
/* FCoE L2 */
@@ -308,6 +346,7 @@ struct sw_tx_bd {
u8 flags;
/* Set on the first BD descriptor when there is a split BD */
#define BNX2X_TSO_SPLIT_BD (1<<0)
+#define BNX2X_HAS_SECOND_PBD (1<<1)
};
struct sw_rx_page {
@@ -443,7 +482,7 @@ struct bnx2x_agg_info {
u16 vlan_tag;
u16 len_on_bd;
u32 rxhash;
- bool l4_rxhash;
+ enum pkt_hash_types rxhash_type;
u16 gro_size;
u16 full_page;
};
@@ -486,19 +525,21 @@ struct bnx2x_fastpath {
struct napi_struct napi;
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state;
#define BNX2X_FP_STATE_IDLE 0
#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */
-#define BNX2X_FP_STATE_NAPI_YIELD (1 << 2) /* NAPI yielded this FP */
-#define BNX2X_FP_STATE_POLL_YIELD (1 << 3) /* poll yielded this FP */
+#define BNX2X_FP_STATE_DISABLED (1 << 2)
+#define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */
+#define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */
+#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
-#define BNX2X_FP_LOCKED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
+#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
/* protect state */
spinlock_t lock;
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
union host_hc_status_block status_blk;
/* chip independent shortcuts into sb structure */
@@ -572,7 +613,7 @@ struct bnx2x_fastpath {
#define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index]))
#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
{
spin_lock_init(&fp->lock);
@@ -584,7 +625,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
{
bool rc = true;
- spin_lock(&fp->lock);
+ spin_lock_bh(&fp->lock);
if (fp->state & BNX2X_FP_LOCKED) {
WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
@@ -593,7 +634,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
/* we don't care if someone yielded */
fp->state = BNX2X_FP_STATE_NAPI;
}
- spin_unlock(&fp->lock);
+ spin_unlock_bh(&fp->lock);
return rc;
}
@@ -602,14 +643,16 @@ static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
{
bool rc = false;
- spin_lock(&fp->lock);
+ spin_lock_bh(&fp->lock);
WARN_ON(fp->state &
(BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
rc = true;
- fp->state = BNX2X_FP_STATE_IDLE;
- spin_unlock(&fp->lock);
+
+ /* state ==> idle, unless currently disabled */
+ fp->state &= BNX2X_FP_STATE_DISABLED;
+ spin_unlock_bh(&fp->lock);
return rc;
}
@@ -640,7 +683,9 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
rc = true;
- fp->state = BNX2X_FP_STATE_IDLE;
+
+ /* state ==> idle, unless currently disabled */
+ fp->state &= BNX2X_FP_STATE_DISABLED;
spin_unlock_bh(&fp->lock);
return rc;
}
@@ -648,9 +693,23 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
/* true if a socket is polling, even if it did not get the lock */
static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
{
- WARN_ON(!(fp->state & BNX2X_FP_LOCKED));
+ WARN_ON(!(fp->state & BNX2X_FP_OWNED));
return fp->state & BNX2X_FP_USER_PEND;
}
+
+/* false if fp is currently owned */
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+ int rc = true;
+
+ spin_lock_bh(&fp->lock);
+ if (fp->state & BNX2X_FP_OWNED)
+ rc = false;
+ fp->state |= BNX2X_FP_STATE_DISABLED;
+ spin_unlock_bh(&fp->lock);
+
+ return rc;
+}
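
How the new DISABLED bit interacts with the owner bits, modeled as a standalone state machine (the spinlock and yield bookkeeping are omitted; names are local stand-ins). The napi-disable paths later in this diff spin on the disable call until it reports that no owner is left:

#include <stdbool.h>
#include <stdio.h>

#define ST_NAPI     (1 << 0)
#define ST_POLL     (1 << 1)
#define ST_DISABLED (1 << 2)
#define ST_OWNED    (ST_NAPI | ST_POLL)
#define ST_LOCKED   (ST_OWNED | ST_DISABLED)

static unsigned int state;

static bool lock_napi(void)
{
	if (state & ST_LOCKED)	/* owned or disabled: back off */
		return false;
	state = ST_NAPI;
	return true;
}

static void unlock_napi(void)
{
	state &= ST_DISABLED;	/* idle again, but DISABLED survives */
}

static bool ll_disable(void)
{
	bool rc = !(state & ST_OWNED);

	state |= ST_DISABLED;	/* set even while still owned */
	return rc;
}

int main(void)
{
	lock_napi();
	printf("%d\n", ll_disable());	/* 0: NAPI still owns the fastpath */
	unlock_napi();
	printf("%d\n", ll_disable());	/* 1: no owner left */
	printf("%d\n", lock_napi());	/* 0: DISABLED blocks new owners */
	return 0;
}
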
#else
static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
{
@@ -680,7 +739,11 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
{
return false;
}
-#endif /* CONFIG_NET_LL_RX_POLL */
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+ return true;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
/* Use 2500 as a mini-jumbo MTU for FCoE */
#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
@@ -825,15 +888,13 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */
-#define BNX2X_DB_SHIFT 7 /* 128 bytes*/
+#define BNX2X_DB_SHIFT 3 /* 8 bytes*/
#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
#error "Min DB doorbell stride is 8"
#endif
-#define DPM_TRIGER_TYPE 0x40
#define DOORBELL(bp, cid, val) \
do { \
- writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \
- DPM_TRIGER_TYPE); \
+ writel((u32)(val), bp->doorbells + (bp->db_size * (cid))); \
} while (0)
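
Address arithmetic implied by the stride change above: the old scheme spaced doorbells 128 bytes apart and rang them at the 0x40 DPM-trigger offset, while the new one packs them 8 bytes apart with no offset. A standalone illustration with a hypothetical base and cid:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uintptr_t base = 0x10000;	/* hypothetical doorbell BAR mapping */
	unsigned int cid = 5;

	uintptr_t old_db = base + (cid << 7) + 0x40;	/* 128 B stride + DPM trigger */
	uintptr_t new_db = base + (cid << 3);		/* 8 B stride, no offset */

	printf("old %#lx new %#lx\n",
	       (unsigned long)old_db, (unsigned long)new_db);
	return 0;
}
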
/* TX CSUM helpers */
@@ -1095,18 +1156,28 @@ struct bnx2x_port {
(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
/* slow path */
-
-/* slow path work-queue */
-extern struct workqueue_struct *bnx2x_wq;
-
#define BNX2X_MAX_NUM_OF_VFS 64
-#define BNX2X_VF_CID_WND 0
+#define BNX2X_VF_CID_WND 4 /* log num of queues per VF. HW config. */
#define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND)
-#define BNX2X_CLIENTS_PER_VF 1
-#define BNX2X_FIRST_VF_CID 256
+
+/* We need to reserve doorbell addresses for all VF and queue combinations */
#define BNX2X_VF_CIDS (BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF)
+
+/* The doorbell is configured to have the same number of CIDs for PFs and for
+ * VFs. For this reason the PF CID zone is as large as the VF zone.
+ */
+#define BNX2X_FIRST_VF_CID BNX2X_VF_CIDS
+#define BNX2X_MAX_NUM_VF_QUEUES 64
#define BNX2X_VF_ID_INVALID 0xFF
+/* the number of VF CIDS multiplied by the amount of bytes reserved for each
+ * cid must not exceed the size of the VF doorbell
+ */
+#define BNX2X_VF_BAR_SIZE 512
+#if (BNX2X_VF_BAR_SIZE < BNX2X_CIDS_PER_VF * (1 << BNX2X_DB_SHIFT))
+#error "VF doorbell bar size is 512"
+#endif
+
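
Spelling out the compile-time check above: with BNX2X_VF_CID_WND = 4, each VF owns 1 << 4 = 16 CIDs, and at the new 8-byte doorbell stride those consume 16 * 8 = 128 bytes, comfortably inside the 512-byte VF doorbell BAR. The same invariant as a standalone C11 assertion:

#include <assert.h>

#define VF_CID_WND	4			/* log2 of queues per VF */
#define CIDS_PER_VF	(1 << VF_CID_WND)	/* 16 */
#define DB_SHIFT	3			/* 8-byte doorbell stride */
#define VF_BAR_SIZE	512

static_assert(CIDS_PER_VF * (1 << DB_SHIFT) <= VF_BAR_SIZE,
	      "VF doorbell BAR would overflow");

int main(void)
{
	return 0;
}
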
/*
* The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
* controlled by the number of fast-path status blocks supported by the
@@ -1156,8 +1227,9 @@ union cdu_context {
/* TM (timers) host DB constants */
#define TM_ILT_PAGE_SZ_HW 0
#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
-/* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
-#define TM_CONN_NUM 1024
+#define TM_CONN_NUM (BNX2X_FIRST_VF_CID + \
+ BNX2X_VF_CIDS + \
+ CNIC_ISCSI_CID_MAX)
#define TM_ILT_SZ (8 * TM_CONN_NUM)
#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
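
Sizing arithmetic behind the new TM_CONN_NUM: BNX2X_VF_CIDS = 64 VFs * 16 CIDs = 1024 and BNX2X_FIRST_VF_CID equals that, while the iSCSI CID count comes from the cnic definitions (256 is used as a stand-in below). Each connection needs 8 bytes of timer context, paged into 4K ILT lines:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int first_vf_cid = 1024;	/* = BNX2X_VF_CIDS = 64 * 16 */
	int vf_cids = 1024;
	int iscsi_cids = 256;		/* stand-in for CNIC_ISCSI_CID_MAX */
	int conn = first_vf_cid + vf_cids + iscsi_cids;		/* 2304 */
	int ilt_sz = 8 * conn;					/* 18432 bytes */

	printf("%d ILT lines\n", DIV_ROUND_UP(ilt_sz, 4096));	/* 5 */
	return 0;
}
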
@@ -1195,6 +1267,7 @@ struct bnx2x_slowpath {
union {
struct client_init_ramrod_data init_data;
struct client_update_ramrod_data update_data;
+ struct tpa_update_ramrod_data tpa_data;
} q_rdata;
union {
@@ -1208,7 +1281,10 @@ struct bnx2x_slowpath {
* Therefore, if they would have been defined in the same union,
* data can get corrupted.
*/
- struct afex_vif_list_ramrod_data func_afex_rdata;
+ union {
+ struct afex_vif_list_ramrod_data viflist_data;
+ struct function_update_data func_update;
+ } func_afex_rdata;
/* used by dmae command executer */
struct dmae_command dmae[MAX_DMAE_C];
@@ -1323,7 +1399,7 @@ struct bnx2x_fw_stats_data {
};
/* Public slow path states */
-enum {
+enum sp_rtnl_flag {
BNX2X_SP_RTNL_SETUP_TC,
BNX2X_SP_RTNL_TX_TIMEOUT,
BNX2X_SP_RTNL_FAN_FAILURE,
@@ -1331,8 +1407,15 @@ enum {
BNX2X_SP_RTNL_ENABLE_SRIOV,
BNX2X_SP_RTNL_VFPF_MCAST,
BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
- BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
+ BNX2X_SP_RTNL_RX_MODE,
BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ BNX2X_SP_RTNL_TX_STOP,
+ BNX2X_SP_RTNL_GET_DRV_VERSION,
+};
+
+enum bnx2x_iov_flag {
+ BNX2X_IOV_HANDLE_VF_MSG,
+ BNX2X_IOV_HANDLE_FLR,
};
struct bnx2x_prev_path_list {
@@ -1484,7 +1567,6 @@ struct bnx2x {
#define PCI_32BIT_FLAG (1 << 1)
#define ONE_PORT_FLAG (1 << 2)
#define NO_WOL_FLAG (1 << 3)
-#define USING_DAC_FLAG (1 << 4)
#define USING_MSIX_FLAG (1 << 5)
#define USING_MSI_FLAG (1 << 6)
#define DISABLE_MSI_FLAG (1 << 7)
@@ -1497,11 +1579,15 @@ struct bnx2x {
#define NO_ISCSI_FLAG (1 << 14)
#define NO_FCOE_FLAG (1 << 15)
#define BC_SUPPORTS_PFC_STATS (1 << 17)
+#define TX_SWITCHING (1 << 18)
#define BC_SUPPORTS_FCOE_FEATURES (1 << 19)
#define USING_SINGLE_MSIX_FLAG (1 << 20)
#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21)
#define IS_VF_FLAG (1 << 22)
#define INTERRUPTS_ENABLED_FLAG (1 << 23)
+#define BC_SUPPORTS_RMMOD_CMD (1 << 24)
+#define HAS_PHYS_PORT_ID (1 << 25)
+#define AER_ENABLED (1 << 26)
#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG)
@@ -1527,10 +1613,11 @@ struct bnx2x {
*/
bool fcoe_init;
- int pm_cap;
int mrrs;
struct delayed_work sp_task;
+ struct delayed_work iov_task;
+
atomic_t interrupt_occurred;
struct delayed_work sp_rtnl_task;
@@ -1578,7 +1665,7 @@ struct bnx2x {
u16 rx_ticks_int;
u16 rx_ticks;
/* Maximal coalescing timeout in us */
-#define BNX2X_MAX_COALESCE_TOUT (0xf0*12)
+#define BNX2X_MAX_COALESCE_TOUT (0xff*BNX2X_BTR)
u32 lin_cnt;
@@ -1621,6 +1708,10 @@ struct bnx2x {
struct bnx2x_slowpath *slowpath;
dma_addr_t slowpath_mapping;
+ /* Mechanism protecting the drv_info_to_mcp */
+ struct mutex drv_info_mutex;
+ bool drv_info_mng_owner;
+
/* Total number of FW statistics requests */
u8 fw_stats_num;
@@ -1647,10 +1738,10 @@ struct bnx2x {
dma_addr_t fw_stats_data_mapping;
int fw_stats_data_sz;
- /* For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB
+ /* For max 1024 cids (VF RSS), 32KB ILT page size and 1KB
* context size we need 8 ILT entries.
*/
-#define ILT_MAX_L2_LINES 8
+#define ILT_MAX_L2_LINES 32
struct hw_context context[ILT_MAX_L2_LINES];
struct bnx2x_ilt *ilt;
@@ -1666,10 +1757,11 @@ struct bnx2x {
* Maximum CID count that might be required by the bnx2x:
* Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
*/
+
#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
- + 2 * CNIC_SUPPORT(bp))
+ + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp)))
#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
- + 2 * CNIC_SUPPORT(bp))
+ + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp)))
#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
ILT_PAGE_CIDS))
@@ -1809,6 +1901,9 @@ struct bnx2x {
/* operation indication for the sp_rtnl task */
unsigned long sp_rtnl_state;
+ /* Indication of the IOV tasks */
+ unsigned long iov_task_state;
+
/* DCBX Negotiation results */
struct dcbx_features dcbx_local_feat;
u32 dcbx_error;
@@ -1830,6 +1925,10 @@ struct bnx2x {
int fp_array_size;
u32 dump_preset_idx;
+ bool stats_started;
+ struct semaphore stats_sema;
+
+ u8 phys_port_id[ETH_ALEN];
};
/* Tx queues may be less or equal to Rx queues */
@@ -1864,7 +1963,7 @@ extern int num_queues;
#define FUNC_FLG_TPA 0x0008
#define FUNC_FLG_SPQ 0x0010
#define FUNC_FLG_LEADING 0x0020 /* PF only */
-
+#define FUNC_FLG_LEADING_STATS 0x0040
struct bnx2x_func_init_params {
/* dma */
dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */
@@ -2005,7 +2104,6 @@ int bnx2x_del_all_macs(struct bnx2x *bp,
void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
u8 vf_valid, int fw_sb_id, int igu_sb_id);
-u32 bnx2x_get_pretend_reg(struct bnx2x *bp);
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode);
@@ -2026,7 +2124,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
u8 src_type, u8 dst_type);
-int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae);
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+ u32 *comp);
/* FLR related routines */
u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp);
@@ -2064,9 +2163,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
bool is_pf);
-#define BNX2X_ILT_ZALLOC(x, y, size) \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
- GFP_KERNEL | __GFP_ZERO)
+#define BNX2X_ILT_ZALLOC(x, y, size) \
+ x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL)
#define BNX2X_ILT_FREE(x, y, size) \
do { \
@@ -2186,7 +2284,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define BNX2X_NUM_TESTS_SF 7
#define BNX2X_NUM_TESTS_MF 3
#define BNX2X_NUM_TESTS(bp) (IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \
- BNX2X_NUM_TESTS_SF)
+ IS_VF(bp) ? 0 : BNX2X_NUM_TESTS_SF)
#define BNX2X_PHY_LOOPBACK 0
#define BNX2X_MAC_LOOPBACK 1
@@ -2388,7 +2486,8 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
(!((me_reg) & ME_REG_VF_ERR)))
-int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code);
+int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err);
+
/* Congestion management fairness mode */
#define CMNG_FNS_NONE 0
#define CMNG_FNS_MINMAX 1
@@ -2446,9 +2545,13 @@ enum {
#define NUM_MACS 8
-enum bnx2x_pci_bus_speed {
- BNX2X_PCI_LINK_SPEED_2500 = 2500,
- BNX2X_PCI_LINK_SPEED_5000 = 5000,
- BNX2X_PCI_LINK_SPEED_8000 = 8000
-};
+void bnx2x_set_local_cmng(struct bnx2x *bp);
+
+void bnx2x_update_mng_version(struct bnx2x *bp);
+
+#define MCPR_SCRATCH_BASE(bp) \
+ (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
+
+#define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX))
+
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ee350bde181..c43e7238de2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
@@ -30,6 +30,47 @@
#include "bnx2x_init.h"
#include "bnx2x_sp.h"
+static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
+static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
+static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
+static int bnx2x_poll(struct napi_struct *napi, int budget);
+
+static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
+{
+ int i;
+
+ /* Add NAPI objects */
+ for_each_rx_queue_cnic(bp, i) {
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
+ bnx2x_poll, NAPI_POLL_WEIGHT);
+ napi_hash_add(&bnx2x_fp(bp, i, napi));
+ }
+}
+
+static void bnx2x_add_all_napi(struct bnx2x *bp)
+{
+ int i;
+
+ /* Add NAPI objects */
+ for_each_eth_queue(bp, i) {
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
+ bnx2x_poll, NAPI_POLL_WEIGHT);
+ napi_hash_add(&bnx2x_fp(bp, i, napi));
+ }
+}
+
+static int bnx2x_calc_num_queues(struct bnx2x *bp)
+{
+ int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
+
+ /* Reduce memory usage in kdump environment by using only one queue */
+ if (reset_devices)
+ nq = 1;
+
+ nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
+ return nq;
+}
+
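
bnx2x_calc_num_queues() above leans on the GNU `?:` (elvis) extension, where `a ?: b` means `a ? a : b`. A portable sketch of the same selection and clamping, with hypothetical inputs:

#include <stdio.h>

static int calc_num_queues(int module_param, int rss_default, int hw_max,
			   int kdump)
{
	int nq = module_param ? module_param : rss_default;

	if (kdump)		/* reset_devices: keep memory use minimal */
		nq = 1;
	if (nq < 1)		/* clamp(nq, 1, hw_max) */
		nq = 1;
	if (nq > hw_max)
		nq = hw_max;
	return nq;
}

int main(void)
{
	printf("%d\n", calc_num_queues(0, 8, 16, 0));	/* 8: RSS default */
	printf("%d\n", calc_num_queues(32, 8, 16, 0));	/* 16: clamped to hw */
	printf("%d\n", calc_num_queues(32, 8, 16, 1));	/* 1: kdump */
	return 0;
}
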
/**
* bnx2x_move_fp - move content of the fastpath structure.
*
@@ -53,6 +94,7 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
int old_max_eth_txqs, new_max_eth_txqs;
int old_txdata_index = 0, new_txdata_index = 0;
+ struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
/* Copy the NAPI object as it has been already initialized */
from_fp->napi = to_fp->napi;
@@ -61,6 +103,11 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
memcpy(to_fp, from_fp, sizeof(*to_fp));
to_fp->index = to;
+ /* Retain the tpa_info of the original `to' version as we don't want
+ * 2 FPs to contain the same tpa_info pointer.
+ */
+ to_fp->tpa_info = old_tpa_info;
+
/* move sp_objs contents as well, as their indices match fp ones */
memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
@@ -139,7 +186,7 @@ static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
}
}
-int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
+int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
/* free skb in the packet ring at pos idx
* return idx of last bd freed
@@ -154,6 +201,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
struct sk_buff *skb = tx_buf->skb;
u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
int nbd;
+ u16 split_bd_len = 0;
/* prefetch skb end pointer to speedup dev_kfree_skb() */
prefetch(&skb->end);
@@ -161,10 +209,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
txdata->txq_index, idx, tx_buf, skb);
- /* unmap first bd */
tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
- dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
- BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
@@ -182,12 +227,25 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
--nbd;
bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
- /* ...and the TSO split header bd since they have no mapping */
+ if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
+ /* Skip second parse bd... */
+ --nbd;
+ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+ }
+
+ /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
+ tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
+ split_bd_len = BD_UNMAP_LEN(tx_data_bd);
--nbd;
bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
}
+ /* unmap first bd */
+ dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+ BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
+ DMA_TO_DEVICE);
+
/* now free frags */
while (nbd > 0) {
@@ -348,7 +406,7 @@ static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
*/
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
const struct eth_fast_path_rx_cqe *cqe,
- bool *l4_rxhash)
+ enum pkt_hash_types *rxhash_type)
{
/* Get Toeplitz hash from CQE */
if ((bp->dev->features & NETIF_F_RXHASH) &&
@@ -356,11 +414,13 @@ static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
enum eth_rss_hash_type htype;
htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
- *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
- (htype == TCP_IPV6_HASH_TYPE);
+ *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
+ (htype == TCP_IPV6_HASH_TYPE)) ?
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
+
return le32_to_cpu(cqe->rss_hash_result);
}
- *l4_rxhash = false;
+ *rxhash_type = PKT_HASH_TYPE_NONE;
return 0;
}
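
The conversion above replaces the old l4_rxhash boolean with the kernel's pkt_hash_types enum: TCP hash types map to L4, any other matched type to L3, and no hash to NONE. The decision table as a standalone sketch (enum values are local stand-ins):

#include <stdio.h>

enum pkt_hash { HASH_NONE, HASH_L3, HASH_L4 };
enum rss_htype { H_IPV4, H_TCP_IPV4, H_IPV6, H_TCP_IPV6 };

static enum pkt_hash hash_type(int rxhash_enabled, enum rss_htype h)
{
	if (!rxhash_enabled)
		return HASH_NONE;
	return (h == H_TCP_IPV4 || h == H_TCP_IPV6) ? HASH_L4 : HASH_L3;
}

int main(void)
{
	printf("%d %d %d\n",
	       hash_type(1, H_TCP_IPV4),	/* 2: L4 */
	       hash_type(1, H_IPV6),		/* 1: L3 */
	       hash_type(0, H_TCP_IPV6));	/* 0: NONE */
	return 0;
}
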
@@ -414,7 +474,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
tpa_info->tpa_state = BNX2X_TPA_START;
tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
tpa_info->placement_offset = cqe->placement_offset;
- tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
+ tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
if (fp->mode == TPA_MODE_GRO) {
u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
@@ -484,10 +544,10 @@ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}
-static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
- struct bnx2x_fastpath *fp, u16 index)
+static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ u16 index, gfp_t gfp_mask)
{
- struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
+ struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
dma_addr_t mapping;
@@ -566,7 +626,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* If we fail to allocate a substitute page, we simply stop
where we are and drop the whole packet */
- err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
+ err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
if (unlikely(err)) {
bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
return err;
@@ -610,12 +670,17 @@ static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
kfree(data);
}
-static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
+static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
- if (fp->rx_frag_size)
+ if (fp->rx_frag_size) {
+ /* GFP_KERNEL allocations are used only during initialization */
+ if (unlikely(gfp_mask & __GFP_WAIT))
+ return (void *)__get_free_page(gfp_mask);
+
return netdev_alloc_frag(fp->rx_frag_size);
+ }
- return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
+ return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}
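
The gfp_mask plumbing above lets one allocator serve both callers: blocking, init-time allocations (__GFP_WAIT set) take a whole free page, while atomic hot-path allocations keep using the per-CPU frag cache. A userspace caricature of the dispatch, with malloc standing in for all three kernel allocators:

#include <stdlib.h>

#define GFP_WAIT	0x1	/* stand-in for __GFP_WAIT */
#define PAGE_SIZE	4096
#define SKB_PAD		64

static void *frag_alloc(size_t frag_size, size_t buf_size, unsigned int gfp)
{
	if (frag_size) {
		if (gfp & GFP_WAIT)		/* init path: may sleep */
			return malloc(PAGE_SIZE);
		return malloc(frag_size);	/* hot path: frag cache */
	}
	return malloc(buf_size + SKB_PAD);	/* large buffers: kmalloc */
}

int main(void)
{
	free(frag_alloc(2048, 0, GFP_WAIT));
	return 0;
}
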
#ifdef CONFIG_INET
@@ -670,6 +735,7 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
}
#endif
+ skb_record_rx_queue(skb, fp->rx_queue);
napi_gro_receive(&fp->napi, skb);
}
@@ -695,7 +761,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
goto drop;
/* Try to allocate the new data */
- new_data = bnx2x_frag_alloc(fp);
+ new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
/* Unmap skb in the pool anyway, as we are going to change
pool entry status to BNX2X_TPA_STOP even if new skb allocation
fails. */
@@ -716,8 +782,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
skb_reserve(skb, pad + NET_SKB_PAD);
skb_put(skb, len);
- skb->rxhash = tpa_info->rxhash;
- skb->l4_rxhash = tpa_info->l4_rxhash;
+ skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
skb->protocol = eth_type_trans(skb, bp->dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -738,7 +803,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return;
}
- bnx2x_frag_free(fp, new_data);
+ if (new_data)
+ bnx2x_frag_free(fp, new_data);
drop:
/* drop the packet and keep the buffer in the bin */
DP(NETIF_MSG_RX_STATUS,
@@ -746,15 +812,15 @@ drop:
bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}
-static int bnx2x_alloc_rx_data(struct bnx2x *bp,
- struct bnx2x_fastpath *fp, u16 index)
+static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ u16 index, gfp_t gfp_mask)
{
u8 *data;
struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
dma_addr_t mapping;
- data = bnx2x_frag_alloc(fp);
+ data = bnx2x_frag_alloc(fp, gfp_mask);
if (unlikely(data == NULL))
return -ENOMEM;
@@ -800,7 +866,7 @@ void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
-int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
+static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
struct bnx2x *bp = fp->bp;
u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
@@ -813,6 +879,8 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
if (unlikely(bp->panic))
return 0;
#endif
+ if (budget <= 0)
+ return rx_pkt;
bd_cons = fp->rx_bd_cons;
bd_prod = fp->rx_bd_prod;
@@ -834,7 +902,8 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
enum eth_rx_cqe_type cqe_fp_type;
u16 len, pad, queue;
u8 *data;
- bool l4_rxhash;
+ u32 rxhash;
+ enum pkt_hash_types rxhash_type;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -844,6 +913,18 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
bd_prod = RX_BD(bd_prod);
bd_cons = RX_BD(bd_cons);
+ /* A rmb() is required to ensure that the CQE is not read
+ * before it is written by the adapter DMA. PCI ordering
+ * rules will make sure the other fields are written before
+ * the marker at the end of struct eth_fast_path_rx_cqe,
+ * but without rmb() a weakly ordered processor can process
+ * stale data. Without the barrier the TPA state-machine might
+ * enter an inconsistent state, and the kernel stack might be
+ * provided with an incorrect packet description - these lead
+ * to various kernel crashes.
+ */
+ rmb();
+
cqe_fp_flags = cqe_fp->type_error_flags;
cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
@@ -947,7 +1028,8 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
memcpy(skb->data, data + pad, len);
bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
} else {
- if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
+ if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
+ GFP_ATOMIC) == 0)) {
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size,
@@ -974,8 +1056,8 @@ reuse_rx:
skb->protocol = eth_type_trans(skb, bp->dev);
/* Set Toeplitz hash for a none-LRO skb */
- skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
- skb->l4_rxhash = l4_rxhash;
+ rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
+ skb_set_hash(skb, rxhash, rxhash_type);
skb_checksum_none_assert(skb);
@@ -1307,7 +1389,8 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
struct sw_rx_bd *first_buf =
&tpa_info->first_buf;
- first_buf->data = bnx2x_frag_alloc(fp);
+ first_buf->data =
+ bnx2x_frag_alloc(fp, GFP_KERNEL);
if (!first_buf->data) {
BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
j);
@@ -1329,7 +1412,8 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
for (i = 0, ring_prod = 0;
i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
- if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
+ if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
+ GFP_KERNEL) < 0) {
BNX2X_ERR("was only able to allocate %d rx sges\n",
i);
BNX2X_ERR("disabling TPA for queue[%d]\n",
@@ -1466,7 +1550,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
}
}
-void bnx2x_free_skbs_cnic(struct bnx2x *bp)
+static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
{
bnx2x_free_tx_skbs_cnic(bp);
bnx2x_free_rx_skbs_cnic(bp);
@@ -1579,36 +1663,16 @@ int bnx2x_enable_msix(struct bnx2x *bp)
DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
msix_vec);
- rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
-
+ rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
+ BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
/*
* reconfigure number of tx/rx queues according to available
* MSI-X vectors
*/
- if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
- /* how less vectors we will have? */
- int diff = msix_vec - rc;
-
- BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
-
- rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
-
- if (rc) {
- BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
- goto no_msix;
- }
- /*
- * decrease number of queues by number of unallocated entries
- */
- bp->num_ethernet_queues -= diff;
- bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
-
- BNX2X_DEV_INFO("New queue configuration set: %d\n",
- bp->num_queues);
- } else if (rc > 0) {
+ if (rc == -ENOSPC) {
/* Get by with single vector */
- rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
- if (rc) {
+ rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
+ if (rc < 0) {
BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
rc);
goto no_msix;
@@ -1621,8 +1685,22 @@ int bnx2x_enable_msix(struct bnx2x *bp)
bp->num_ethernet_queues = 1;
bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
} else if (rc < 0) {
- BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
+ BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
goto no_msix;
+ } else if (rc < msix_vec) {
+ /* how many fewer vectors do we have? */
+ int diff = msix_vec - rc;
+
+ BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
+
+ /*
+ * decrease number of queues by number of unallocated entries
+ */
+ bp->num_ethernet_queues -= diff;
+ bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
+
+ BNX2X_DEV_INFO("New queue configuration set: %d\n",
+ bp->num_queues);
}
bp->flags |= USING_MSIX_FLAG;
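
pci_enable_msix_range() collapses the old retry loop: it returns the number of vectors actually granted, somewhere in [minvec, maxvec], or a negative errno, with -ENOSPC meaning even minvec could not be met. A toy model of that contract, where avail plays the platform's remaining vector budget:

#include <errno.h>
#include <stdio.h>

static int enable_msix_range(int avail, int minvec, int maxvec)
{
	int granted = avail < maxvec ? avail : maxvec;

	return granted >= minvec ? granted : -ENOSPC;
}

int main(void)
{
	printf("%d\n", enable_msix_range(16, 2, 9));	/* 9: fully granted */
	printf("%d\n", enable_msix_range(5, 2, 9));	/* 5: shrink queue count */
	printf("%d\n", enable_msix_range(1, 2, 9));	/* -ENOSPC: try 1 vector */
	return 0;
}
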
@@ -1775,26 +1853,22 @@ static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
{
int i;
- local_bh_disable();
for_each_rx_queue_cnic(bp, i) {
napi_disable(&bnx2x_fp(bp, i, napi));
- while (!bnx2x_fp_lock_napi(&bp->fp[i]))
- mdelay(1);
+ while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+ usleep_range(1000, 2000);
}
- local_bh_enable();
}
static void bnx2x_napi_disable(struct bnx2x *bp)
{
int i;
- local_bh_disable();
for_each_eth_queue(bp, i) {
napi_disable(&bnx2x_fp(bp, i, napi));
- while (!bnx2x_fp_lock_napi(&bp->fp[i]))
- mdelay(1);
+ while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+ usleep_range(1000, 2000);
}
- local_bh_enable();
}
void bnx2x_netif_start(struct bnx2x *bp)
@@ -1817,7 +1891,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
bnx2x_napi_disable_cnic(bp);
}
-u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -1839,7 +1914,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
}
/* select a non-FCoE queue */
- return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
+ return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
}
void bnx2x_set_num_queues(struct bnx2x *bp)
@@ -1942,7 +2017,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
}
}
-static int bnx2x_init_rss_pf(struct bnx2x *bp)
+static int bnx2x_init_rss(struct bnx2x *bp)
{
int i;
u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
@@ -1966,8 +2041,8 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp)
return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
}
-int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
- bool config_hash)
+int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
+ bool config_hash, bool enable)
{
struct bnx2x_config_rss_params params = {NULL};
@@ -1982,17 +2057,21 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
- __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
-
- /* RSS configuration */
- __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
- __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
- __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
- __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
- if (rss_obj->udp_rss_v4)
- __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
- if (rss_obj->udp_rss_v6)
- __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
+ if (enable) {
+ __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
+
+ /* RSS configuration */
+ __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
+ if (rss_obj->udp_rss_v4)
+ __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
+ if (rss_obj->udp_rss_v6)
+ __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
+ } else {
+ __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
+ }
/* Hash bits */
params.rss_result_mask = MULTI_MASK;
@@ -2001,11 +2080,14 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
if (config_hash) {
/* RSS keys */
- prandom_bytes(params.rss_key, sizeof(params.rss_key));
+ prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
}
- return bnx2x_config_rss(bp, &params);
+ if (IS_PF(bp))
+ return bnx2x_config_rss(bp, &params);
+ else
+ return bnx2x_vfpf_config_rss(bp, &params);
}
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
@@ -2060,7 +2142,11 @@ void bnx2x_squeeze_objects(struct bnx2x *bp)
rparam.mcast_obj = &bp->mcast_obj;
__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
- /* Add a DEL command... */
+ /* Add a DEL command... - Since we're doing a driver cleanup only,
+ * we take a lock surrounding both the initial send and the CONTs,
+ * as we don't want a true completion to disrupt us in the middle.
+ */
+ netif_addr_lock_bh(bp->dev);
rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
if (rc < 0)
BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
@@ -2072,11 +2158,13 @@ void bnx2x_squeeze_objects(struct bnx2x *bp)
if (rc < 0) {
BNX2X_ERR("Failed to clean multi-cast object: %d\n",
rc);
+ netif_addr_unlock_bh(bp->dev);
return;
}
rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
}
+ netif_addr_unlock_bh(bp->dev);
}
#ifndef BNX2X_STOP_ON_ERROR
@@ -2165,8 +2253,10 @@ static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
sizeof(struct per_queue_stats) * num_queue_stats +
sizeof(struct stats_counter);
- BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
- bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+ bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+ if (!bp->fw_stats)
+ goto alloc_mem_err;
/* Set shortcuts */
bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
@@ -2235,7 +2325,7 @@ static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
* virtualized environments a pf from another VM may have already
* initialized the device including loading FW
*/
-int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
+int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
{
/* is another pf loaded on this engine? */
if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
@@ -2254,8 +2344,12 @@ int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
/* abort nic load if version mismatch */
if (my_fw != loaded_fw) {
- BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
- loaded_fw, my_fw);
+ if (print_err)
+ BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
+ loaded_fw, my_fw);
+ else
+ BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
+ loaded_fw, my_fw);
return -EBUSY;
}
}
@@ -2268,16 +2362,16 @@ static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
int path = BP_PATH(bp);
DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- load_count[path][0]++;
- load_count[path][1 + port]++;
+ path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+ bnx2x_load_count[path][2]);
+ bnx2x_load_count[path][0]++;
+ bnx2x_load_count[path][1 + port]++;
DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- if (load_count[path][0] == 1)
+ path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+ bnx2x_load_count[path][2]);
+ if (bnx2x_load_count[path][0] == 1)
return FW_MSG_CODE_DRV_LOAD_COMMON;
- else if (load_count[path][1 + port] == 1)
+ else if (bnx2x_load_count[path][1 + port] == 1)
return FW_MSG_CODE_DRV_LOAD_PORT;
else
return FW_MSG_CODE_DRV_LOAD_FUNCTION;
@@ -2432,9 +2526,7 @@ int bnx2x_load_cnic(struct bnx2x *bp)
}
/* Initialize Rx filter. */
- netif_addr_lock_bh(bp->dev);
- bnx2x_set_rx_mode(bp->dev);
- netif_addr_unlock_bh(bp->dev);
+ bnx2x_set_rx_mode_inner(bp);
/* re-read iscsi info */
bnx2x_get_iscsi_info(bp);
@@ -2456,8 +2548,7 @@ load_error_cnic2:
load_error_cnic1:
bnx2x_napi_disable_cnic(bp);
/* Update the number of queues without the cnic queues */
- rc = bnx2x_set_real_num_queues(bp, 0);
- if (rc)
+ if (bnx2x_set_real_num_queues(bp, 0))
BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
load_error_cnic0:
BNX2X_ERR("CNIC-related load failed\n");
@@ -2520,10 +2611,6 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}
}
- /* Allocated memory for FW statistics */
- if (bnx2x_alloc_fw_stats_mem(bp))
- LOAD_ERROR_EXIT(bp, load_error0);
-
/* needs to be done after alloc mem, since it's self-adjusting to the amount
* of memory available for RSS queues
*/
@@ -2533,6 +2620,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
LOAD_ERROR_EXIT(bp, load_error0);
}
+ /* Allocate memory for FW statistics */
+ if (bnx2x_alloc_fw_stats_mem(bp))
+ LOAD_ERROR_EXIT(bp, load_error0);
+
/* request pf to initialize status blocks */
if (IS_VF(bp)) {
rc = bnx2x_vfpf_init(bp);
@@ -2573,7 +2664,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
LOAD_ERROR_EXIT(bp, load_error1);
/* what did mcp say? */
- rc = bnx2x_nic_load_analyze_req(bp, load_code);
+ rc = bnx2x_compare_fw_ver(bp, load_code, true);
if (rc) {
bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
LOAD_ERROR_EXIT(bp, load_error2);
@@ -2641,38 +2732,32 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* initialize FW coalescing state machines in RAM */
bnx2x_update_coalesce(bp);
+ }
- /* setup the leading queue */
- rc = bnx2x_setup_leading(bp);
- if (rc) {
- BNX2X_ERR("Setup leading failed!\n");
- LOAD_ERROR_EXIT(bp, load_error3);
- }
-
- /* set up the rest of the queues */
- for_each_nondefault_eth_queue(bp, i) {
- rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
- if (rc) {
- BNX2X_ERR("Queue setup failed\n");
- LOAD_ERROR_EXIT(bp, load_error3);
- }
- }
+ /* setup the leading queue */
+ rc = bnx2x_setup_leading(bp);
+ if (rc) {
+ BNX2X_ERR("Setup leading failed!\n");
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
- /* setup rss */
- rc = bnx2x_init_rss_pf(bp);
+ /* set up the rest of the queues */
+ for_each_nondefault_eth_queue(bp, i) {
+ if (IS_PF(bp))
+ rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
+ else /* VF */
+ rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
if (rc) {
- BNX2X_ERR("PF RSS init failed\n");
+ BNX2X_ERR("Queue %d setup failed\n", i);
LOAD_ERROR_EXIT(bp, load_error3);
}
+ }
- } else { /* vf */
- for_each_eth_queue(bp, i) {
- rc = bnx2x_vfpf_setup_q(bp, i);
- if (rc) {
- BNX2X_ERR("Queue setup failed\n");
- LOAD_ERROR_EXIT(bp, load_error3);
- }
- }
+ /* setup rss */
+ rc = bnx2x_init_rss(bp);
+ if (rc) {
+ BNX2X_ERR("PF RSS init failed\n");
+ LOAD_ERROR_EXIT(bp, load_error3);
}
/* Now that clients are configured we are ready to work */
@@ -2704,9 +2789,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Start fast path */
/* Initialize Rx filter. */
- netif_addr_lock_bh(bp->dev);
- bnx2x_set_rx_mode(bp->dev);
- netif_addr_unlock_bh(bp->dev);
+ bnx2x_set_rx_mode_inner(bp);
/* Start the Tx */
switch (load_mode) {
@@ -2717,7 +2800,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
case LOAD_OPEN:
netif_tx_start_all_queues(bp->dev);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
break;
case LOAD_DIAG:
@@ -2740,6 +2823,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (CNIC_ENABLED(bp))
bnx2x_load_cnic(bp);
+ if (IS_PF(bp))
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
+
if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
/* mark driver is loaded in shmem2 */
u32 val;
@@ -2795,8 +2881,8 @@ load_error1:
if (IS_PF(bp))
bnx2x_clear_pf_load(bp);
load_error0:
- bnx2x_free_fp_mem(bp);
bnx2x_free_fw_stats_mem(bp);
+ bnx2x_free_fp_mem(bp);
bnx2x_free_mem(bp);
return rc;
@@ -2942,6 +3028,10 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
bp->port.pmf = 0;
+ /* clear pending work in rtnl task */
+ bp->sp_rtnl_state = 0;
+ smp_mb();
+
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
if (CNIC_LOADED(bp))
@@ -2956,11 +3046,16 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
if (IS_PF(bp)) {
if (CNIC_LOADED(bp))
bnx2x_free_mem_cnic(bp);
- bnx2x_free_mem(bp);
}
+ bnx2x_free_mem(bp);
+
bp->state = BNX2X_STATE_CLOSED;
bp->cnic_loaded = false;
+ /* Clear driver version indication in shmem */
+ if (IS_PF(bp))
+ bnx2x_update_mng_version(bp);
+
/* Check if there are pending parity attentions. If there are - set
* RECOVERY_IN_PROGRESS.
*/
@@ -2990,16 +3085,16 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
u16 pmcsr;
/* If there is no power capability, silently succeed */
- if (!bp->pm_cap) {
+ if (!bp->pdev->pm_cap) {
BNX2X_DEV_INFO("No power capability. Breaking.\n");
return 0;
}
- pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
+ pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
switch (state) {
case PCI_D0:
- pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+ pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
PCI_PM_CTRL_PME_STATUS));
@@ -3023,7 +3118,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
if (bp->wol)
pmcsr |= PCI_PM_CTRL_PME_ENABLE;
- pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+ pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
pmcsr);
/* No more memory access after this point until
@@ -3041,7 +3136,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
/*
* net_device service functions
*/
-int bnx2x_poll(struct napi_struct *napi, int budget)
+static int bnx2x_poll(struct napi_struct *napi, int budget)
{
int work_done = 0;
u8 cos;
@@ -3117,7 +3212,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
return work_done;
}
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
int bnx2x_low_latency_recv(struct napi_struct *napi)
{
@@ -3238,14 +3333,16 @@ static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
if (prot == IPPROTO_TCP)
rc |= XMIT_CSUM_TCP;
- if (skb_is_gso_v6(skb)) {
- rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
- if (rc & XMIT_CSUM_ENC)
- rc |= XMIT_GSO_ENC_V6;
- } else if (skb_is_gso(skb)) {
- rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
- if (rc & XMIT_CSUM_ENC)
- rc |= XMIT_GSO_ENC_V4;
+ if (skb_is_gso(skb)) {
+ if (skb_is_gso_v6(skb)) {
+ rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
+ if (rc & XMIT_CSUM_ENC)
+ rc |= XMIT_GSO_ENC_V6;
+ } else {
+ rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
+ if (rc & XMIT_CSUM_ENC)
+ rc |= XMIT_GSO_ENC_V4;
+ }
}
return rc;
@@ -3798,6 +3895,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* set encapsulation flag in start BD */
SET_FLAG(tx_start_bd->general_data,
ETH_TX_START_BD_TUNNEL_EXIST, 1);
+
+ tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
+
nbd++;
} else if (xmit_type & XMIT_CSUM) {
/* Set PBD in checksum offload case w/o encapsulation */
@@ -3806,7 +3906,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
xmit_type);
}
- /* Add the macs to the parsing BD this is a vf */
+ /* Add the macs to the parsing BD if this is a vf or if
+ * Tx Switching is enabled.
+ */
if (IS_VF(bp)) {
/* override GRE parameters in BD */
bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
@@ -3818,6 +3920,11 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
&pbd_e2->data.mac_addr.dst_mid,
&pbd_e2->data.mac_addr.dst_lo,
eth->h_dest);
+ } else if (bp->flags & TX_SWITCHING) {
+ bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
+ &pbd_e2->data.mac_addr.dst_mid,
+ &pbd_e2->data.mac_addr.dst_lo,
+ eth->h_dest);
}
SET_FLAG(pbd_e2_parsing_data,
@@ -4166,7 +4273,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
/* end of fastpath */
}
-void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
+static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
{
int i;
for_each_cnic_queue(bp, i)
@@ -4211,7 +4318,7 @@ static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
* fp->eth_q_stats.rx_skb_alloc_failed = 0
*/
for (i = 0; i < rx_ring_size; i++) {
- if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
+ if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
failure_cnt++;
continue;
}
@@ -4294,14 +4401,17 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
if (!IS_FCOE_IDX(index)) {
/* status blocks */
- if (!CHIP_IS_E1x(bp))
- BNX2X_PCI_ALLOC(sb->e2_sb,
- &bnx2x_fp(bp, index, status_blk_mapping),
- sizeof(struct host_hc_status_block_e2));
- else
- BNX2X_PCI_ALLOC(sb->e1x_sb,
- &bnx2x_fp(bp, index, status_blk_mapping),
- sizeof(struct host_hc_status_block_e1x));
+ if (!CHIP_IS_E1x(bp)) {
+ sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e2));
+ if (!sb->e2_sb)
+ goto alloc_mem_err;
+ } else {
+ sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e1x));
+ if (!sb->e1x_sb)
+ goto alloc_mem_err;
+ }
}
/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
@@ -4320,35 +4430,49 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
"allocating tx memory of fp %d cos %d\n",
index, cos);
- BNX2X_ALLOC(txdata->tx_buf_ring,
- sizeof(struct sw_tx_bd) * NUM_TX_BD);
- BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
- &txdata->tx_desc_mapping,
- sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+ txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
+ sizeof(struct sw_tx_bd),
+ GFP_KERNEL);
+ if (!txdata->tx_buf_ring)
+ goto alloc_mem_err;
+ txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
+ sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+ if (!txdata->tx_desc_ring)
+ goto alloc_mem_err;
}
}
/* Rx */
if (!skip_rx_queue(bp, index)) {
/* fastpath rx rings: rx_buf rx_desc rx_comp */
- BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
- sizeof(struct sw_rx_bd) * NUM_RX_BD);
- BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
- &bnx2x_fp(bp, index, rx_desc_mapping),
- sizeof(struct eth_rx_bd) * NUM_RX_BD);
+ bnx2x_fp(bp, index, rx_buf_ring) =
+ kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
+ if (!bnx2x_fp(bp, index, rx_buf_ring))
+ goto alloc_mem_err;
+ bnx2x_fp(bp, index, rx_desc_ring) =
+ BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
+ sizeof(struct eth_rx_bd) * NUM_RX_BD);
+ if (!bnx2x_fp(bp, index, rx_desc_ring))
+ goto alloc_mem_err;
/* Seed all CQEs by 1s */
- BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring),
- &bnx2x_fp(bp, index, rx_comp_mapping),
- sizeof(struct eth_fast_path_rx_cqe) *
- NUM_RCQ_BD);
+ bnx2x_fp(bp, index, rx_comp_ring) =
+ BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
+ sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
+ if (!bnx2x_fp(bp, index, rx_comp_ring))
+ goto alloc_mem_err;
/* SGE ring */
- BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
- sizeof(struct sw_rx_page) * NUM_RX_SGE);
- BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
- &bnx2x_fp(bp, index, rx_sge_mapping),
- BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+ bnx2x_fp(bp, index, rx_page_ring) =
+ kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
+ GFP_KERNEL);
+ if (!bnx2x_fp(bp, index, rx_page_ring))
+ goto alloc_mem_err;
+ bnx2x_fp(bp, index, rx_sge_ring) =
+ BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
+ BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+ if (!bnx2x_fp(bp, index, rx_sge_ring))
+ goto alloc_mem_err;
/* RX BD ring */
bnx2x_set_next_page_rx_bd(fp);
@@ -4380,7 +4504,7 @@ alloc_mem_err:
return 0;
}
-int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
+static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
{
if (!NO_FCOE(bp))
/* FCoE */
@@ -4393,7 +4517,7 @@ int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
return 0;
}
-int bnx2x_alloc_fp_mem(struct bnx2x *bp)
+static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
int i;
@@ -4704,12 +4828,8 @@ void bnx2x_tx_timeout(struct net_device *dev)
bnx2x_panic();
#endif
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
-
/* This allows the netif to be shutdown gracefully before resetting */
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
}
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -4782,6 +4902,11 @@ int bnx2x_resume(struct pci_dev *pdev)
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
u32 cid)
{
+ if (!cxt) {
+ BNX2X_ERR("bad context pointer %p\n", cxt);
+ return;
+ }
+
/* ustorm cxt validation */
cxt->ustorm_ag_context.cdu_usage =
CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
@@ -4832,3 +4957,15 @@ void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
disable = disable ? 1 : (usec ? 0 : 1);
storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}
+
+void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
+ u32 verbose)
+{
+ smp_mb__before_atomic();
+ set_bit(flag, &bp->sp_rtnl_state);
+ smp_mb__after_atomic();
+ DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
+ flag);
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+}
+EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);
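
The barrier pair in bnx2x_schedule_sp_rtnl() is needed because set_bit() implies no memory barrier; the fences order the flag store against the scheduled worker's reads. A userspace analogue with C11 atomics (names hypothetical; a seq_cst fence stands in for smp_mb__after_atomic()):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long sp_state;
static _Atomic int work_scheduled;

static void schedule_flag(int flag)
{
	atomic_fetch_or_explicit(&sp_state, 1UL << flag,
				 memory_order_relaxed);	/* set_bit() */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb__after_atomic() */
	atomic_store_explicit(&work_scheduled, 1,
			      memory_order_relaxed);	/* schedule work */
}

int main(void)
{
	schedule_flag(3);
	printf("state %#lx scheduled %d\n",
	       atomic_load(&sp_state), atomic_load(&work_scheduled));
	return 0;
}
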
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index c07a6d054cf..571427c7226 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
@@ -21,15 +21,14 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/irq.h>
#include "bnx2x.h"
#include "bnx2x_sriov.h"
/* This is used as a replacement for an MCP if it's not present */
-extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
-
-extern int num_queues;
-extern int int_mode;
+extern int bnx2x_load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
+extern int bnx2x_num_queues;
/************************ Macros ********************************/
#define BNX2X_PCI_FREE(x, y, size) \
@@ -49,32 +48,26 @@ extern int int_mode;
} \
} while (0)
-#define BNX2X_PCI_ALLOC(x, y, size) \
- do { \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
- GFP_KERNEL | __GFP_ZERO); \
- if (x == NULL) \
- goto alloc_mem_err; \
- DP(NETIF_MSG_HW, "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
- (unsigned long long)(*y), x); \
- } while (0)
-
-#define BNX2X_PCI_FALLOC(x, y, size) \
- do { \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
- if (x == NULL) \
- goto alloc_mem_err; \
- memset((void *)x, 0xFFFFFFFF, size); \
- DP(NETIF_MSG_HW, "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n",\
- (unsigned long long)(*y), x); \
- } while (0)
-
-#define BNX2X_ALLOC(x, size) \
- do { \
- x = kzalloc(size, GFP_KERNEL); \
- if (x == NULL) \
- goto alloc_mem_err; \
- } while (0)
+#define BNX2X_PCI_ALLOC(y, size) \
+({ \
+ void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+ if (x) \
+ DP(NETIF_MSG_HW, \
+ "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
+ (unsigned long long)(*y), x); \
+ x; \
+})
+#define BNX2X_PCI_FALLOC(y, size) \
+({ \
+ void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+ if (x) { \
+ memset(x, 0xff, size); \
+ DP(NETIF_MSG_HW, \
+ "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n", \
+ (unsigned long long)(*y), x); \
+ } \
+ x; \
+})
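
The reworked BNX2X_PCI_ALLOC/BNX2X_PCI_FALLOC are GNU statement expressions: the final expression of the ({ ... }) block becomes the macro's value, so the pointer is handed back and each caller writes its own goto alloc_mem_err, rather than the macro hiding the jump. A userspace miniature of the pattern (GCC/clang extension):

#include <stdio.h>
#include <stdlib.h>

#define ZALLOC(size)						\
({								\
	void *_p = calloc(1, (size));				\
	if (_p)							\
		printf("ZALLOC: %zu bytes at %p\n",		\
		       (size_t)(size), _p);			\
	_p;	/* value of the statement expression */		\
})

int main(void)
{
	char *buf = ZALLOC(64);

	if (!buf)		/* caller owns the error path */
		return 1;
	free(buf);
	return 0;
}
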
/*********************** Interfaces ****************************
* Functions that need to be implemented by each driver version
@@ -106,9 +99,10 @@ void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link);
* @rss_obj: RSS object to use
* @ind_table: indirection table to configure
* @config_hash: re-configure RSS hash keys configuration
+ * @enable: enabled or disabled configuration
*/
-int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
- bool config_hash);
+int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
+ bool config_hash, bool enable);
/**
* bnx2x__init_func_obj - init function object
@@ -417,33 +411,7 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);
* If bp->state is OPEN, should be called with
* netif_addr_lock_bh()
*/
-void bnx2x_set_rx_mode(struct net_device *dev);
-
-/**
- * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
- *
- * @bp: driver handle
- *
- * If bp->state is OPEN, should be called with
- * netif_addr_lock_bh().
- */
-int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
-
-/**
- * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
- *
- * @bp: driver handle
- * @cl_id: client id
- * @rx_mode_flags: rx mode configuration
- * @rx_accept_flags: rx accept configuration
- * @tx_accept_flags: tx accept configuration (tx switch)
- * @ramrod_flags: ramrod configuration
- */
-int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
- unsigned long rx_mode_flags,
- unsigned long rx_accept_flags,
- unsigned long tx_accept_flags,
- unsigned long ramrod_flags);
+void bnx2x_set_rx_mode_inner(struct bnx2x *bp);
/* Parity errors related */
void bnx2x_set_pf_load(struct bnx2x *bp);
@@ -523,7 +491,8 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
/* select_queue callback */
-u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback);
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
struct bnx2x_fastpath *fp,
@@ -563,9 +532,6 @@ int bnx2x_reload_if_running(struct net_device *dev);
int bnx2x_change_mac_addr(struct net_device *dev, void *p);
-/* NAPI poll Rx part */
-int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
-
/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);
@@ -576,13 +542,9 @@ int bnx2x_resume(struct pci_dev *pdev);
/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);
-void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
void bnx2x_free_fp_mem(struct bnx2x *bp);
-int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
-int bnx2x_alloc_fp_mem(struct bnx2x *bp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_init_rx_rings_cnic(struct bnx2x *bp);
-void bnx2x_free_skbs_cnic(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);
@@ -606,15 +568,6 @@ int bnx2x_enable_msix(struct bnx2x *bp);
int bnx2x_enable_msi(struct bnx2x *bp);
/**
- * bnx2x_poll - NAPI callback
- *
- * @napi: napi structure
- * @budget:
- *
- */
-int bnx2x_poll(struct napi_struct *napi, int budget);
-
-/**
* bnx2x_low_latency_recv - LL callback
*
* @napi: napi structure
@@ -860,30 +813,6 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
sge->addr_lo = 0;
}
-static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
-{
- int i;
-
- /* Add NAPI objects */
- for_each_rx_queue_cnic(bp, i) {
- netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, NAPI_POLL_WEIGHT);
- napi_hash_add(&bnx2x_fp(bp, i, napi));
- }
-}
-
-static inline void bnx2x_add_all_napi(struct bnx2x *bp)
-{
- int i;
-
- /* Add NAPI objects */
- for_each_eth_queue(bp, i) {
- netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, NAPI_POLL_WEIGHT);
- napi_hash_add(&bnx2x_fp(bp, i, napi));
- }
-}
-
static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
{
int i;
@@ -917,14 +846,6 @@ static inline void bnx2x_disable_msi(struct bnx2x *bp)
}
}
-static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
-{
- return num_queues ?
- min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
- min_t(int, netif_get_num_default_rss_queues(),
- BNX2X_MAX_QUEUES(bp));
-}
-
static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
int i, j;
@@ -980,7 +901,7 @@ static inline int func_by_vn(struct bnx2x *bp, int vn)
static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
{
- return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash);
+ return bnx2x_rss(bp, &bp->rss_conf_obj, config_hash, true);
}
/**
@@ -1011,7 +932,7 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
else /* CHIP_IS_E1X */
start_params->network_cos_mode = FW_WRR;
- start_params->gre_tunnel_mode = IPGRE_TUNNEL;
+ start_params->gre_tunnel_mode = L2GRE_TUNNEL;
start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;
return bnx2x_func_state_change(bp, &func_params);
@@ -1171,8 +1092,6 @@ static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
return fp->cl_id;
}
-u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
-
static inline void bnx2x_init_txdata(struct bnx2x *bp,
struct bnx2x_fp_txdata *txdata, u32 cid,
int txq_index, __le16 *tx_cons_sb,
@@ -1205,47 +1124,6 @@ static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
return bp->igu_base_sb;
}
-static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
-{
- struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
- unsigned long q_type = 0;
-
- bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
- bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
- BNX2X_FCOE_ETH_CL_ID_IDX);
- bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
- bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
- bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
- bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
- bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
- fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
- fp);
-
- DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
-
- /* qZone id equals to FW (per path) client id */
- bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
- /* init shortcut */
- bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
- bnx2x_rx_ustorm_prods_offset(fp);
-
- /* Configure Queue State object */
- __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
- __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
-
- /* No multi-CoS for FCoE L2 client */
- BUG_ON(fp->max_cos != 1);
-
- bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
- &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
- bnx2x_sp_mapping(bp, q_rdata), q_type);
-
- DP(NETIF_MSG_IFUP,
- "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
- fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
- fp->igu_sb_id);
-}
-
static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
struct bnx2x_fp_txdata *txdata)
{
@@ -1442,4 +1320,7 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
int bnx2x_drain_tx_queues(struct bnx2x *bp);
void bnx2x_squeeze_objects(struct bnx2x *bp);
+void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag,
+ u32 verbose);
+
#endif /* BNX2X_CMN_H */
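
The allocation-macro rework in this header deserves a note: BNX2X_PCI_ALLOC and BNX2X_PCI_FALLOC were converted from goto-based statement macros into GNU statement expressions that evaluate to the returned pointer, so each caller now chooses its own error path (and the old BNX2X_ALLOC wrapper around kzalloc() is gone entirely). A minimal sketch of the pattern under hypothetical names, not the driver's code:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical context struct, for illustration only. */
struct example_ctx {
	void		*ring;
	dma_addr_t	ring_mapping;
	size_t		ring_size;
};

/* Same shape as BNX2X_PCI_ALLOC above: the ({ ... }) statement
 * expression evaluates to the pointer, so the caller picks the error
 * path instead of the macro jumping to a hard-coded label.
 */
#define EXAMPLE_PCI_ALLOC(dev, mapping, size)				\
({									\
	void *x = dma_zalloc_coherent(dev, size, mapping, GFP_KERNEL);	\
	x;								\
})

static int example_alloc(struct device *dev, struct example_ctx *c)
{
	c->ring = EXAMPLE_PCI_ALLOC(dev, &c->ring_mapping, c->ring_size);
	if (!c->ring)
		return -ENOMEM;	/* was "goto alloc_mem_err" in the old macro */
	return 0;
}

The statement-expression form avoids the hidden control flow of a goto buried inside a macro, at the cost of relying on a GCC extension the kernel already assumes.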
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 0c94df47e0e..51a952c51cb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -12,7 +12,7 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Dmitry Kravkov
*
*/
@@ -30,10 +30,8 @@
#include "bnx2x_dcb.h"
/* forward declarations of dcbx related functions */
-static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
-static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
u32 *set_configuration_ets_pg,
u32 *pri_pg_tbl);
@@ -425,30 +423,52 @@ static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
bnx2x_pfc_clear(bp);
}
-static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
+int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
{
struct bnx2x_func_state_params func_params = {NULL};
+ int rc;
func_params.f_obj = &bp->func_obj;
func_params.cmd = BNX2X_F_CMD_TX_STOP;
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n");
- return bnx2x_func_state_change(bp, &func_params);
+
+ rc = bnx2x_func_state_change(bp, &func_params);
+ if (rc) {
+ BNX2X_ERR("Unable to hold traffic for HW configuration\n");
+ bnx2x_panic();
+ }
+
+ return rc;
}
-static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
+int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
{
struct bnx2x_func_state_params func_params = {NULL};
struct bnx2x_func_tx_start_params *tx_params =
&func_params.params.tx_start;
+ int rc;
func_params.f_obj = &bp->func_obj;
func_params.cmd = BNX2X_F_CMD_TX_START;
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
bnx2x_dcbx_fw_struct(bp, tx_params);
DP(BNX2X_MSG_DCB, "START TRAFFIC\n");
- return bnx2x_func_state_change(bp, &func_params);
+
+ rc = bnx2x_func_state_change(bp, &func_params);
+ if (rc) {
+ BNX2X_ERR("Unable to resume traffic after HW configuration\n");
+ bnx2x_panic();
+ }
+
+ return rc;
}
static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp)
@@ -690,8 +710,7 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
* as we are handling an attention on a work queue which must be
* flushed at some rtnl-locked contexts (e.g. if down)
*/
- if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_SETUP_TC, 0);
}
void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
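
The open-coded test_and_set_bit()/schedule_delayed_work() pair removed just above is now centralized behind bnx2x_schedule_sp_rtnl(), whose prototype was added to bnx2x_cmn.h earlier in this patch. A plausible sketch of the helper, inferred from the removed call site; the debug print and its mask are assumptions:

/* Sketch inferred from the removed call site; the real helper may add
 * memory barriers or different debug output.
 */
void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
			    u32 verbose)
{
	DP(BNX2X_MSG_SP | verbose, "scheduling sp_rtnl task, flag %d\n",
	   flag);
	if (!test_and_set_bit(flag, &bp->sp_rtnl_state))
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
}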
@@ -744,8 +763,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
if (IS_MF(bp))
bnx2x_link_sync_notify(bp);
- bnx2x_dcbx_stop_hw_tx(bp);
-
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_STOP, 0);
return;
}
case BNX2X_DCBX_STATE_TX_PAUSED:
@@ -753,8 +771,9 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
bnx2x_pfc_set_pfc(bp);
bnx2x_dcbx_update_ets_params(bp);
- bnx2x_dcbx_resume_hw_tx(bp);
+ /* ETS may affect the cmng configuration: re-init it in HW */
+ bnx2x_set_local_cmng(bp);
return;
case BNX2X_DCBX_STATE_TX_RELEASED:
DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_RELEASED\n");
@@ -2363,21 +2382,24 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
case DCB_FEATCFG_ATTR_PG:
if (bp->dcbx_local_feat.ets.enabled)
*flags |= DCB_FEATCFG_ENABLE;
- if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR)
+ if (bp->dcbx_error & (DCBX_LOCAL_ETS_ERROR |
+ DCBX_REMOTE_MIB_ERROR))
*flags |= DCB_FEATCFG_ERROR;
break;
case DCB_FEATCFG_ATTR_PFC:
if (bp->dcbx_local_feat.pfc.enabled)
*flags |= DCB_FEATCFG_ENABLE;
if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR |
- DCBX_LOCAL_PFC_MISMATCH))
+ DCBX_LOCAL_PFC_MISMATCH |
+ DCBX_REMOTE_MIB_ERROR))
*flags |= DCB_FEATCFG_ERROR;
break;
case DCB_FEATCFG_ATTR_APP:
if (bp->dcbx_local_feat.app.enabled)
*flags |= DCB_FEATCFG_ENABLE;
if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR |
- DCBX_LOCAL_APP_MISMATCH))
+ DCBX_LOCAL_APP_MISMATCH |
+ DCBX_REMOTE_MIB_ERROR))
*flags |= DCB_FEATCFG_ERROR;
break;
default:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 125bd1b6586..c6939ecb02c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -12,7 +12,7 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Dmitry Kravkov
*
*/
@@ -199,4 +199,7 @@ extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
#endif /* BCM_DCBNL */
+int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
+int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
+
#endif /* BNX2X_DCB_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index c5f22510168..25eddd90f48 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
@@ -358,49 +358,48 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cfg_idx = bnx2x_get_link_cfg_idx(bp);
old_multi_phy_config = bp->link_params.multi_phy_config;
- switch (cmd->port) {
- case PORT_TP:
- if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
- break; /* no port change */
-
- if (!(bp->port.supported[0] & SUPPORTED_TP ||
- bp->port.supported[1] & SUPPORTED_TP)) {
- DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
- return -EINVAL;
- }
- bp->link_params.multi_phy_config &=
- ~PORT_HW_CFG_PHY_SELECTION_MASK;
- if (bp->link_params.multi_phy_config &
- PORT_HW_CFG_PHY_SWAPPED_ENABLED)
- bp->link_params.multi_phy_config |=
- PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
- else
- bp->link_params.multi_phy_config |=
- PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
- break;
- case PORT_FIBRE:
- case PORT_DA:
- if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
- break; /* no port change */
-
- if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
- bp->port.supported[1] & SUPPORTED_FIBRE)) {
+ if (cmd->port != bnx2x_get_port_type(bp)) {
+ switch (cmd->port) {
+ case PORT_TP:
+ if (!(bp->port.supported[0] & SUPPORTED_TP ||
+ bp->port.supported[1] & SUPPORTED_TP)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Unsupported port type\n");
+ return -EINVAL;
+ }
+ bp->link_params.multi_phy_config &=
+ ~PORT_HW_CFG_PHY_SELECTION_MASK;
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ else
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ break;
+ case PORT_FIBRE:
+ case PORT_DA:
+ case PORT_NONE:
+ if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
+ bp->port.supported[1] & SUPPORTED_FIBRE)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Unsupported port type\n");
+ return -EINVAL;
+ }
+ bp->link_params.multi_phy_config &=
+ ~PORT_HW_CFG_PHY_SELECTION_MASK;
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ else
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ break;
+ default:
DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
return -EINVAL;
}
- bp->link_params.multi_phy_config &=
- ~PORT_HW_CFG_PHY_SELECTION_MASK;
- if (bp->link_params.multi_phy_config &
- PORT_HW_CFG_PHY_SWAPPED_ENABLED)
- bp->link_params.multi_phy_config |=
- PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
- else
- bp->link_params.multi_phy_config |=
- PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
- break;
- default:
- DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
- return -EINVAL;
}
/* Save new config in case command complete successfully */
new_multi_phy_config = bp->link_params.multi_phy_config;
@@ -639,6 +638,9 @@ static int bnx2x_get_regs_len(struct net_device *dev)
struct bnx2x *bp = netdev_priv(dev);
int regdump_len = 0;
+ if (IS_VF(bp))
+ return 0;
+
regdump_len = __bnx2x_get_regs_len(bp);
regdump_len *= 4;
regdump_len += sizeof(struct dump_header);
@@ -891,17 +893,8 @@ static void bnx2x_get_regs(struct net_device *dev,
* will re-enable parity attentions right after the dump.
*/
- /* Disable parity on path 0 */
- bnx2x_pretend_func(bp, 0);
bnx2x_disable_blocks_parity(bp);
- /* Disable parity on path 1 */
- bnx2x_pretend_func(bp, 1);
- bnx2x_disable_blocks_parity(bp);
-
- /* Return to current function */
- bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
-
dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
dump_hdr.preset = DUMP_ALL_PRESETS;
dump_hdr.version = BNX2X_DUMP_VERSION;
@@ -928,18 +921,9 @@ static void bnx2x_get_regs(struct net_device *dev,
/* Actually read the registers */
__bnx2x_get_regs(bp, p);
- /* Re-enable parity attentions on path 0 */
- bnx2x_pretend_func(bp, 0);
- bnx2x_clear_blocks_parity(bp);
- bnx2x_enable_blocks_parity(bp);
-
- /* Re-enable parity attentions on path 1 */
- bnx2x_pretend_func(bp, 1);
+ /* Re-enable parity attentions */
bnx2x_clear_blocks_parity(bp);
bnx2x_enable_blocks_parity(bp);
-
- /* Return to current function */
- bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}
static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset)
@@ -993,17 +977,8 @@ static int bnx2x_get_dump_data(struct net_device *dev,
* will re-enable parity attentions right after the dump.
*/
- /* Disable parity on path 0 */
- bnx2x_pretend_func(bp, 0);
bnx2x_disable_blocks_parity(bp);
- /* Disable parity on path 1 */
- bnx2x_pretend_func(bp, 1);
- bnx2x_disable_blocks_parity(bp);
-
- /* Return to current function */
- bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
-
dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
dump_hdr.preset = bp->dump_preset_idx;
dump_hdr.version = BNX2X_DUMP_VERSION;
@@ -1032,19 +1007,10 @@ static int bnx2x_get_dump_data(struct net_device *dev,
/* Actually read the registers */
__bnx2x_get_preset_regs(bp, p, dump_hdr.preset);
- /* Re-enable parity attentions on path 0 */
- bnx2x_pretend_func(bp, 0);
- bnx2x_clear_blocks_parity(bp);
- bnx2x_enable_blocks_parity(bp);
-
- /* Re-enable parity attentions on path 1 */
- bnx2x_pretend_func(bp, 1);
+ /* Re-enable parity attentions */
bnx2x_clear_blocks_parity(bp);
bnx2x_enable_blocks_parity(bp);
- /* Return to current function */
- bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
-
return 0;
}
@@ -1387,9 +1353,9 @@ static bool bnx2x_is_nvm_accessible(struct bnx2x *bp)
u16 pm = 0;
struct net_device *dev = pci_get_drvdata(bp->pdev);
- if (bp->pm_cap)
+ if (bp->pdev->pm_cap)
rc = pci_read_config_word(bp->pdev,
- bp->pm_cap + PCI_PM_CTRL, &pm);
+ bp->pdev->pm_cap + PCI_PM_CTRL, &pm);
if ((rc && !netif_running(dev)) ||
(!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != (__force u16)PCI_D0)))
@@ -1672,6 +1638,12 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
memcpy(&val, data_buf, 4);
+ /* Note that unlike bnx2x_nvram_read_dword() this will not
+ * convert val using be32_to_cpu(), so data flips whenever
+ * the eeprom is read and then written back. This is
+ * deliberate: existing tools rely on this behaviour and
+ * would break if it were fixed.
+ */
rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
/* advance to the next dword */
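
To make the comment above concrete, here is the asymmetry modelled with the flash as a plain byte array (function names hypothetical): the read path swaps to host order, the write path does not, so a read-modify-write cycle byte-swaps each dword on a little-endian host.

#include <linux/kernel.h>
#include <linux/string.h>

/* Read path (models bnx2x_nvram_read_dword): swap flash bytes to
 * host order.
 */
static u32 model_nvram_read_dword(const u8 *flash, u32 off)
{
	__be32 raw;

	memcpy(&raw, flash + off, 4);
	return be32_to_cpu(raw);
}

/* Write path (models the code above): the caller's bytes go out raw.
 * On a little-endian host, reading and then writing back therefore
 * flips each dword: exactly the behaviour the tools depend on.
 */
static void model_nvram_write_dword(u8 *flash, u32 off, const u8 *buf)
{
	u32 val;

	memcpy(&val, buf, 4);
	memcpy(flash + off, &val, 4);
}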
@@ -2900,9 +2872,16 @@ static void bnx2x_self_test(struct net_device *dev,
memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));
+ if (bnx2x_test_nvram(bp) != 0) {
+ if (!IS_MF(bp))
+ buf[4] = 1;
+ else
+ buf[0] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
if (!netif_running(dev)) {
- DP(BNX2X_MSG_ETHTOOL,
- "Can't perform self-test when interface is down\n");
+ DP(BNX2X_MSG_ETHTOOL, "Interface is down\n");
return;
}
@@ -2964,13 +2943,7 @@ static void bnx2x_self_test(struct net_device *dev,
/* wait until link state is restored */
bnx2x_wait_for_link(bp, link_up, is_serdes);
}
- if (bnx2x_test_nvram(bp) != 0) {
- if (!IS_MF(bp))
- buf[4] = 1;
- else
- buf[0] = 1;
- etest->flags |= ETH_TEST_FL_FAILED;
- }
+
if (bnx2x_test_intr(bp) != 0) {
if (!IS_MF(bp))
buf[5] = 1;
@@ -2997,8 +2970,9 @@ static void bnx2x_self_test(struct net_device *dev,
#define IS_PORT_STAT(i) \
((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
-#define IS_MF_MODE_STAT(bp) \
- (IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
+#define HIDE_PORT_STAT(bp) \
+ ((IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) || \
+ IS_VF(bp))
/* ethtool statistics are displayed for all regular ethernet queues and the
* fcoe L2 queue if not disabled
@@ -3020,7 +2994,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
BNX2X_NUM_Q_STATS;
} else
num_strings = 0;
- if (IS_MF_MODE_STAT(bp)) {
+ if (HIDE_PORT_STAT(bp)) {
for (i = 0; i < BNX2X_NUM_STATS; i++)
if (IS_FUNC_STAT(i))
num_strings++;
@@ -3075,7 +3049,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
}
for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
- if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+ if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
continue;
strcpy(buf + (k + j)*ETH_GSTRING_LEN,
bnx2x_stats_arr[i].string);
@@ -3133,7 +3107,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
hw_stats = (u32 *)&bp->eth_stats;
for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
- if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+ if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
continue;
if (bnx2x_stats_arr[i].size == 0) {
/* skip this counter */
@@ -3281,14 +3255,14 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
- return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
+ return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
} else if ((info->flow_type == UDP_V6_FLOW) &&
(bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
- return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
+ return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
}
return 0;
@@ -3343,7 +3317,7 @@ static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
return T_ETH_INDIRECTION_TABLE_SIZE;
}
-static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
+static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
{
struct bnx2x *bp = netdev_priv(dev);
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
@@ -3367,14 +3341,15 @@ static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
return 0;
}
-static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
+static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key)
{
struct bnx2x *bp = netdev_priv(dev);
size_t i;
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
/*
- * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy()
+ * The same as in bnx2x_get_rxfh: we can't use a memcpy()
* as an internal storage of an indirection table is a u8 array
* while indir->ring_index points to an array of u32.
*
@@ -3498,8 +3473,8 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_rxnfc = bnx2x_get_rxnfc,
.set_rxnfc = bnx2x_set_rxnfc,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
- .get_rxfh_indir = bnx2x_get_rxfh_indir,
- .set_rxfh_indir = bnx2x_set_rxfh_indir,
+ .get_rxfh = bnx2x_get_rxfh,
+ .set_rxfh = bnx2x_set_rxfh,
.get_channels = bnx2x_get_channels,
.set_channels = bnx2x_set_channels,
.get_module_info = bnx2x_get_module_info,
@@ -3525,16 +3500,14 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
.get_rxnfc = bnx2x_get_rxnfc,
.set_rxnfc = bnx2x_set_rxnfc,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
- .get_rxfh_indir = bnx2x_get_rxfh_indir,
- .set_rxfh_indir = bnx2x_set_rxfh_indir,
+ .get_rxfh = bnx2x_get_rxfh,
+ .set_rxfh = bnx2x_set_rxfh,
.get_channels = bnx2x_get_channels,
.set_channels = bnx2x_set_channels,
};
void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
{
- if (IS_PF(bp))
- SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
- else /* vf */
- SET_ETHTOOL_OPS(netdev, &bnx2x_vf_ethtool_ops);
+ netdev->ethtool_ops = (IS_PF(bp)) ?
+ &bnx2x_ethtool_ops : &bnx2x_vf_ethtool_ops;
}
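
As the comment inside bnx2x_set_rxfh() notes, memcpy() cannot be used for the indirection table because the driver stores it as a u8 array while the new get_rxfh/set_rxfh ethtool API exchanges u32 entries. A stand-alone sketch of the widening and narrowing copies (size symbolic; the driver's additional per-entry adjustment is omitted):

#include <linux/types.h>

#define EXAMPLE_IND_TABLE_SIZE 128	/* stand-in for T_ETH_INDIRECTION_TABLE_SIZE */

/* get_rxfh direction: widen each u8 entry into the u32 API array */
static void example_table_to_indir(const u8 *tbl, u32 *indir)
{
	size_t i;

	for (i = 0; i < EXAMPLE_IND_TABLE_SIZE; i++)
		indir[i] = tbl[i];
}

/* set_rxfh direction: narrow each u32 API entry back into the table */
static void example_indir_to_table(const u32 *indir, u8 *tbl)
{
	size_t i;

	for (i = 0; i < EXAMPLE_IND_TABLE_SIZE; i++)
		tbl[i] = (u8)indir[i];
}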
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 84aecdf06f7..95dc3654354 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -87,7 +87,6 @@
(IRO[156].base + ((vfId) * IRO[156].m1))
#define CSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[150].base + ((funcId) * IRO[150].m1))
-#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base)
#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
(IRO[203].base + ((pfId) * IRO[203].m1))
#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
index f572ae164fc..8aafd9b5d6a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
@@ -6,8 +6,8 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Vladislav Zolotarov
* Based on the original idea of John Wright <john.wright@hp.com>.
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 5018e52ae2a..5ba8af50c84 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -172,6 +172,7 @@ struct shared_hw_cfg { /* NVRAM Offset */
#define SHARED_HW_CFG_LED_MAC4 0x000c0000
#define SHARED_HW_CFG_LED_PHY8 0x000d0000
#define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000
+ #define SHARED_HW_CFG_LED_EXTPHY2 0x000f0000
#define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000
@@ -1300,6 +1301,9 @@ struct drv_func_mb {
#define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000
+ #define DRV_MSG_CODE_RMMOD 0xdb000000
+ #define REQ_BC_VER_4_RMMOD_CMD 0x0007080f
+
#define DRV_MSG_CODE_SET_MF_BW 0xe0000000
#define REQ_BC_VER_4_SET_MF_BW 0x00060202
#define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
@@ -1372,6 +1376,8 @@ struct drv_func_mb {
#define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000
+ #define FW_MSG_CODE_RMMOD_ACK 0xdb100000
+
#define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000
#define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
@@ -1997,6 +2003,23 @@ struct shmem_lfa {
#define SHMEM_LFA_DONT_CLEAR_STAT (1<<24)
};
+/* Used to support NCSI "get OS driver version" requests:
+ * on driver load the version value is set;
+ * on driver unload a value of 0x0 is written.
+ */
+struct os_drv_ver {
+#define DRV_VER_NOT_LOADED 0
+
+ /* personalities: the order is important */
+#define DRV_PERS_ETHERNET 0
+#define DRV_PERS_ISCSI 1
+#define DRV_PERS_FCOE 2
+
+ /* shmem2 struct is fixed in size; can't add more personalities here */
+#define MAX_DRV_PERS 3
+ u32 versions[MAX_DRV_PERS];
+};
+
struct ncsi_oem_fcoe_features {
u32 fcoe_features1;
#define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF
@@ -2211,6 +2234,18 @@ struct shmem2_region {
u32 reserved4; /* Offset 0x150 */
u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */
#define LINK_ATTR_SYNC_KR2_ENABLE (1<<0)
+
+ u32 reserved5[2];
+ u32 reserved6[PORT_MAX];
+
+ /* driver version for each personality */
+ struct os_drv_ver func_os_drv_ver[E2_FUNC_MAX]; /* Offset 0x16c */
+
+ /* Flag to the driver that PF's drv_info_host_addr buffer was read */
+ u32 mfw_drv_indication;
+
+ /* One indication bit is used per PF (0..3) */
+#define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_))
};
@@ -2842,7 +2877,7 @@ struct afex_stats {
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 8
-#define BCM_5710_FW_REVISION_VERSION 17
+#define BCM_5710_FW_REVISION_VERSION 19
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
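
The new shmem2 fields give the management firmware a per-function, per-personality view of loaded driver versions. A hedged sketch of how a driver might fill and clear its struct os_drv_ver slot, using the definitions added above; the version packing is an assumption, not the driver's actual encoding:

#include <linux/types.h>

/* Illustration only: pack a MAJ.MIN.REV.ENG version into one u32
 * slot; the packing scheme here is an assumption.
 */
static u32 example_encode_drv_ver(u8 maj, u8 min, u8 rev, u8 eng)
{
	return ((u32)maj << 24) | ((u32)min << 16) | ((u32)rev << 8) | eng;
}

static void example_set_eth_ver(struct os_drv_ver *slot,
				u8 maj, u8 min, u8 rev, u8 eng)
{
	slot->versions[DRV_PERS_ETHERNET] =
		example_encode_drv_ver(maj, min, rev, eng);
}

/* On unload, the comment above says a value of 0x0 is written back. */
static void example_clear_drv_ver(struct os_drv_ver *slot)
{
	int i;

	for (i = 0; i < MAX_DRV_PERS; i++)
		slot->versions[i] = DRV_VER_NOT_LOADED;
}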
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 76df015f486..bd90e50bd8e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -7,9 +7,9 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
- * Modified by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Modified by: Vladislav Zolotarov
*/
#ifndef BNX2X_INIT_H
@@ -640,23 +640,35 @@ static const struct {
* [30] MCP Latched ump_tx_parity
* [31] MCP Latched scpad_parity
*/
-#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
+#define MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS \
(AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
- AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY)
+
+#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
+ (MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS | \
AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
/* Below registers control the MCP parity attention output. When
* MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
* enabled, when cleared - disabled.
*/
-static const u32 mcp_attn_ctl_regs[] = {
- MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
- MISC_REG_AEU_ENABLE4_NIG_0,
- MISC_REG_AEU_ENABLE4_PXP_0,
- MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
- MISC_REG_AEU_ENABLE4_NIG_1,
- MISC_REG_AEU_ENABLE4_PXP_1
+static const struct {
+ u32 addr;
+ u32 bits;
+} mcp_attn_ctl_regs[] = {
+ { MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
+ MISC_AEU_ENABLE_MCP_PRTY_BITS },
+ { MISC_REG_AEU_ENABLE4_NIG_0,
+ MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+ { MISC_REG_AEU_ENABLE4_PXP_0,
+ MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+ { MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
+ MISC_AEU_ENABLE_MCP_PRTY_BITS },
+ { MISC_REG_AEU_ENABLE4_NIG_1,
+ MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+ { MISC_REG_AEU_ENABLE4_PXP_1,
+ MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }
};
static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
@@ -665,14 +677,14 @@ static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
u32 reg_val;
for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
- reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]);
+ reg_val = REG_RD(bp, mcp_attn_ctl_regs[i].addr);
if (enable)
- reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
+ reg_val |= mcp_attn_ctl_regs[i].bits;
else
- reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;
+ reg_val &= ~mcp_attn_ctl_regs[i].bits;
- REG_WR(bp, mcp_attn_ctl_regs[i], reg_val);
+ REG_WR(bp, mcp_attn_ctl_regs[i].addr, reg_val);
}
}
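
The table rework above splits the MCP parity mask: the per-function AEU outputs (FUNC_x_OUT_0) keep the latched scpad_parity attention, while the NIG and PXP mirror registers enable only the ROM/UMP sub-bits. A tiny standalone check of the mask relationship; the bit positions are assumptions taken from the attention-bit comment, and only the subset relation matters:

#include <assert.h>

/* Stand-ins for the AEU attention inputs; positions are assumptions. */
#define ROM_PARITY	(1u << 28)
#define UMP_RX_PARITY	(1u << 29)
#define UMP_TX_PARITY	(1u << 30)
#define SCPAD_PARITY	(1u << 31)

#define MCP_PRTY_SUB_BITS	(ROM_PARITY | UMP_RX_PARITY | UMP_TX_PARITY)
#define MCP_PRTY_BITS		(MCP_PRTY_SUB_BITS | SCPAD_PARITY)

int main(void)
{
	/* Only the per-function AEU outputs carry scpad_parity; the
	 * NIG/PXP mirrors enable just the sub-bits.
	 */
	assert((MCP_PRTY_BITS & ~MCP_PRTY_SUB_BITS) == SCPAD_PARITY);
	return 0;
}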
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index 8ab0dd90096..5669ed2e87d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -8,8 +8,8 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Vladislav Zolotarov
*/
#ifndef BNX2X_INIT_OPS_H
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 9d64b988ab3..53fb4fa61b4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -175,6 +175,7 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
#define EDC_MODE_LINEAR 0x0022
#define EDC_MODE_LIMITING 0x0044
#define EDC_MODE_PASSIVE_DAC 0x0055
+#define EDC_MODE_ACTIVE_DAC 0x0066
/* ETS defines*/
#define DCBX_INVALID_COS (0xFF)
@@ -204,6 +205,11 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
(_bank + (_addr & 0xf)), \
_val)
+static int bnx2x_check_half_open_conn(struct link_params *params,
+ struct link_vars *vars, u8 notify);
+static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+ struct link_params *params);
+
static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
u32 val = REG_RD(bp, reg);
@@ -1398,57 +1404,6 @@ static void bnx2x_update_pfc_xmac(struct link_params *params,
udelay(30);
}
-
-static void bnx2x_emac_get_pfc_stat(struct link_params *params,
- u32 pfc_frames_sent[2],
- u32 pfc_frames_received[2])
-{
- /* Read pfc statistic */
- struct bnx2x *bp = params->bp;
- u32 emac_base = params->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
- u32 val_xon = 0;
- u32 val_xoff = 0;
-
- DP(NETIF_MSG_LINK, "pfc statistic read from EMAC\n");
-
- /* PFC received frames */
- val_xoff = REG_RD(bp, emac_base +
- EMAC_REG_RX_PFC_STATS_XOFF_RCVD);
- val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT;
- val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD);
- val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT;
-
- pfc_frames_received[0] = val_xon + val_xoff;
-
- /* PFC received sent */
- val_xoff = REG_RD(bp, emac_base +
- EMAC_REG_RX_PFC_STATS_XOFF_SENT);
- val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT;
- val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT);
- val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT;
-
- pfc_frames_sent[0] = val_xon + val_xoff;
-}
-
-/* Read pfc statistic*/
-void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
- u32 pfc_frames_sent[2],
- u32 pfc_frames_received[2])
-{
- /* Read pfc statistic */
- struct bnx2x *bp = params->bp;
-
- DP(NETIF_MSG_LINK, "pfc statistic\n");
-
- if (!vars->link_up)
- return;
-
- if (vars->mac_type == MAC_TYPE_EMAC) {
- DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n");
- bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
- pfc_frames_received);
- }
-}
/******************************************************************/
/* MAC/PBF section */
/******************************************************************/
@@ -2263,7 +2218,6 @@ int bnx2x_update_pfc(struct link_params *params,
*/
u32 val;
struct bnx2x *bp = params->bp;
- int bnx2x_status = 0;
u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC);
if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
@@ -2277,7 +2231,7 @@ int bnx2x_update_pfc(struct link_params *params,
bnx2x_update_pfc_nig(params, vars, pfc_params);
if (!vars->link_up)
- return bnx2x_status;
+ return 0;
DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n");
@@ -2291,7 +2245,7 @@ int bnx2x_update_pfc(struct link_params *params,
== 0) {
DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
bnx2x_emac_enable(params, vars, 0);
- return bnx2x_status;
+ return 0;
}
if (CHIP_IS_E2(bp))
bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
@@ -2305,7 +2259,7 @@ int bnx2x_update_pfc(struct link_params *params,
val = 1;
REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
}
- return bnx2x_status;
+ return 0;
}
static int bnx2x_bmac1_enable(struct link_params *params,
@@ -3121,7 +3075,7 @@ static void bnx2x_bsc_module_sel(struct link_params *params)
}
static int bnx2x_bsc_read(struct link_params *params,
- struct bnx2x_phy *phy,
+ struct bnx2x *bp,
u8 sl_devid,
u16 sl_addr,
u8 lc_addr,
@@ -3130,7 +3084,6 @@ static int bnx2x_bsc_read(struct link_params *params,
{
u32 val, i;
int rc = 0;
- struct bnx2x *bp = params->bp;
if (xfer_cnt > 16) {
DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
@@ -3684,6 +3637,41 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
bnx2x_update_link_attr(params, vars->link_attr_sync);
}
+static void bnx2x_disable_kr2(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_phy *phy)
+{
+ struct bnx2x *bp = params->bp;
+ int i;
+ static struct bnx2x_reg_set reg_set[] = {
+ /* Step 1 - Program the TX/RX alignment markers */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
+ };
+ DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
+
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+ bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
+ vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
+ bnx2x_update_link_attr(params, vars->link_attr_sync);
+
+ vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
+}
+
static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
struct link_params *params)
{
@@ -3714,8 +3702,8 @@ static void bnx2x_warpcore_restart_AN_KR(struct bnx2x_phy *phy,
static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars) {
- u16 lane, i, cl72_ctrl, an_adv = 0;
- u16 ucode_ver;
+ u16 lane, i, cl72_ctrl, an_adv = 0, val;
+ u32 wc_lane_config;
struct bnx2x *bp = params->bp;
static struct bnx2x_reg_set reg_set[] = {
{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
@@ -3806,15 +3794,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
/* Advertise pause */
bnx2x_ext_phy_set_pause(params, phy, vars);
- /* Set KR Autoneg Work-Around flag for Warpcore version older than D108
- */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_UC_INFO_B1_VERSION, &ucode_ver);
- if (ucode_ver < 0xd108) {
- DP(NETIF_MSG_LINK, "Enable AN KR work-around. WC ver:0x%x\n",
- ucode_ver);
- vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
- }
+ vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL5_MISC7, 0x100);
@@ -3838,6 +3818,33 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
bnx2x_set_aer_mmd(params, phy);
bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
+ } else {
+ /* Enable Auto-Detect to support 1G over CL37 as well */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10);
+ wc_lane_config = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ shared_hw_config.wc_lane_config));
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4), &val);
+ /* Force cl48 sync_status LOW to avoid getting stuck in CL73
+ * parallel-detect loop when CL73 and CL37 are enabled.
+ */
+ val |= 1 << 11;
+
+ /* Restore polarity settings in case they were overridden
+ * by a previous link owner
+ */
+ if (wc_lane_config &
+ (SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED << lane))
+ val |= 3 << 2;
+ else
+ val &= ~(3 << 2);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4),
+ val);
+
+ bnx2x_disable_kr2(params, vars, phy);
}
/* Enable Autoneg: only on the main lane */
@@ -4347,20 +4354,14 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u32 serdes_net_if;
u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0;
- u16 lane = bnx2x_get_warpcore_lane(phy, params);
vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1;
if (!vars->turn_to_run_wc_rt)
return;
- /* Return if there is no link partner */
- if (!(bnx2x_warpcore_get_sigdet(phy, params))) {
- DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n");
- return;
- }
-
if (vars->rx_tx_asic_rst) {
+ u16 lane = bnx2x_get_warpcore_lane(phy, params);
serdes_net_if = (REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
port_hw_config[params->port].default_cfg)) &
@@ -4375,14 +4376,8 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
/*10G KR*/
lnkup_kr = (gp_status1 >> (12+lane)) & 0x1;
- DP(NETIF_MSG_LINK,
- "gp_status1 0x%x\n", gp_status1);
-
if (lnkup_kr || lnkup) {
- vars->rx_tx_asic_rst = 0;
- DP(NETIF_MSG_LINK,
- "link up, rx_tx_asic_rst 0x%x\n",
- vars->rx_tx_asic_rst);
+ vars->rx_tx_asic_rst = 0;
} else {
/* Reset the lane to see if link comes up.*/
bnx2x_warpcore_reset_lane(bp, phy, 1);
@@ -4507,10 +4502,14 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
* enabled transmitter to avoid current leakage in case
* no module is connected
*/
- if (bnx2x_is_sfp_module_plugged(phy, params))
- bnx2x_sfp_module_detection(phy, params);
- else
- bnx2x_sfp_e3_set_transmitter(params, phy, 1);
+ if ((params->loopback_mode == LOOPBACK_NONE) ||
+ (params->loopback_mode == LOOPBACK_EXT)) {
+ if (bnx2x_is_sfp_module_plugged(phy, params))
+ bnx2x_sfp_module_detection(phy, params);
+ else
+ bnx2x_sfp_e3_set_transmitter(params,
+ phy, 1);
+ }
bnx2x_warpcore_config_sfi(phy, params);
break;
@@ -5757,6 +5756,11 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
duplex);
+ /* In case the KR link is down, start the recovery procedure */
+ if ((!link_up) && (phy->media_type == ETH_PHY_KR) &&
+ (!(phy->flags & FLAGS_WC_DUAL_MODE)))
+ vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
+
DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
vars->duplex, vars->flow_ctrl, vars->link_status);
return rc;
@@ -6345,9 +6349,15 @@ int bnx2x_set_led(struct link_params *params,
* intended override.
*/
break;
- } else
+ } else {
+ u32 nig_led_mode = ((params->hw_led_mode <<
+ SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY2) ?
+ (SHARED_HW_CFG_LED_PHY1 >>
+ SHARED_HW_CFG_LED_MODE_SHIFT) : hw_led_mode;
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
- hw_led_mode);
+ nig_led_mode);
+ }
REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
/* Set blinking rate to ~15.9Hz */
@@ -6475,7 +6485,6 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
static int bnx2x_link_initialize(struct link_params *params,
struct link_vars *vars)
{
- int rc = 0;
u8 phy_index, non_ext_phy;
struct bnx2x *bp = params->bp;
/* In case of external phy existence, the line speed would be the
@@ -6503,12 +6512,15 @@ static int bnx2x_link_initialize(struct link_params *params,
(CHIP_IS_E1x(bp) ||
CHIP_IS_E2(bp)))
bnx2x_set_parallel_detection(phy, params);
- if (params->phy[INT_PHY].config_init)
- params->phy[INT_PHY].config_init(phy,
- params,
- vars);
+ if (params->phy[INT_PHY].config_init)
+ params->phy[INT_PHY].config_init(phy, params, vars);
}
+ /* Re-read this value in case it was changed inside config_init due to
+ * limitations of the optic module
+ */
+ vars->line_speed = params->phy[INT_PHY].req_line_speed;
+
/* Init external phy*/
if (non_ext_phy) {
if (params->phy[INT_PHY].supported &
@@ -6545,7 +6557,7 @@ static int bnx2x_link_initialize(struct link_params *params,
NIG_STATUS_XGXS0_LINK_STATUS |
NIG_STATUS_SERDES0_LINK_STATUS |
NIG_MASK_MI_INT));
- return rc;
+ return 0;
}
static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
@@ -7888,7 +7900,7 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
usleep_range(1000, 2000);
bnx2x_warpcore_power_module(params, 1);
}
- rc = bnx2x_bsc_read(params, phy, dev_addr, addr32, 0, byte_cnt,
+ rc = bnx2x_bsc_read(params, bp, dev_addr, addr32, 0, byte_cnt,
data_array);
} while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
@@ -8082,18 +8094,24 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
if (copper_module_type &
SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
DP(NETIF_MSG_LINK, "Active Copper cable detected\n");
- check_limiting_mode = 1;
- } else if (copper_module_type &
- SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+ *edc_mode = EDC_MODE_ACTIVE_DAC;
+ else
+ check_limiting_mode = 1;
+ } else {
+ *edc_mode = EDC_MODE_PASSIVE_DAC;
+ /* Even if the PASSIVE_DAC indication is not set,
+ * treat the cable as passive DAC, since some
+ * cables lack this indication.
+ */
+ if (copper_module_type &
+ SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
DP(NETIF_MSG_LINK,
"Passive Copper cable detected\n");
- *edc_mode =
- EDC_MODE_PASSIVE_DAC;
- } else {
- DP(NETIF_MSG_LINK,
- "Unknown copper-cable-type 0x%x !!!\n",
- copper_module_type);
- return -EINVAL;
+ } else {
+ DP(NETIF_MSG_LINK,
+ "Unknown copper-cable-type\n");
+ }
}
break;
}
@@ -8557,6 +8575,7 @@ static void bnx2x_warpcore_set_limiting_mode(struct link_params *params,
mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
break;
case EDC_MODE_PASSIVE_DAC:
+ case EDC_MODE_ACTIVE_DAC:
mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC;
break;
default:
@@ -8594,8 +8613,8 @@ static void bnx2x_set_limiting_mode(struct link_params *params,
}
}
-int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
- struct link_params *params)
+static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 edc_mode;
@@ -9732,32 +9751,41 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
an_1000_val);
- /* set 100 speed advertisement */
- if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))) {
- an_10_100_val |= (1<<7);
- /* Enable autoneg and restart autoneg for legacy speeds */
- autoneg_val |= (1<<9 | 1<<12);
-
- if (phy->req_duplex == DUPLEX_FULL)
+ /* Set 10/100 speed advertisement */
+ if (phy->req_line_speed == SPEED_AUTO_NEG) {
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
+ /* Enable autoneg and restart autoneg for legacy speeds
+ */
+ autoneg_val |= (1<<9 | 1<<12);
an_10_100_val |= (1<<8);
- DP(NETIF_MSG_LINK, "Advertising 100M\n");
- }
- /* set 10 speed advertisement */
- if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) &&
- (phy->supported &
- (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full)))) {
- an_10_100_val |= (1<<5);
- autoneg_val |= (1<<9 | 1<<12);
- if (phy->req_duplex == DUPLEX_FULL)
+ DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
+ }
+
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
+ /* Enable autoneg and restart autoneg for legacy speeds
+ */
+ autoneg_val |= (1<<9 | 1<<12);
+ an_10_100_val |= (1<<7);
+ DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
+ }
+
+ if ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
+ (phy->supported & SUPPORTED_10baseT_Full)) {
an_10_100_val |= (1<<6);
- DP(NETIF_MSG_LINK, "Advertising 10M\n");
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
+ }
+
+ if ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) &&
+ (phy->supported & SUPPORTED_10baseT_Half)) {
+ an_10_100_val |= (1<<5);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
+ }
}
/* Only 10/100 are allowed to work in FORCE mode */
@@ -10611,10 +10639,18 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
0x40);
} else {
+ /* The EXTPHY2 LED mode indicates that the 100M/1G/10G LED
+ * sources are all wired through LED1, rather than only the
+ * 10G source as in other modes.
+ */
+ val = ((params->hw_led_mode <<
+ SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY2) ? 0x98 : 0x80;
+
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LED1_MASK,
- 0x80);
+ val);
/* Tell LED3 to blink on source */
bnx2x_cl45_read(bp, phy,
@@ -10770,9 +10806,9 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
(1<<11));
if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
- (phy->req_line_speed == SPEED_1000)) {
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (phy->req_line_speed == SPEED_1000)) {
an_1000_val |= (1<<8);
autoneg_val |= (1<<9 | 1<<12);
if (phy->req_duplex == DUPLEX_FULL)
@@ -10788,30 +10824,32 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
0x09,
&an_1000_val);
- /* Set 100 speed advertisement */
- if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) {
- an_10_100_val |= (1<<7);
- /* Enable autoneg and restart autoneg for legacy speeds */
- autoneg_val |= (1<<9 | 1<<12);
-
- if (phy->req_duplex == DUPLEX_FULL)
- an_10_100_val |= (1<<8);
- DP(NETIF_MSG_LINK, "Advertising 100M\n");
- }
-
- /* Set 10 speed advertisement */
- if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
- an_10_100_val |= (1<<5);
- autoneg_val |= (1<<9 | 1<<12);
- if (phy->req_duplex == DUPLEX_FULL)
+ /* Advertise 10/100 link speed */
+ if (phy->req_line_speed == SPEED_AUTO_NEG) {
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
+ an_10_100_val |= (1<<5);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
+ }
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {
an_10_100_val |= (1<<6);
- DP(NETIF_MSG_LINK, "Advertising 10M\n");
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
+ }
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
+ an_10_100_val |= (1<<7);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
+ }
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
+ an_10_100_val |= (1<<8);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
+ }
}
/* Only 10/100 are allowed to work in FORCE mode */
@@ -12434,6 +12472,7 @@ static int bnx2x_avoid_link_flap(struct link_params *params,
u32 dont_clear_stat, lfa_sts;
struct bnx2x *bp = params->bp;
+ bnx2x_set_mdio_emac_per_phy(bp, params);
/* Sync the link parameters */
bnx2x_link_status_update(params, vars);
@@ -13287,6 +13326,10 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
old_status, status);
+ /* Do not touch the link in case the physical link is down */
+ if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
+ return 1;
+
/* a. Update shmem->link_status accordingly
* b. Update link_vars->link_up
*/
@@ -13336,9 +13379,9 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
* a fault, for example, due to break in the TX side of fiber.
*
******************************************************************************/
-int bnx2x_check_half_open_conn(struct link_params *params,
- struct link_vars *vars,
- u8 notify)
+static int bnx2x_check_half_open_conn(struct link_params *params,
+ struct link_vars *vars,
+ u8 notify)
{
struct bnx2x *bp = params->bp;
u32 lss_status = 0;
@@ -13434,43 +13477,6 @@ static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy,
}
}
}
-static void bnx2x_disable_kr2(struct link_params *params,
- struct link_vars *vars,
- struct bnx2x_phy *phy)
-{
- struct bnx2x *bp = params->bp;
- int i;
- static struct bnx2x_reg_set reg_set[] = {
- /* Step 1 - Program the TX/RX alignment markers */
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
- {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
- {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
- {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
- {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
- {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
- };
- DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
-
- for (i = 0; i < ARRAY_SIZE(reg_set); i++)
- bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
- reg_set[i].val);
- vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
- bnx2x_update_link_attr(params, vars->link_attr_sync);
-
- vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
- /* Restart AN on leading lane */
- bnx2x_warpcore_restart_AN_KR(phy, params);
-}
-
static void bnx2x_kr2_recovery(struct link_params *params,
struct link_vars *vars,
struct bnx2x_phy *phy)
@@ -13532,7 +13538,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
*/
not_kr2_device = (((base_page & 0x8000) == 0) ||
(((base_page & 0x8000) &&
- ((next_page & 0xe0) == 0x2))));
+ ((next_page & 0xe0) == 0x20))));
/* In case KR2 is already disabled, check if we need to re-enable it */
if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
@@ -13548,6 +13554,8 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
/* Disable KR2 on both lanes */
DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page);
bnx2x_disable_kr2(params, vars, phy);
+ /* Restart AN on leading lane */
+ bnx2x_warpcore_restart_AN_KR(phy, params);
return;
}
}
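
One fix in this file is easy to miss: in bnx2x_check_kr2_wa(), masking next_page with 0xe0 keeps bits 5-7, so the result is always a multiple of 0x20 and the old comparison against 0x2 could never be true; the next-page branch therefore never recognized a non-KR2 link partner. A two-line demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int next_page = 0x23;	/* example next-page value */

	/* 0xe0 keeps bits 5-7: possible results are 0x00, 0x20, ... 0xe0 */
	printf("masked: %#x\n", next_page & 0xe0);
	printf("old test: %d, fixed test: %d\n",
	       (next_page & 0xe0) == 0x2,	/* always 0 */
	       (next_page & 0xe0) == 0x20);	/* 1 for this value */
	return 0;
}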
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 4df45234fdc..389f5f8cb0a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -533,19 +533,11 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
int bnx2x_ets_e3b0_config(const struct link_params *params,
const struct link_vars *vars,
struct bnx2x_ets_params *ets_params);
-/* Read pfc statistic*/
-void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
- u32 pfc_frames_sent[2],
- u32 pfc_frames_received[2]);
+
void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
u32 chip_id, u32 shmem_base, u32 shmem2_base,
u8 port);
-int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
- struct link_params *params);
-
void bnx2x_period_func(struct link_params *params, struct link_vars *vars);
-int bnx2x_check_half_open_conn(struct link_params *params,
- struct link_vars *vars, u8 notify);
#endif /* BNX2X_LINK_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e5da07858a2..6a8b1453a1b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <linux/aer.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -93,33 +94,34 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);
-int num_queues;
-module_param(num_queues, int, 0);
+int bnx2x_num_queues;
+module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO);
MODULE_PARM_DESC(num_queues,
" Set number of queues (default is as a number of CPUs)");
static int disable_tpa;
-module_param(disable_tpa, int, 0);
+module_param(disable_tpa, int, S_IRUGO);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
-int int_mode;
-module_param(int_mode, int, 0);
+static int int_mode;
+module_param(int_mode, int, S_IRUGO);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
"(1 INT#x; 2 MSI)");
static int dropless_fc;
-module_param(dropless_fc, int, 0);
+module_param(dropless_fc, int, S_IRUGO);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
static int mrrs = -1;
-module_param(mrrs, int, 0);
+module_param(mrrs, int, S_IRUGO);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
static int debug;
-module_param(debug, int, 0);
+module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, " Default debug msglevel");
-struct workqueue_struct *bnx2x_wq;
+static struct workqueue_struct *bnx2x_wq;
+struct workqueue_struct *bnx2x_iov_wq;
struct bnx2x_mac_vals {
u32 xmac_addr;
@@ -278,6 +280,12 @@ MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);
+
+/* Forward declarations */
+static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
+static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
+static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
+
/****************************************************************************
* General service functions
****************************************************************************/
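
A note on the module-parameter hunk above: S_IRUGO (0444) exposes each parameter read-only under /sys/module/bnx2x/parameters/, and module_param_named() keeps the user-visible name num_queues while the C symbol gains a bnx2x_ prefix now that it is extern'd from bnx2x_cmn.h. A minimal sketch of the pattern:

#include <linux/module.h>
#include <linux/stat.h>

/* Sketch: the sysfs name stays "num_queues" while the symbol carries
 * a prefix; S_IRUGO makes the parameter world-readable but not
 * writable at runtime.
 */
static int example_num_queues;
module_param_named(num_queues, example_num_queues, int, S_IRUGO);
MODULE_PARM_DESC(num_queues, " Set number of queues (default is the number of CPUs)");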
@@ -503,9 +511,9 @@ void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
}
/* issue a dmae command over the init-channel and wait for completion */
-int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+ u32 *comp)
{
- u32 *wb_comp = bnx2x_sp(bp, wb_comp);
int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
int rc = 0;
@@ -518,14 +526,14 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
spin_lock_bh(&bp->dmae_lock);
/* reset completion */
- *wb_comp = 0;
+ *comp = 0;
/* post the command on the channel used for initializations */
bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
/* wait for completion */
udelay(5);
- while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
+ while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
if (!cnt ||
(bp->recovery_state != BNX2X_RECOVERY_DONE &&
@@ -537,7 +545,7 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
cnt--;
udelay(50);
}
- if (*wb_comp & DMAE_PCI_ERR_FLAG) {
+ if (*comp & DMAE_PCI_ERR_FLAG) {
BNX2X_ERR("DMAE PCI error!\n");
rc = DMAE_PCI_ERROR;
}
@@ -574,10 +582,12 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
dmae.len = len32;
/* issue the command and wait for completion */
- rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
+ rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
if (rc) {
BNX2X_ERR("DMAE returned failure %d\n", rc);
+#ifdef BNX2X_STOP_ON_ERROR
bnx2x_panic();
+#endif
}
}
@@ -611,10 +621,12 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
dmae.len = len32;
/* issue the command and wait for completion */
- rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
+ rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
if (rc) {
BNX2X_ERR("DMAE returned failure %d\n", rc);
+#ifdef BNX2X_STOP_ON_ERROR
bnx2x_panic();
+#endif
}
}
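The completion word is now caller-supplied rather than hard-wired to the PF's slowpath wb_comp, so flows that own a different DMA-coherent buffer can still drive the init DMAE channel. A minimal sketch of the new calling convention; the helper and buffer names below are illustrative, not part of the patch:

	/* sketch: issue a DMAE command with a caller-owned completion
	 * word instead of the default bnx2x_sp(bp, wb_comp); my_comp is
	 * a hypothetical DMA-coherent u32 set up by the caller
	 */
	static int my_dmae_copy(struct bnx2x *bp, struct dmae_command *dmae,
				u32 *my_comp)
	{
		int rc;

		rc = bnx2x_issue_dmae_with_comp(bp, dmae, my_comp);
		if (rc)
			BNX2X_ERR("DMAE returned failure %d\n", rc);
		return rc;
	}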
@@ -751,6 +763,10 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
return rc;
}
+#define MCPR_TRACE_BUFFER_SIZE (0x800)
+#define SCRATCH_BUFFER_SIZE(bp) \
+ (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
+
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
u32 addr, val;
@@ -775,7 +791,17 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
trace_shmem_base = bp->common.shmem_base;
else
trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
- addr = trace_shmem_base - 0x800;
+
+ /* sanity */
+ if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
+ trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
+ SCRATCH_BUFFER_SIZE(bp)) {
+ BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
+ trace_shmem_base);
+ return;
+ }
+
+ addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;
/* validate TRCB signature */
mark = REG_RD(bp, addr);
@@ -787,14 +813,17 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
/* read cyclic buffer pointer */
addr += 4;
mark = REG_RD(bp, addr);
- mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
- + ((mark + 0x3) & ~0x3) - 0x08000000;
+ mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
+ if (mark >= trace_shmem_base || mark < addr + 4) {
+ BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
+ return;
+ }
printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
printk("%s", lvl);
/* dump buffer after the mark */
- for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
+ for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
for (word = 0; word < 8; word++)
data[word] = htonl(REG_RD(bp, offset + 4*word));
data[8] = 0x0;
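The mark arithmetic above is easy to misread: the value read from the TRCB is rounded up to a dword boundary and rebased out of the MCP's private 0x08000000 window into the scratchpad view before the new bounds check is applied. A stand-alone demonstration of that arithmetic, using illustrative values rather than the chip's real bases:

#include <stdio.h>

int main(void)
{
	/* illustrative values only */
	unsigned int scratch_base = 0xa0000;	/* MCPR_SCRATCH_BASE stand-in */
	unsigned int trace_shmem_base = 0xaf900;
	unsigned int raw_mark = 0x0800f4c2;	/* as read from the TRCB */

	/* round up to a dword, rebase out of the 0x08000000 window */
	unsigned int mark = scratch_base + ((raw_mark + 0x3) & ~0x3u)
			    - 0x08000000;

	printf("mark=0x%x, inside buffer=%d\n", mark,
	       mark < trace_shmem_base);
	return 0;
}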
@@ -890,7 +919,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
u16 start = 0, end = 0;
u8 cos;
#endif
- if (disable_int)
+ if (IS_PF(bp) && disable_int)
bnx2x_int_disable(bp);
bp->stats_state = STATS_STATE_DISABLED;
@@ -901,33 +930,41 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
/* Indices */
/* Common */
- BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
- bp->def_idx, bp->def_att_idx, bp->attn_state,
- bp->spq_prod_idx, bp->stats_counter);
- BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
- bp->def_status_blk->atten_status_block.attn_bits,
- bp->def_status_blk->atten_status_block.attn_bits_ack,
- bp->def_status_blk->atten_status_block.status_block_id,
- bp->def_status_blk->atten_status_block.attn_bits_index);
- BNX2X_ERR(" def (");
- for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
- pr_cont("0x%x%s",
- bp->def_status_blk->sp_sb.index_values[i],
- (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
-
- for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
- *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
- CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
- i*sizeof(u32));
-
- pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
- sp_sb_data.igu_sb_id,
- sp_sb_data.igu_seg_id,
- sp_sb_data.p_func.pf_id,
- sp_sb_data.p_func.vnic_id,
- sp_sb_data.p_func.vf_id,
- sp_sb_data.p_func.vf_valid,
- sp_sb_data.state);
+ if (IS_PF(bp)) {
+ struct host_sp_status_block *def_sb = bp->def_status_blk;
+ int data_size, cstorm_offset;
+
+ BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
+ bp->def_idx, bp->def_att_idx, bp->attn_state,
+ bp->spq_prod_idx, bp->stats_counter);
+ BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
+ def_sb->atten_status_block.attn_bits,
+ def_sb->atten_status_block.attn_bits_ack,
+ def_sb->atten_status_block.status_block_id,
+ def_sb->atten_status_block.attn_bits_index);
+ BNX2X_ERR(" def (");
+ for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
+ pr_cont("0x%x%s",
+ def_sb->sp_sb.index_values[i],
+ (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
+
+ data_size = sizeof(struct hc_sp_status_block_data) /
+ sizeof(u32);
+ cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
+ for (i = 0; i < data_size; i++)
+ *((u32 *)&sp_sb_data + i) =
+ REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
+ i * sizeof(u32));
+
+ pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
+ sp_sb_data.igu_sb_id,
+ sp_sb_data.igu_seg_id,
+ sp_sb_data.p_func.pf_id,
+ sp_sb_data.p_func.vnic_id,
+ sp_sb_data.p_func.vf_id,
+ sp_sb_data.p_func.vf_valid,
+ sp_sb_data.state);
+ }
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -985,6 +1022,11 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
pr_cont("0x%x%s",
fp->sb_index_values[j],
(j == loop - 1) ? ")" : " ");
+
+ /* VF cannot access the FW reflection of the status block */
+ if (IS_VF(bp))
+ continue;
+
/* fw sb data */
data_size = CHIP_IS_E1x(bp) ?
sizeof(struct hc_status_block_data_e1x) :
@@ -1036,16 +1078,18 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
}
#ifdef BNX2X_STOP_ON_ERROR
-
- /* event queue */
- BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
- for (i = 0; i < NUM_EQ_DESC; i++) {
- u32 *data = (u32 *)&bp->eq_ring[i].message.data;
-
- BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
- i, bp->eq_ring[i].message.opcode,
- bp->eq_ring[i].message.error);
- BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]);
+ if (IS_PF(bp)) {
+ /* event queue */
+ BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
+ for (i = 0; i < NUM_EQ_DESC; i++) {
+ u32 *data = (u32 *)&bp->eq_ring[i].message.data;
+
+ BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
+ i, bp->eq_ring[i].message.opcode,
+ bp->eq_ring[i].message.error);
+ BNX2X_ERR("data: %x %x %x\n",
+ data[0], data[1], data[2]);
+ }
}
/* Rings */
@@ -1112,8 +1156,10 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
}
}
#endif
- bnx2x_fw_dump(bp);
- bnx2x_mc_assert(bp);
+ if (IS_PF(bp)) {
+ bnx2x_fw_dump(bp);
+ bnx2x_mc_assert(bp);
+ }
BNX2X_ERR("end crash dump -----------------\n");
}
@@ -1786,6 +1832,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
drv_cmd = BNX2X_Q_CMD_EMPTY;
break;
+ case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
+ DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
+ drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
+ break;
+
default:
BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
command, fp->index);
@@ -1806,13 +1857,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
#else
return;
#endif
- /* SRIOV: reschedule any 'in_progress' operations */
- bnx2x_iov_sp_event(bp, cid, true);
- smp_mb__before_atomic_inc();
+ smp_mb__before_atomic();
atomic_inc(&bp->cq_spq_left);
/* push the change in bp->spq_left and towards the memory */
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
@@ -1827,11 +1876,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
* sp_state is cleared, and this order prevents
* races
*/
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
wmb();
clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
/* schedule the sp task as mcp ack is required */
bnx2x_schedule_sp_task(bp);
@@ -2261,6 +2310,23 @@ static void bnx2x_set_requested_fc(struct bnx2x *bp)
bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
}
+static void bnx2x_init_dropless_fc(struct bnx2x *bp)
+{
+ u32 pause_enabled = 0;
+
+ if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
+ if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ pause_enabled = 1;
+
+ REG_WR(bp, BAR_USTRORM_INTMEM +
+ USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
+ pause_enabled);
+ }
+
+ DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
+ pause_enabled ? "enabled" : "disabled");
+}
+
int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
@@ -2294,6 +2360,8 @@ int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
bnx2x_release_phy_lock(bp);
+ bnx2x_init_dropless_fc(bp);
+
bnx2x_calc_fc_adv(bp);
if (bp->link_vars.link_up) {
@@ -2315,6 +2383,8 @@ void bnx2x_link_set(struct bnx2x *bp)
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
bnx2x_release_phy_lock(bp);
+ bnx2x_init_dropless_fc(bp);
+
bnx2x_calc_fc_adv(bp);
} else
BNX2X_ERR("Bootcode is missing - can not set link\n");
@@ -2476,7 +2546,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
input.port_rate = bp->link_vars.line_speed;
- if (cmng_type == CMNG_FNS_MINMAX) {
+ if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
int vn;
/* read mf conf from shmem */
@@ -2533,6 +2603,21 @@ static void storm_memset_cmng(struct bnx2x *bp,
}
}
+/* init cmng mode in HW according to local configuration */
+void bnx2x_set_local_cmng(struct bnx2x *bp)
+{
+ int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
+
+ if (cmng_fns != CMNG_FNS_NONE) {
+ bnx2x_cmng_fns_init(bp, false, cmng_fns);
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+ } else {
+ /* rate shaping and fairness are disabled */
+ DP(NETIF_MSG_IFUP,
+ "single function mode without fairness\n");
+ }
+}
+
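Factoring the cmng programming out of the link-interrupt path lets other flows refresh rate shaping without a full link update, while the port_rate check added above protects the min-max fairness math from a zero line speed. A hedged usage sketch of the expected call pattern:

	/* sketch: refresh rate-shaping/fairness once a valid speed exists */
	if (bp->link_vars.link_up && bp->link_vars.line_speed)
		bnx2x_set_local_cmng(bp);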
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
@@ -2541,20 +2626,9 @@ static void bnx2x_link_attn(struct bnx2x *bp)
bnx2x_link_update(&bp->link_params, &bp->link_vars);
- if (bp->link_vars.link_up) {
-
- /* dropless flow control */
- if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
- int port = BP_PORT(bp);
- u32 pause_enabled = 0;
-
- if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
- pause_enabled = 1;
+ bnx2x_init_dropless_fc(bp);
- REG_WR(bp, BAR_USTRORM_INTMEM +
- USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
- pause_enabled);
- }
+ if (bp->link_vars.link_up) {
if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
struct host_port_stats *pstats;
@@ -2568,17 +2642,8 @@ static void bnx2x_link_attn(struct bnx2x *bp)
bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
}
- if (bp->link_vars.link_up && bp->link_vars.line_speed) {
- int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
-
- if (cmng_fns != CMNG_FNS_NONE) {
- bnx2x_cmng_fns_init(bp, false, cmng_fns);
- storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
- } else
- /* rate shaping and fairness are disabled */
- DP(NETIF_MSG_IFUP,
- "single function mode without fairness\n");
- }
+ if (bp->link_vars.link_up && bp->link_vars.line_speed)
+ bnx2x_set_local_cmng(bp);
__bnx2x_link_report(bp);
@@ -2963,6 +3028,9 @@ static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
if (zero_stats)
__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+ if (bp->flags & TX_SWITCHING)
+ __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);
+
__set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
__set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
@@ -3260,6 +3328,10 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
ether_stat->txq_size = bp->tx_ring_size;
ether_stat->rxq_size = bp->rx_ring_size;
+
+#ifdef CONFIG_BNX2X_SRIOV
+ ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
+#endif
}
static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
@@ -3409,10 +3481,15 @@ static void bnx2x_handle_eee_event(struct bnx2x *bp)
bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
}
+#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH (20)
+#define BNX2X_UPDATE_DRV_INFO_IND_COUNT (25)
+
static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
{
enum drv_info_opcode op_code;
u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
+ bool release = false;
+ int wait;
/* if drv_info version supported by MFW doesn't match - send NACK */
if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
@@ -3423,6 +3500,9 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
DRV_INFO_CONTROL_OP_CODE_SHIFT;
+ /* Must prevent other flows from accessing drv_info_to_mcp */
+ mutex_lock(&bp->drv_info_mutex);
+
memset(&bp->slowpath->drv_info_to_mcp, 0,
sizeof(union drv_info_to_mcp));
@@ -3439,7 +3519,7 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
default:
/* if op code isn't supported - send NACK */
bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
- return;
+ goto out;
}
/* if we got drv_info attn from MFW then these fields are defined in
@@ -3451,6 +3531,106 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
+
+ /* Since management possibly wants both this and get_driver_version,
+ * we need to wait until it notifies us that it has finished
+ * utilizing the buffer.
+ */
+ if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
+ DP(BNX2X_MSG_MCP, "Management does not support indication\n");
+ } else if (!bp->drv_info_mng_owner) {
+ u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
+
+ for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
+ u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
+
+ /* Management is done; need to clear indication */
+ if (indication & bit) {
+ SHMEM2_WR(bp, mfw_drv_indication,
+ indication & ~bit);
+ release = true;
+ break;
+ }
+
+ msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
+ }
+ }
+ if (!release) {
+ DP(BNX2X_MSG_MCP, "Management did not release indication\n");
+ bp->drv_info_mng_owner = true;
+ }
+
+out:
+ mutex_unlock(&bp->drv_info_mutex);
+}
+
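The new handshake polls the mfw_drv_indication word for up to 25 iterations of 20 ms each (roughly half a second) before the driver declares itself owner of the buffer. A stand-alone toy model of the bit protocol; the bit position is illustrative, only the clear-to-ack shape matches the patch:

#include <stdio.h>

#define READ_DONE_BIT	(1u << 0)	/* illustrative bit position */
#define IND_COUNT	25

int main(void)
{
	unsigned int indication = READ_DONE_BIT; /* pretend MFW finished */
	int wait, released = 0;

	for (wait = 0; wait < IND_COUNT; wait++) {
		if (indication & READ_DONE_BIT) {
			indication &= ~READ_DONE_BIT;	/* ack by clearing */
			released = 1;
			break;
		}
		/* the real driver sleeps 20 ms between polls */
	}
	printf("released=%d after %d poll(s)\n", released, wait + 1);
	return 0;
}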
+static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
+{
+ u8 vals[4];
+ int i = 0;
+
+ if (bnx2x_format) {
+ i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
+ &vals[0], &vals[1], &vals[2], &vals[3]);
+ if (i > 0)
+ vals[0] -= '0';
+ } else {
+ i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
+ &vals[0], &vals[1], &vals[2], &vals[3]);
+ }
+
+ while (i < 4)
+ vals[i++] = 0;
+
+ return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
+}
+
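The utility packs up to four version fields into one u32, high byte first. For the bnx2x format, sscanf consumes the leading "1." literally, reads the next digit as a character (hence the '0' correction), and stops at the '-' suffix, so the trailing field falls back to zero. A runnable check of that behavior (the driver uses u8/%hhd; %hhu is used here to keep userspace format checking quiet):

#include <stdio.h>

static unsigned int pack_version(const char *version, int bnx2x_format)
{
	unsigned char vals[4];
	int i = 0;

	if (bnx2x_format) {
		i = sscanf(version, "1.%c%hhu.%hhu.%hhu",
			   (char *)&vals[0], &vals[1], &vals[2], &vals[3]);
		if (i > 0)
			vals[0] -= '0';
	} else {
		i = sscanf(version, "%hhu.%hhu.%hhu.%hhu",
			   &vals[0], &vals[1], &vals[2], &vals[3]);
	}
	while (i < 4)
		vals[i++] = 0;
	return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
}

int main(void)
{
	/* "1.78.19-0" -> 0x07081300, i.e. fields 7, 8, 19, 0 */
	printf("0x%08x\n", pack_version("1.78.19-0", 1));
	return 0;
}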
+void bnx2x_update_mng_version(struct bnx2x *bp)
+{
+ u32 iscsiver = DRV_VER_NOT_LOADED;
+ u32 fcoever = DRV_VER_NOT_LOADED;
+ u32 ethver = DRV_VER_NOT_LOADED;
+ int idx = BP_FW_MB_IDX(bp);
+ u8 *version;
+
+ if (!SHMEM2_HAS(bp, func_os_drv_ver))
+ return;
+
+ mutex_lock(&bp->drv_info_mutex);
+ /* Must not proceed while `bnx2x_handle_drv_info_req' may still run */
+ if (bp->drv_info_mng_owner)
+ goto out;
+
+ if (bp->state != BNX2X_STATE_OPEN)
+ goto out;
+
+ /* Parse ethernet driver version */
+ ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
+ if (!CNIC_LOADED(bp))
+ goto out;
+
+ /* Try getting storage driver version via cnic */
+ memset(&bp->slowpath->drv_info_to_mcp, 0,
+ sizeof(union drv_info_to_mcp));
+ bnx2x_drv_info_iscsi_stat(bp);
+ version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
+ iscsiver = bnx2x_update_mng_version_utility(version, false);
+
+ memset(&bp->slowpath->drv_info_to_mcp, 0,
+ sizeof(union drv_info_to_mcp));
+ bnx2x_drv_info_fcoe_stat(bp);
+ version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
+ fcoever = bnx2x_update_mng_version_utility(version, false);
+
+out:
+ SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
+ SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
+ SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
+
+ mutex_unlock(&bp->drv_info_mutex);
+
+ DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
+ ethver, iscsiver, fcoever);
}
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
@@ -3593,10 +3773,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
HW_CID(bp, cid));
- type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
-
- type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
- SPE_HDR_FUNCTION_ID);
+ /* In some cases, mainly SRIOV-related flows, cmd_type may already
+ * contain the func-id, so we add it here only if it's not
+ * already set.
+ */
+ if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
+ type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
+ SPE_HDR_CONN_TYPE;
+ type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
+ SPE_HDR_FUNCTION_ID);
+ } else {
+ type = cmd_type;
+ }
spe->hdr.type = cpu_to_le16(type);
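With this change a caller can pre-encode a function id (typically a VF's) into cmd_type, and bnx2x_sp_post() will respect it instead of stamping in BP_FUNC(). A hedged sketch of such a caller; vf_fid and the surrounding arguments are placeholders:

	/* sketch: pre-encoding a VF function id into the SPE type field
	 * so bnx2x_sp_post() leaves it untouched (vf_fid is illustrative)
	 */
	u16 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT) &
		   SPE_HDR_CONN_TYPE;

	type |= (vf_fid << SPE_HDR_FUNCTION_ID_SHIFT) & SPE_HDR_FUNCTION_ID;
	/* ... then pass 'type' as the cmd_type argument of bnx2x_sp_post() */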
@@ -3827,10 +4015,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp)
* This is due to some boards consuming sufficient power when driver is
* up to overheat if fan fails.
*/
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
}
static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -3974,7 +4159,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
bnx2x_handle_drv_info_req(bp);
if (val & DRV_STATUS_VF_DISABLED)
- bnx2x_vf_handle_flr_event(bp);
+ bnx2x_schedule_iov_task(bp,
+ BNX2X_IOV_HANDLE_FLR);
if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
bnx2x_pmf_update(bp);
@@ -4264,65 +4450,60 @@ static void _print_next_block(int idx, const char *blk)
pr_cont("%s%s", idx ? ", " : "", blk);
}
-static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
- int par_num, bool print)
+static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
+ int *par_num, bool print)
{
- int i = 0;
- u32 cur_bit = 0;
+ u32 cur_bit;
+ bool res;
+ int i;
+
+ res = false;
+
for (i = 0; sig; i++) {
- cur_bit = ((u32)0x1 << i);
+ cur_bit = (0x1UL << i);
if (sig & cur_bit) {
- switch (cur_bit) {
- case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "BRB");
+ res |= true; /* Each bit is a real error! */
+
+ if (print) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
+ _print_next_block((*par_num)++, "BRB");
_print_parity(bp,
BRB1_REG_BRB1_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "PARSER");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "PARSER");
_print_parity(bp, PRS_REG_PRS_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "TSDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
+ _print_next_block((*par_num)++, "TSDM");
_print_parity(bp,
TSDM_REG_TSDM_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++,
+ break;
+ case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
+ _print_next_block((*par_num)++,
"SEARCHER");
_print_parity(bp, SRC_REG_SRC_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "TCM");
- _print_parity(bp,
- TCM_REG_TCM_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "TSEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
+ _print_next_block((*par_num)++, "TCM");
+ _print_parity(bp, TCM_REG_TCM_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "TSEMI");
_print_parity(bp,
TSEM_REG_TSEM_PRTY_STS_0);
_print_parity(bp,
TSEM_REG_TSEM_PRTY_STS_1);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "XPB");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
+ _print_next_block((*par_num)++, "XPB");
_print_parity(bp, GRCBASE_XPB +
PB_REG_PB_PRTY_STS);
+ break;
}
- break;
}
/* Clear the bit */
@@ -4330,53 +4511,59 @@ static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
}
}
- return par_num;
+ return res;
}
-static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
- int par_num, bool *global,
+static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
+ int *par_num, bool *global,
bool print)
{
- int i = 0;
- u32 cur_bit = 0;
+ u32 cur_bit;
+ bool res;
+ int i;
+
+ res = false;
+
for (i = 0; sig; i++) {
- cur_bit = ((u32)0x1 << i);
+ cur_bit = (0x1UL << i);
if (sig & cur_bit) {
+ res |= true; /* Each bit is a real error! */
switch (cur_bit) {
case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "PBF");
+ _print_next_block((*par_num)++, "PBF");
_print_parity(bp, PBF_REG_PBF_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "QM");
+ _print_next_block((*par_num)++, "QM");
_print_parity(bp, QM_REG_QM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "TM");
+ _print_next_block((*par_num)++, "TM");
_print_parity(bp, TM_REG_TM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "XSDM");
+ _print_next_block((*par_num)++, "XSDM");
_print_parity(bp,
XSDM_REG_XSDM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "XCM");
+ _print_next_block((*par_num)++, "XCM");
_print_parity(bp, XCM_REG_XCM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "XSEMI");
+ _print_next_block((*par_num)++,
+ "XSEMI");
_print_parity(bp,
XSEM_REG_XSEM_PRTY_STS_0);
_print_parity(bp,
@@ -4385,7 +4572,7 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
break;
case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++,
+ _print_next_block((*par_num)++,
"DOORBELLQ");
_print_parity(bp,
DORQ_REG_DORQ_PRTY_STS);
@@ -4393,7 +4580,7 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
break;
case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "NIG");
+ _print_next_block((*par_num)++, "NIG");
if (CHIP_IS_E1x(bp)) {
_print_parity(bp,
NIG_REG_NIG_PRTY_STS);
@@ -4407,32 +4594,34 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
break;
case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
if (print)
- _print_next_block(par_num++,
+ _print_next_block((*par_num)++,
"VAUX PCI CORE");
*global = true;
break;
case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "DEBUG");
+ _print_next_block((*par_num)++,
+ "DEBUG");
_print_parity(bp, DBG_REG_DBG_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "USDM");
+ _print_next_block((*par_num)++, "USDM");
_print_parity(bp,
USDM_REG_USDM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "UCM");
+ _print_next_block((*par_num)++, "UCM");
_print_parity(bp, UCM_REG_UCM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "USEMI");
+ _print_next_block((*par_num)++,
+ "USEMI");
_print_parity(bp,
USEM_REG_USEM_PRTY_STS_0);
_print_parity(bp,
@@ -4441,21 +4630,21 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
break;
case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "UPB");
+ _print_next_block((*par_num)++, "UPB");
_print_parity(bp, GRCBASE_UPB +
PB_REG_PB_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "CSDM");
+ _print_next_block((*par_num)++, "CSDM");
_print_parity(bp,
CSDM_REG_CSDM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "CCM");
+ _print_next_block((*par_num)++, "CCM");
_print_parity(bp, CCM_REG_CCM_PRTY_STS);
}
break;
@@ -4466,80 +4655,73 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
}
}
- return par_num;
+ return res;
}
-static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
- int par_num, bool print)
+static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
+ int *par_num, bool print)
{
- int i = 0;
- u32 cur_bit = 0;
+ u32 cur_bit;
+ bool res;
+ int i;
+
+ res = false;
+
for (i = 0; sig; i++) {
- cur_bit = ((u32)0x1 << i);
+ cur_bit = (0x1UL << i);
if (sig & cur_bit) {
- switch (cur_bit) {
- case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "CSEMI");
+ res |= true; /* Each bit is a real error! */
+ if (print) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "CSEMI");
_print_parity(bp,
CSEM_REG_CSEM_PRTY_STS_0);
_print_parity(bp,
CSEM_REG_CSEM_PRTY_STS_1);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "PXP");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
+ _print_next_block((*par_num)++, "PXP");
_print_parity(bp, PXP_REG_PXP_PRTY_STS);
_print_parity(bp,
PXP2_REG_PXP2_PRTY_STS_0);
_print_parity(bp,
PXP2_REG_PXP2_PRTY_STS_1);
- }
- break;
- case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++,
- "PXPPCICLOCKCLIENT");
- break;
- case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "CFC");
+ break;
+ case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "PXPPCICLOCKCLIENT");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
+ _print_next_block((*par_num)++, "CFC");
_print_parity(bp,
CFC_REG_CFC_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "CDU");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
+ _print_next_block((*par_num)++, "CDU");
_print_parity(bp, CDU_REG_CDU_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "DMAE");
+ break;
+ case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
+ _print_next_block((*par_num)++, "DMAE");
_print_parity(bp,
DMAE_REG_DMAE_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "IGU");
+ break;
+ case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
+ _print_next_block((*par_num)++, "IGU");
if (CHIP_IS_E1x(bp))
_print_parity(bp,
HC_REG_HC_PRTY_STS);
else
_print_parity(bp,
IGU_REG_IGU_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "MISC");
+ break;
+ case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
+ _print_next_block((*par_num)++, "MISC");
_print_parity(bp,
MISC_REG_MISC_PRTY_STS);
+ break;
}
- break;
}
/* Clear the bit */
@@ -4547,40 +4729,49 @@ static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
}
}
- return par_num;
+ return res;
}
-static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
- bool *global, bool print)
+static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
+ int *par_num, bool *global,
+ bool print)
{
- int i = 0;
- u32 cur_bit = 0;
+ bool res = false;
+ u32 cur_bit;
+ int i;
+
for (i = 0; sig; i++) {
- cur_bit = ((u32)0x1 << i);
+ cur_bit = (0x1UL << i);
if (sig & cur_bit) {
switch (cur_bit) {
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
if (print)
- _print_next_block(par_num++, "MCP ROM");
+ _print_next_block((*par_num)++,
+ "MCP ROM");
*global = true;
+ res |= true;
break;
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
if (print)
- _print_next_block(par_num++,
+ _print_next_block((*par_num)++,
"MCP UMP RX");
*global = true;
+ res |= true;
break;
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
if (print)
- _print_next_block(par_num++,
+ _print_next_block((*par_num)++,
"MCP UMP TX");
*global = true;
+ res |= true;
break;
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
if (print)
- _print_next_block(par_num++,
+ _print_next_block((*par_num)++,
"MCP SCPAD");
- *global = true;
+ /* clear latched SCPAD PARITY from MCP */
+ REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
+ 1UL << 10);
break;
}
@@ -4589,45 +4780,50 @@ static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
}
}
- return par_num;
+ return res;
}
-static int bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
- int par_num, bool print)
+static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
+ int *par_num, bool print)
{
- int i = 0;
- u32 cur_bit = 0;
+ u32 cur_bit;
+ bool res;
+ int i;
+
+ res = false;
+
for (i = 0; sig; i++) {
- cur_bit = ((u32)0x1 << i);
+ cur_bit = (0x1UL << i);
if (sig & cur_bit) {
- switch (cur_bit) {
- case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "PGLUE_B");
+ res |= true; /* Each bit is a real error! */
+ if (print) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "PGLUE_B");
_print_parity(bp,
- PGLUE_B_REG_PGLUE_B_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "ATC");
+ PGLUE_B_REG_PGLUE_B_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
+ _print_next_block((*par_num)++, "ATC");
_print_parity(bp,
ATC_REG_ATC_PRTY_STS);
+ break;
}
- break;
}
-
/* Clear the bit */
sig &= ~cur_bit;
}
}
- return par_num;
+ return res;
}
static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
u32 *sig)
{
+ bool res = false;
+
if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
(sig[1] & HW_PRTY_ASSERT_SET_1) ||
(sig[2] & HW_PRTY_ASSERT_SET_2) ||
@@ -4644,23 +4840,22 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
if (print)
netdev_err(bp->dev,
"Parity errors detected in blocks: ");
- par_num = bnx2x_check_blocks_with_parity0(bp,
- sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
- par_num = bnx2x_check_blocks_with_parity1(bp,
- sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
- par_num = bnx2x_check_blocks_with_parity2(bp,
- sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
- par_num = bnx2x_check_blocks_with_parity3(
- sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
- par_num = bnx2x_check_blocks_with_parity4(bp,
- sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
+ res |= bnx2x_check_blocks_with_parity0(bp,
+ sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
+ res |= bnx2x_check_blocks_with_parity1(bp,
+ sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
+ res |= bnx2x_check_blocks_with_parity2(bp,
+ sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
+ res |= bnx2x_check_blocks_with_parity3(bp,
+ sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
+ res |= bnx2x_check_blocks_with_parity4(bp,
+ sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
if (print)
pr_cont("\n");
+ }
- return true;
- } else
- return false;
+ return res;
}
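The refactor above changes the contract of the per-group checkers: the counter, used only to number the printed block names, moves to an out-parameter, and the return value now answers the single question "was any real parity bit set?", which the caller ORs together. A stand-alone toy showing the pattern:

#include <stdbool.h>
#include <stdio.h>

/* toy checker: returns whether any bit was set; the print counter is
 * threaded through an out-parameter, as in the patched driver code
 */
static bool check_group(unsigned int sig, int *par_num)
{
	bool res = false;

	for (int i = 0; sig; i++) {
		unsigned int cur_bit = 1u << i;

		if (sig & cur_bit) {
			res = true;	/* each bit is a real error */
			printf("block %d ", (*par_num)++);
			sig &= ~cur_bit;
		}
	}
	return res;
}

int main(void)
{
	int par_num = 0;
	bool res = false;

	res |= check_group(0x5, &par_num);	/* two bits set */
	res |= check_group(0x0, &par_num);	/* none */
	printf("\nres=%d par_num=%d\n", res, par_num);
	return 0;
}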
/**
@@ -4687,6 +4882,14 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
attn.sig[3] = REG_RD(bp,
MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
port*4);
+ /* Since MCP attentions can't be disabled inside the block, we need to
+ * read AEU registers to see whether they're currently disabled
+ */
+ attn.sig[3] &= ((REG_RD(bp,
+ !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
+ : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
+ MISC_AEU_ENABLE_MCP_PRTY_BITS) |
+ ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
if (!CHIP_IS_E1x(bp))
attn.sig[4] = REG_RD(bp,
@@ -5069,9 +5272,9 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
__clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
/* mark latest Q bit */
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
/* send Q update ramrod for FCoE Q */
rc = bnx2x_queue_state_change(bp, &queue_params);
@@ -5148,14 +5351,14 @@ static void bnx2x_eq_int(struct bnx2x *bp)
/* handle eq element */
switch (opcode) {
case EVENT_RING_OPCODE_VF_PF_CHANNEL:
- DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n");
- bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event);
+ bnx2x_vf_mbx_schedule(bp,
+ &elem->message.data.vf_pf_event);
continue;
case EVENT_RING_OPCODE_STAT_QUERY:
- DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
- "got statistics comp event %d\n",
- bp->stats_comp++);
+ DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
+ "got statistics comp event %d\n",
+ bp->stats_comp++);
/* nothing to do with stats comp */
goto next_spqe;
@@ -5181,18 +5384,18 @@ static void bnx2x_eq_int(struct bnx2x *bp)
case EVENT_RING_OPCODE_STOP_TRAFFIC:
DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
+ bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
if (f_obj->complete_cmd(bp, f_obj,
BNX2X_F_CMD_TX_STOP))
break;
- bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
goto next_spqe;
case EVENT_RING_OPCODE_START_TRAFFIC:
DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
+ bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
if (f_obj->complete_cmd(bp, f_obj,
BNX2X_F_CMD_TX_START))
break;
- bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
goto next_spqe;
case EVENT_RING_OPCODE_FUNCTION_UPDATE:
@@ -5205,6 +5408,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
break;
} else {
+ int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
+
DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
"AFEX: ramrod completed FUNCTION_UPDATE\n");
f_obj->complete_cmd(bp, f_obj,
@@ -5214,12 +5419,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
* sp_rtnl task as all Queue SP operations
* should run under rtnl_lock.
*/
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
-
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, cmd, 0);
}
goto next_spqe;
@@ -5300,7 +5500,7 @@ next_spqe:
spqe_cnt++;
} /* for */
- smp_mb__before_atomic_inc();
+ smp_mb__before_atomic();
atomic_add(spqe_cnt, &bp->eq_spq_left);
bp->eq_cons = sw_cons;
@@ -5367,13 +5567,6 @@ static void bnx2x_sp_task(struct work_struct *work)
le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
}
- /* must be called after the EQ processing (since eq leads to sriov
- * ramrod completion flows).
- * This flow may have been scheduled by the arrival of a ramrod
- * completion, or by the sriov code rescheduling itself.
- */
- bnx2x_iov_sp_task(bp);
-
/* afex - poll to check if VIFSET_ACK should be sent to MFW */
if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
&bp->sp_state)) {
@@ -5431,26 +5624,24 @@ static void bnx2x_timer(unsigned long data)
if (IS_PF(bp) &&
!BP_NOMCP(bp)) {
int mb_idx = BP_FW_MB_IDX(bp);
- u32 drv_pulse;
- u32 mcp_pulse;
+ u16 drv_pulse;
+ u16 mcp_pulse;
++bp->fw_drv_pulse_wr_seq;
bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
- /* TBD - add SYSTEM_TIME */
drv_pulse = bp->fw_drv_pulse_wr_seq;
bnx2x_drv_pulse(bp);
mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
MCP_PULSE_SEQ_MASK);
/* The delta between driver pulse and mcp response
- * should be 1 (before mcp response) or 0 (after mcp response)
+ * should not get too big. If the MFW is more than 5 pulses
+ * behind, that is worrying enough to log an error.
*/
- if ((drv_pulse != mcp_pulse) &&
- (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
- /* someone lost a heartbeat... */
- BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
+ if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
+ BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
drv_pulse, mcp_pulse);
- }
}
if (bp->state == BNX2X_STATE_OPEN)
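The new heartbeat check relies on modular arithmetic: subtracting the two sequence numbers and masking yields the distance even across wraparound, so the 5-pulse threshold behaves correctly near the mask boundary. A quick stand-alone check, assuming the driver's 15-bit sequence mask:

#include <stdio.h>

#define MCP_PULSE_SEQ_MASK 0x7fff	/* assumed 15-bit sequence mask */

int main(void)
{
	unsigned short drv_pulse = 0x0002;	/* driver wrapped around */
	unsigned short mcp_pulse = 0x7ffe;	/* MFW four pulses behind */
	unsigned short delta = (drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK;

	printf("delta=%u -> %s\n", delta,
	       delta > 5 ? "MFW seems hung" : "ok");
	return 0;
}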
@@ -5800,11 +5991,11 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
}
/* called with netif_addr_lock_bh() */
-int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
- unsigned long rx_mode_flags,
- unsigned long rx_accept_flags,
- unsigned long tx_accept_flags,
- unsigned long ramrod_flags)
+static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+ unsigned long rx_mode_flags,
+ unsigned long rx_accept_flags,
+ unsigned long tx_accept_flags,
+ unsigned long ramrod_flags)
{
struct bnx2x_rx_mode_ramrod_params ramrod_param;
int rc;
@@ -5912,7 +6103,7 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
}
/* called with netif_addr_lock_bh() */
-int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
unsigned long rx_mode_flags = 0, ramrod_flags = 0;
unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
@@ -5939,18 +6130,6 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
{
int i;
- if (IS_MF_SI(bp))
- /*
- * In switch independent mode, the TSTORM needs to accept
- * packets that failed classification, since approximate match
- * mac addresses aren't written to NIG LLH
- */
- REG_WR8(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
- else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
- REG_WR8(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
-
/* Zero this manually as its initialization is
currently missing in the initTool */
for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
@@ -6108,6 +6287,47 @@ static void bnx2x_init_tx_rings(struct bnx2x *bp)
bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
}
+static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
+{
+ struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+ unsigned long q_type = 0;
+
+ bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
+ bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
+ BNX2X_FCOE_ETH_CL_ID_IDX);
+ bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
+ bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
+ bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
+ bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
+ bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
+ fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
+ fp);
+
+ DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
+
+ /* qZone id equals to FW (per path) client id */
+ bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
+ /* init shortcut */
+ bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
+ bnx2x_rx_ustorm_prods_offset(fp);
+
+ /* Configure Queue State object */
+ __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+ __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+
+ /* No multi-CoS for FCoE L2 client */
+ BUG_ON(fp->max_cos != 1);
+
+ bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
+ &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+ bnx2x_sp_mapping(bp, q_rdata), q_type);
+
+ DP(NETIF_MSG_IFUP,
+ "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
+ fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
+ fp->igu_sb_id);
+}
+
void bnx2x_nic_init_cnic(struct bnx2x *bp)
{
if (!NO_FCOE(bp))
@@ -6877,7 +7097,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
- REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
+
if (!CHIP_REV_IS_SLOW(bp))
/* enable hw interrupt from doorbell Q */
REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
@@ -7104,7 +7324,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
int port = BP_PORT(bp);
int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
u32 low, high;
- u32 val;
+ u32 val, reg;
DP(NETIF_MSG_HW, "starting port init port %d\n", port);
@@ -7249,6 +7469,17 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
val |= CHIP_IS_E1(bp) ? 0 : 0x10;
REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
+ /* SCPAD_PARITY should NOT trigger a close-the-gates sequence */
+ reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
+ REG_WR(bp, reg,
+ REG_RD(bp, reg) &
+ ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
+
+ reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
+ REG_WR(bp, reg,
+ REG_RD(bp, reg) &
+ ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
+
bnx2x_init_block(bp, BLOCK_NIG, init_phase);
if (!CHIP_IS_E1x(bp)) {
@@ -7629,6 +7860,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_TM, init_phase);
bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
+ REG_WR(bp, DORQ_REG_MODE_ACT, 1); /* no dpm */
bnx2x_iov_init_dq(bp);
@@ -7839,12 +8071,15 @@ void bnx2x_free_mem(struct bnx2x *bp)
{
int i;
- BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
- sizeof(struct host_sp_status_block));
-
BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+ if (IS_VF(bp))
+ return;
+
+ BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
+ sizeof(struct host_sp_status_block));
+
BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
sizeof(struct bnx2x_slowpath));
@@ -7867,19 +8102,25 @@ void bnx2x_free_mem(struct bnx2x *bp)
int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
{
- if (!CHIP_IS_E1x(bp))
+ if (!CHIP_IS_E1x(bp)) {
/* size = the status block + ramrod buffers */
- BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
- sizeof(struct host_hc_status_block_e2));
- else
- BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb,
- &bp->cnic_sb_mapping,
- sizeof(struct
- host_hc_status_block_e1x));
+ bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e2));
+ if (!bp->cnic_sb.e2_sb)
+ goto alloc_mem_err;
+ } else {
+ bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e1x));
+ if (!bp->cnic_sb.e1x_sb)
+ goto alloc_mem_err;
+ }
- if (CONFIGURE_NIC_MODE(bp) && !bp->t2)
+ if (CONFIGURE_NIC_MODE(bp) && !bp->t2) {
/* allocate searcher T2 table, as it wasn't allocated before */
- BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
+ bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
+ if (!bp->t2)
+ goto alloc_mem_err;
+ }
/* write address to which L5 should insert its values */
bp->cnic_eth_dev.addr_drv_info_to_mcp =
@@ -7900,15 +8141,22 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
{
int i, allocated, context_size;
- if (!CONFIGURE_NIC_MODE(bp) && !bp->t2)
+ if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) {
/* allocate searcher T2 table */
- BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
+ bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
+ if (!bp->t2)
+ goto alloc_mem_err;
+ }
- BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
- sizeof(struct host_sp_status_block));
+ bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping,
+ sizeof(struct host_sp_status_block));
+ if (!bp->def_status_blk)
+ goto alloc_mem_err;
- BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
- sizeof(struct bnx2x_slowpath));
+ bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping,
+ sizeof(struct bnx2x_slowpath));
+ if (!bp->slowpath)
+ goto alloc_mem_err;
/* Allocate memory for CDU context:
* This memory is allocated separately and not in the generic ILT
@@ -7928,12 +8176,16 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
for (i = 0, allocated = 0; allocated < context_size; i++) {
bp->context[i].size = min(CDU_ILT_PAGE_SZ,
(context_size - allocated));
- BNX2X_PCI_ALLOC(bp->context[i].vcxt,
- &bp->context[i].cxt_mapping,
- bp->context[i].size);
+ bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping,
+ bp->context[i].size);
+ if (!bp->context[i].vcxt)
+ goto alloc_mem_err;
allocated += bp->context[i].size;
}
- BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
+ bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line),
+ GFP_KERNEL);
+ if (!bp->ilt->lines)
+ goto alloc_mem_err;
if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
goto alloc_mem_err;
@@ -7942,11 +8194,15 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
goto alloc_mem_err;
/* Slow path ring */
- BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
+ bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE);
+ if (!bp->spq)
+ goto alloc_mem_err;
/* EQ */
- BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
- BCM_PAGE_SIZE * NUM_EQ_PAGES);
+ bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping,
+ BCM_PAGE_SIZE * NUM_EQ_PAGES);
+ if (!bp->eq_ring)
+ goto alloc_mem_err;
return 0;
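The allocation hunks above switch BNX2X_PCI_ALLOC from writing through an lvalue argument (with the error path hidden inside the macro) to returning the virtual address, so every call site carries its own goto alloc_mem_err. The macro itself is not visible in this hunk; a minimal sketch of the assumed contract, expressed as a function:

	/* sketch of the assumed allocator contract: returns a zeroed,
	 * DMA-coherent buffer or NULL; the bus address comes back via
	 * *mapping
	 */
	static void *bnx2x_pci_alloc_sketch(struct bnx2x *bp, size_t size,
					    dma_addr_t *mapping)
	{
		return dma_zalloc_coherent(&bp->pdev->dev, size, mapping,
					   GFP_KERNEL);
	}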
@@ -8044,7 +8300,10 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
int bnx2x_setup_leading(struct bnx2x *bp)
{
- return bnx2x_setup_queue(bp, &bp->fp[0], 1);
+ if (IS_PF(bp))
+ return bnx2x_setup_queue(bp, &bp->fp[0], true);
+ else /* VF */
+ return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
}
/**
@@ -8058,8 +8317,10 @@ int bnx2x_set_int_mode(struct bnx2x *bp)
{
int rc = 0;
- if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX)
+ if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
+ BNX2X_ERR("VF not loaded since interrupt mode not msix\n");
return -EINVAL;
+ }
switch (int_mode) {
case BNX2X_INT_MODE_MSIX:
@@ -8627,6 +8888,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
else if (bp->wol) {
u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
u8 *mac_addr = bp->dev->dev_addr;
+ struct pci_dev *pdev = bp->pdev;
u32 val;
u16 pmc;
@@ -8643,9 +8905,9 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
/* Enable the PME and clear the status */
- pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
- pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
+ pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
@@ -8659,16 +8921,16 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
int path = BP_PATH(bp);
DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- load_count[path][0]--;
- load_count[path][1 + port]--;
+ path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+ bnx2x_load_count[path][2]);
+ bnx2x_load_count[path][0]--;
+ bnx2x_load_count[path][1 + port]--;
DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- if (load_count[path][0] == 0)
+ path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+ bnx2x_load_count[path][2]);
+ if (bnx2x_load_count[path][0] == 0)
reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
- else if (load_count[path][1 + port] == 0)
+ else if (bnx2x_load_count[path][1 + port] == 0)
reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
else
reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
@@ -8721,6 +8983,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
synchronize_irq(bp->pdev->irq);
flush_workqueue(bnx2x_wq);
+ flush_workqueue(bnx2x_iov_wq);
while (bnx2x_func_get_state(bp, &bp->func_obj) !=
BNX2X_F_STATE_STARTED && tout--)
@@ -9283,6 +9546,10 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
bnx2x_process_kill_chip_reset(bp, global);
barrier();
+ /* clear errors in PGB */
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
+
/* Recover after reset: */
/* MCP */
if (global && bnx2x_reset_mcp_comp(bp, val))
@@ -9628,17 +9895,24 @@ sp_rtnl_not_reset:
}
}
- if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
- &bp->sp_rtnl_state)) {
- DP(BNX2X_MSG_SP,
- "sending set storm rx mode vf pf channel message from rtnl sp-task\n");
- bnx2x_vfpf_storm_rx_mode(bp);
+ if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
+ DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
+ bnx2x_set_rx_mode_inner(bp);
}
if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
&bp->sp_rtnl_state))
bnx2x_pf_set_vfs_vlan(bp);
+ if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) {
+ bnx2x_dcbx_stop_hw_tx(bp);
+ bnx2x_dcbx_resume_hw_tx(bp);
+ }
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION,
+ &bp->sp_rtnl_state))
+ bnx2x_update_mng_version(bp);
+
/* work which needs rtnl lock not-taken (as it takes the lock itself and
* can be called from other contexts as well)
*/
@@ -9687,7 +9961,7 @@ period_task_exit:
* Init service functions
*/
-u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
+static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
{
u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
@@ -9774,6 +10048,82 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
+#define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
+#define BCM_5710_UNDI_FW_MF_MINOR (0x08)
+#define BCM_5710_UNDI_FW_MF_VERS (0x05)
+#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
+#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
+
+static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
+{
+ /* UNDI marks its presence in DORQ -
+ * it initializes the CID offset for the normal doorbell to 0x7
+ */
+ if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
+ MISC_REGISTERS_RESET_REG_1_RST_DORQ))
+ return false;
+
+ if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) {
+ BNX2X_DEV_INFO("UNDI previously loaded\n");
+ return true;
+ }
+
+ return false;
+}
+
+static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
+{
+ u8 major, minor, version;
+ u32 fw;
+
+ /* Must check that FW is loaded */
+ if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
+ MISC_REGISTERS_RESET_REG_1_RST_XSEM)) {
+ BNX2X_DEV_INFO("XSEM is reset - UNDI MF FW is not loaded\n");
+ return false;
+ }
+
+ /* Read Currently loaded FW version */
+ fw = REG_RD(bp, XSEM_REG_PRAM);
+ major = fw & 0xff;
+ minor = (fw >> 0x8) & 0xff;
+ version = (fw >> 0x10) & 0xff;
+ BNX2X_DEV_INFO("Loaded FW: 0x%08x: Major 0x%02x Minor 0x%02x Version 0x%02x\n",
+ fw, major, minor, version);
+
+ if (major > BCM_5710_UNDI_FW_MF_MAJOR)
+ return true;
+
+ if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
+ (minor > BCM_5710_UNDI_FW_MF_MINOR))
+ return true;
+
+ if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
+ (minor == BCM_5710_UNDI_FW_MF_MINOR) &&
+ (version >= BCM_5710_UNDI_FW_MF_VERS))
+ return true;
+
+ return false;
+}
+
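The three-way comparison above is a plain lexicographic test of (major, minor, version) against 7.8.5, with the fields unpacked from the low three bytes of the word read from XSEM_REG_PRAM. A stand-alone check of the decode and compare:

#include <stdbool.h>
#include <stdio.h>

static bool fw_supports_mf(unsigned int fw)
{
	unsigned char major = fw & 0xff;
	unsigned char minor = (fw >> 8) & 0xff;
	unsigned char version = (fw >> 16) & 0xff;

	/* lexicographic compare against 7.8.5 */
	if (major != 7)
		return major > 7;
	if (minor != 8)
		return minor > 8;
	return version >= 5;
}

int main(void)
{
	printf("%d %d %d\n",
	       fw_supports_mf(0x00050807),	/* 7.8.5 -> 1 */
	       fw_supports_mf(0x00040807),	/* 7.8.4 -> 0 */
	       fw_supports_mf(0x00000908));	/* 8.9.0 -> 1 */
	return 0;
}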
+static void bnx2x_prev_unload_undi_mf(struct bnx2x *bp)
+{
+ int i;
+
+ /* Due to legacy (FW) code, the first function on each engine has a
+ * different offset macro from the rest of the functions.
+ * Setting this for all 8 functions is harmless regardless of whether
+ * this is actually a multi-function device.
+ */
+ for (i = 0; i < 2; i++)
+ REG_WR(bp, BNX2X_PREV_UNDI_MF_PORT(i), 1);
+
+ for (i = 2; i < 8; i++)
+ REG_WR(bp, BNX2X_PREV_UNDI_MF_FUNC(i - 2), 1);
+
+ BNX2X_DEV_INFO("UNDI FW (MF) set to discard\n");
+}
+
static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc)
{
u16 rcq, bd;
@@ -9843,7 +10193,7 @@ static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
{
struct bnx2x_prev_path_list *tmp_list;
- int rc = false;
+ bool rc = false;
if (down_trylock(&bnx2x_prev_sem))
return false;
@@ -9935,8 +10285,6 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
static int bnx2x_do_flr(struct bnx2x *bp)
{
- int i;
- u16 status;
struct pci_dev *dev = bp->pdev;
if (CHIP_IS_E1x(bp)) {
@@ -9951,20 +10299,8 @@ static int bnx2x_do_flr(struct bnx2x *bp)
return -EINVAL;
}
- /* Wait for Transaction Pending bit clean */
- for (i = 0; i < 4; i++) {
- if (i)
- msleep((1 << (i - 1)) * 100);
-
- pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
- if (!(status & PCI_EXP_DEVSTA_TRPND))
- goto clear;
- }
-
- dev_err(&dev->dev,
- "transaction is not cleared; proceeding with reset anyway\n");
-
-clear:
+ if (!pci_wait_for_pending_transaction(dev))
+ dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
BNX2X_DEV_INFO("Initiating FLR\n");
bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
@@ -9984,11 +10320,15 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
BNX2X_DEV_INFO("Path is unmarked\n");
+ /* Cannot proceed with FLR if UNDI is loaded, since FW does not match */
+ if (bnx2x_prev_is_after_undi(bp))
+ goto out;
+
/* If function has FLR capabilities, and existing FW version matches
* the one required, then FLR will be sufficient to clean any residue
* left by previous driver
*/
- rc = bnx2x_nic_load_analyze_req(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION);
+ rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);
if (!rc) {
/* fw version is good */
@@ -10004,6 +10344,7 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
BNX2X_DEV_INFO("Could not FLR\n");
+out:
/* Close the MCP request, return failure*/
rc = bnx2x_prev_mcp_done(bp);
if (!rc)
@@ -10034,6 +10375,7 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
/* Reset should be performed after BRB is emptied */
if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
u32 timer_count = 1000;
+ bool need_write = true;
/* Close the MAC Rx to prevent BRB from filling up */
bnx2x_prev_unload_close_mac(bp, &mac_vals);
@@ -10041,19 +10383,13 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
/* close LLH filters towards the BRB */
bnx2x_set_rx_filter(&bp->link_params, 0);
- /* Check if the UNDI driver was previously loaded
- * UNDI driver initializes CID offset for normal bell to 0x7
- */
- if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
- tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
- if (tmp_reg == 0x7) {
- BNX2X_DEV_INFO("UNDI previously loaded\n");
- prev_undi = true;
- /* clear the UNDI indication */
- REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
- /* clear possible idle check errors */
- REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
- }
+ /* Check if the UNDI driver was previously loaded */
+ if (bnx2x_prev_is_after_undi(bp)) {
+ prev_undi = true;
+ /* clear the UNDI indication */
+ REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+ /* clear possible idle check errors */
+ REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
}
if (!CHIP_IS_E1x(bp))
/* block FW from writing to host */
@@ -10076,10 +10412,20 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
else
timer_count--;
- /* If UNDI resides in memory, manually increment it */
- if (prev_undi)
+ /* New UNDI FW supports MF and contains better
+ * cleaning methods - might be redundant but harmless.
+ */
+ if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
+ if (need_write) {
+ bnx2x_prev_unload_undi_mf(bp);
+ need_write = false;
+ }
+ } else if (prev_undi) {
+ /* If UNDI resides in memory,
+ * manually increment it
+ */
bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
-
+ }
udelay(10);
}
@@ -10199,8 +10545,8 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
} while (--time_counter);
if (!time_counter || rc) {
- BNX2X_ERR("Failed unloading previous driver, aborting\n");
- rc = -EBUSY;
+ BNX2X_DEV_INFO("Unloading previous driver did not occur, Possibly due to MF UNDI\n");
+ rc = -EPROBE_DEFER;
}
/* Mark function if its port was used to boot from SAN */
@@ -10362,6 +10708,10 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
+
+ bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
+ BC_SUPPORTS_RMMOD_CMD : 0;
+
boot_mode = SHMEM_RD(bp,
dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
@@ -10380,7 +10730,7 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
break;
}
- pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
+ pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
BNX2X_DEV_INFO("%sWoL capable\n",
@@ -11123,6 +11473,14 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
bnx2x_get_cnic_mac_hwinfo(bp);
}
+ if (!BP_NOMCP(bp)) {
+ /* Read physical port identifier from shmem */
+ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+ val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+ bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
+ bp->flags |= HAS_PHYS_PORT_ID;
+ }
+
memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
@@ -11137,6 +11495,9 @@ static bool bnx2x_get_dropless_info(struct bnx2x *bp)
int tmp;
u32 cfg;
+ if (IS_VF(bp))
+ return 0;
+
if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
/* Take function: tmp = func */
tmp = BP_ABS_FUNC(bp);
@@ -11366,9 +11727,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
}
}
- /* adjust igu_sb_cnt to MF for E1x */
- if (CHIP_IS_E1x(bp) && IS_MF(bp))
- bp->igu_sb_cnt /= E1HVN_MAX;
+ /* adjust igu_sb_cnt to MF for E1H */
+ if (CHIP_IS_E1H(bp) && IS_MF(bp))
+ bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
/* port info */
bnx2x_get_port_hwinfo(bp);
@@ -11523,11 +11884,15 @@ static int bnx2x_init_bp(struct bnx2x *bp)
mutex_init(&bp->port.phy_mutex);
mutex_init(&bp->fw_mb_mutex);
+ mutex_init(&bp->drv_info_mutex);
+ bp->drv_info_mng_owner = false;
spin_lock_init(&bp->stats_lock);
+ sema_init(&bp->stats_sema, 1);
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
+ INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
if (IS_PF(bp)) {
rc = bnx2x_get_hwinfo(bp);
if (rc)
@@ -11554,7 +11919,11 @@ static int bnx2x_init_bp(struct bnx2x *bp)
DRV_MSG_SEQ_NUMBER_MASK;
BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
- bnx2x_prev_unload(bp);
+ rc = bnx2x_prev_unload(bp);
+ if (rc) {
+ bnx2x_free_mem_bp(bp);
+ return rc;
+ }
}
if (CHIP_REV_IS_FPGA(bp))
@@ -11565,6 +11934,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->disable_tpa = disable_tpa;
bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
+ /* Reduce memory usage in kdump environment by disabling TPA */
+ bp->disable_tpa |= reset_devices;
/* Set TPA flags */
if (bp->disable_tpa) {
@@ -11630,9 +12001,11 @@ static int bnx2x_init_bp(struct bnx2x *bp)
* second status block for the L2 queue, and a third status block for
* CNIC if supported.
*/
- if (CNIC_SUPPORT(bp))
+ if (IS_VF(bp))
+ bp->min_msix_vec_cnt = 1;
+ else if (CNIC_SUPPORT(bp))
bp->min_msix_vec_cnt = 3;
- else
+ else /* PF w/o cnic */
bp->min_msix_vec_cnt = 2;
BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
@@ -11653,9 +12026,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
static int bnx2x_open(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
- bool global = false;
- int other_engine = BP_PATH(bp) ? 0 : 1;
- bool other_load_status, load_status;
int rc;
bp->stats_init = true;
@@ -11671,6 +12041,10 @@ static int bnx2x_open(struct net_device *dev)
* Parity recovery is only relevant for PF driver.
*/
if (IS_PF(bp)) {
+ int other_engine = BP_PATH(bp) ? 0 : 1;
+ bool other_load_status, load_status;
+ bool global = false;
+
other_load_status = bnx2x_get_load_status(bp, other_engine);
load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
@@ -11714,7 +12088,7 @@ static int bnx2x_open(struct net_device *dev)
rc = bnx2x_nic_load(bp, LOAD_OPEN);
if (rc)
return rc;
- return bnx2x_open_epilog(bp);
+ return 0;
}
/* called with rtnl_lock */
@@ -11733,7 +12107,7 @@ static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
{
int mc_count = netdev_mc_count(bp->dev);
struct bnx2x_mcast_list_elem *mc_mac =
- kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC);
+ kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC);
struct netdev_hw_addr *ha;
if (!mc_mac)
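kcalloc() is preferred here because it fails cleanly when mc_count * sizeof(*mc_mac) would overflow, instead of returning a short buffer. Roughly what that guard amounts to (an analogue for illustration, not the kernel's actual implementation):

	static inline void *kcalloc_sketch(size_t n, size_t size, gfp_t flags)
	{
		if (size != 0 && n > SIZE_MAX / size)
			return NULL;	/* multiplication would wrap */
		return kzalloc(n * size, flags);
	}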
@@ -11846,43 +12220,51 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
}
/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
-void bnx2x_set_rx_mode(struct net_device *dev)
+static void bnx2x_set_rx_mode(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
- u32 rx_mode = BNX2X_RX_MODE_NORMAL;
if (bp->state != BNX2X_STATE_OPEN) {
DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
return;
+ } else {
+ /* Schedule an SP task to handle rest of change */
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
+ NETIF_MSG_IFUP);
}
+}
+
+void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
+{
+ u32 rx_mode = BNX2X_RX_MODE_NORMAL;
DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
- if (dev->flags & IFF_PROMISC)
+ netif_addr_lock_bh(bp->dev);
+
+ if (bp->dev->flags & IFF_PROMISC) {
rx_mode = BNX2X_RX_MODE_PROMISC;
- else if ((dev->flags & IFF_ALLMULTI) ||
- ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
- CHIP_IS_E1(bp)))
+ } else if ((bp->dev->flags & IFF_ALLMULTI) ||
+ ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
+ CHIP_IS_E1(bp))) {
rx_mode = BNX2X_RX_MODE_ALLMULTI;
- else {
+ } else {
if (IS_PF(bp)) {
/* some multicasts */
if (bnx2x_set_mc_list(bp) < 0)
rx_mode = BNX2X_RX_MODE_ALLMULTI;
+ /* release bh lock, as bnx2x_set_uc_list might sleep */
+ netif_addr_unlock_bh(bp->dev);
if (bnx2x_set_uc_list(bp) < 0)
rx_mode = BNX2X_RX_MODE_PROMISC;
+ netif_addr_lock_bh(bp->dev);
} else {
/* configuring mcast to a vf involves sleeping (when we
- * wait for the pf's response). Since this function is
- * called from non sleepable context we must schedule
- * a work item for this purpose
+ * wait for the pf's response).
*/
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp,
+ BNX2X_SP_RTNL_VFPF_MCAST, 0);
}
}
@@ -11894,22 +12276,20 @@ void bnx2x_set_rx_mode(struct net_device *dev)
/* Schedule the rx_mode command */
if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+ netif_addr_unlock_bh(bp->dev);
return;
}
if (IS_PF(bp)) {
bnx2x_set_storm_rx_mode(bp);
+ netif_addr_unlock_bh(bp->dev);
} else {
- /* configuring rx mode to storms in a vf involves sleeping (when
- * we wait for the pf's response). Since this function is
- * called from non sleepable context we must schedule
- * a work item for this purpose
+ /* VF will need to request the PF to make this change, and so
+ * the VF needs to release the bottom-half lock prior to the
+ * request (as it will likely require sleep on the VF side)
*/
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ netif_addr_unlock_bh(bp->dev);
+ bnx2x_vfpf_storm_rx_mode(bp);
}
}
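The rewritten inner routine holds netif_addr_lock_bh() for the address-list walks but must drop it around anything that can sleep (bnx2x_set_uc_list(), the VF-to-PF request). A condensed sketch of that pattern, with hypothetical helpers standing in for the real calls:

	static void example_program_mcast(struct bnx2x *bp);	/* BH-safe */
	static int example_program_ucast(struct bnx2x *bp);	/* may sleep */
	static void example_commit_rx_mode(struct bnx2x *bp);

	static void example_rx_mode_inner(struct bnx2x *bp)
	{
		netif_addr_lock_bh(bp->dev);
		example_program_mcast(bp);	/* safe in BH context */
		netif_addr_unlock_bh(bp->dev);

		example_program_ucast(bp);	/* lock dropped: may sleep */

		netif_addr_lock_bh(bp->dev);
		/* anything read before the unlock may be stale here */
		example_commit_rx_mode(bp);
		netif_addr_unlock_bh(bp->dev);
	}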
@@ -12000,6 +12380,20 @@ static int bnx2x_validate_addr(struct net_device *dev)
return 0;
}
+static int bnx2x_get_phys_port_id(struct net_device *netdev,
+ struct netdev_phys_port_id *ppid)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+
+ if (!(bp->flags & HAS_PHYS_PORT_ID))
+ return -EOPNOTSUPP;
+
+ ppid->id_len = sizeof(bp->phys_port_id);
+ memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
+
+ return 0;
+}
+
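The new ndo is reached through the networking core; the core-side wrapper (net/core/dev.c provides the real one) is essentially this sketch:

	int example_get_phys_port_id(struct net_device *dev,
				     struct netdev_phys_port_id *ppid)
	{
		const struct net_device_ops *ops = dev->netdev_ops;

		if (!ops->ndo_get_phys_port_id)
			return -EOPNOTSUPP;
		return ops->ndo_get_phys_port_id(dev, ppid);
	}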
static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_open = bnx2x_open,
.ndo_stop = bnx2x_close,
@@ -12026,22 +12420,18 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
#endif
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = bnx2x_low_latency_recv,
#endif
+ .ndo_get_phys_port_id = bnx2x_get_phys_port_id,
};
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
{
struct device *dev = &bp->pdev->dev;
- if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
- bp->flags |= USING_DAC_FLAG;
- if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
- dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
- return -EIO;
- }
- } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
dev_err(dev, "System does not support DMA, aborting\n");
return -EIO;
}
@@ -12049,6 +12439,14 @@ static int bnx2x_set_coherency_mask(struct bnx2x *bp)
return 0;
}
+static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
+{
+ if (bp->flags & AER_ENABLED) {
+ pci_disable_pcie_error_reporting(bp->pdev);
+ bp->flags &= ~AER_ENABLED;
+ }
+}
+
static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
struct net_device *dev, unsigned long board_type)
{
@@ -12104,8 +12502,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
}
if (IS_PF(bp)) {
- bp->pm_cap = pdev->pm_cap;
- if (bp->pm_cap == 0) {
+ if (!pdev->pm_cap) {
dev_err(&bp->pdev->dev,
"Cannot find power management capability, aborting\n");
rc = -EIO;
@@ -12156,6 +12553,14 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
/* clean indirect addresses */
pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
PCICFG_VENDOR_ID_OFFSET);
+
+ /* AER (Advanced Error reporting) configuration */
+ rc = pci_enable_pcie_error_reporting(pdev);
+ if (!rc)
+ bp->flags |= AER_ENABLED;
+ else
+ BNX2X_DEV_INFO("Failed To configure PCIe AER [%d]\n", rc);
+
/*
* Clean the following indirect addresses for all functions since it
* is not used by the driver.
@@ -12194,10 +12599,13 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
if (!CHIP_IS_E1x(bp)) {
- dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
+ dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
dev->hw_enc_features =
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+ NETIF_F_GSO_IPIP |
+ NETIF_F_GSO_SIT |
NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
}
@@ -12205,8 +12613,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
- if (bp->flags & USING_DAC_FLAG)
- dev->features |= NETIF_F_HIGHDMA;
+ dev->features |= NETIF_F_HIGHDMA;
/* Add Loopback capability to the device */
dev->hw_features |= NETIF_F_LOOPBACK;
@@ -12231,34 +12638,11 @@ err_out_release:
err_out_disable:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
err_out:
return rc;
}
-static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width,
- enum bnx2x_pci_bus_speed *speed)
-{
- u32 link_speed, val = 0;
-
- pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val);
- *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
-
- link_speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
-
- switch (link_speed) {
- case 3:
- *speed = BNX2X_PCI_LINK_SPEED_8000;
- break;
- case 2:
- *speed = BNX2X_PCI_LINK_SPEED_5000;
- break;
- default:
- *speed = BNX2X_PCI_LINK_SPEED_2500;
- }
-}
-
static int bnx2x_check_firmware(struct bnx2x *bp)
{
const struct firmware *firmware = bp->firmware;
@@ -12531,19 +12915,16 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
* @dev: pci device
*
*/
-static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
- int cnic_cnt, bool is_vf)
+static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
{
- int pos, index;
+ int index;
u16 control = 0;
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
-
/*
* If MSI-X is not supported - return number of SBs needed to support
* one fast path queue: one FP queue + SB for CNIC
*/
- if (!pos) {
+ if (!pdev->msix_cap) {
dev_info(&pdev->dev, "no msix capability found\n");
return 1 + cnic_cnt;
}
@@ -12556,11 +12937,11 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
* without the default SB.
* For VFs there is no default SB, then we return (index+1).
*/
- pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
+ pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
index = control & PCI_MSIX_FLAGS_QSIZE;
- return is_vf ? index + 1 : index;
+ return index;
}
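PCI_MSIX_FLAGS_QSIZE encodes the MSI-X table size as (entries - 1), which is why the function can return `index` directly as the IRQ count without the default SB. A sketch of the raw decode:

	static int example_msix_table_size(struct pci_dev *pdev)
	{
		u16 control;

		if (!pdev->msix_cap)
			return 0;
		pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS,
				     &control);
		/* QSIZE field holds (number of table entries - 1) */
		return (control & PCI_MSIX_FLAGS_QSIZE) + 1;
	}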
static int set_max_cos_est(int chip_id)
@@ -12572,24 +12953,24 @@ static int set_max_cos_est(int chip_id)
return BNX2X_MULTI_TX_COS_E1X;
case BCM57712:
case BCM57712_MF:
- case BCM57712_VF:
return BNX2X_MULTI_TX_COS_E2_E3A0;
case BCM57800:
case BCM57800_MF:
- case BCM57800_VF:
case BCM57810:
case BCM57810_MF:
case BCM57840_4_10:
case BCM57840_2_20:
case BCM57840_O:
case BCM57840_MFO:
- case BCM57810_VF:
case BCM57840_MF:
- case BCM57840_VF:
case BCM57811:
case BCM57811_MF:
- case BCM57811_VF:
return BNX2X_MULTI_TX_COS_E3B0;
+ case BCM57712_VF:
+ case BCM57800_VF:
+ case BCM57810_VF:
+ case BCM57840_VF:
+ case BCM57811_VF:
return 1;
default:
pr_err("Unknown board_type (%d), aborting\n", chip_id);
@@ -12611,15 +12992,13 @@ static int set_is_vf(int chip_id)
}
}
-struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
-
static int bnx2x_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct net_device *dev = NULL;
struct bnx2x *bp;
- int pcie_width;
- enum bnx2x_pci_bus_speed pcie_speed;
+ enum pcie_link_width pcie_width;
+ enum pci_bus_speed pcie_speed;
int rc, max_non_def_sbs;
int rx_count, tx_count, rss_count, doorbell_size;
int max_cos_est;
@@ -12640,10 +13019,13 @@ static int bnx2x_init_one(struct pci_dev *pdev,
is_vf = set_is_vf(ent->driver_data);
cnic_cnt = is_vf ? 0 : 1;
- max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt, is_vf);
+ max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
+
+ /* add another SB for VF as it has no default SB */
+ max_non_def_sbs += is_vf ? 1 : 0;
/* Maximum number of RSS queues: one IGU SB goes to CNIC */
- rss_count = is_vf ? 1 : max_non_def_sbs - cnic_cnt;
+ rss_count = max_non_def_sbs - cnic_cnt;
if (rss_count < 1)
return -EINVAL;
@@ -12765,24 +13147,27 @@ static int bnx2x_init_one(struct pci_dev *pdev,
dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
rtnl_unlock();
}
-
- bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
- BNX2X_DEV_INFO("got pcie width %d and speed %d\n",
- pcie_width, pcie_speed);
-
- BNX2X_DEV_INFO("%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
+ if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) ||
+ pcie_speed == PCI_SPEED_UNKNOWN ||
+ pcie_width == PCIE_LNK_WIDTH_UNKNOWN)
+ BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n");
+ else
+ BNX2X_DEV_INFO(
+ "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
board_info[ent->driver_data].name,
(CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
pcie_width,
- pcie_speed == BNX2X_PCI_LINK_SPEED_2500 ? "2.5GHz" :
- pcie_speed == BNX2X_PCI_LINK_SPEED_5000 ? "5.0GHz" :
- pcie_speed == BNX2X_PCI_LINK_SPEED_8000 ? "8.0GHz" :
+ pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" :
+ pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" :
+ pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :
"Unknown",
dev->base_addr, bp->pdev->irq, dev->dev_addr);
return 0;
init_one_exit:
+ bnx2x_disable_pcie_error_reporting(bp);
+
if (bp->regview)
iounmap(bp->regview);
@@ -12795,7 +13180,6 @@ init_one_exit:
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
return rc;
}
@@ -12817,13 +13201,17 @@ static void __bnx2x_remove(struct pci_dev *pdev,
bnx2x_dcbnl_update_applist(bp, true);
#endif
+ if (IS_PF(bp) &&
+ !BP_NOMCP(bp) &&
+ (bp->flags & BC_SUPPORTS_RMMOD_CMD))
+ bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
+
/* Close the interface - either directly or implicitly */
if (remove_netdev) {
unregister_netdev(dev);
} else {
rtnl_lock();
- if (netif_running(dev))
- bnx2x_close(dev);
+ dev_close(dev);
rtnl_unlock();
}
@@ -12853,28 +13241,31 @@ static void __bnx2x_remove(struct pci_dev *pdev,
pci_set_power_state(pdev, PCI_D3hot);
}
- if (bp->regview)
- iounmap(bp->regview);
+ bnx2x_disable_pcie_error_reporting(bp);
+ if (remove_netdev) {
+ if (bp->regview)
+ iounmap(bp->regview);
- /* for vf doorbells are part of the regview and were unmapped along with
- * it. FW is only loaded by PF.
- */
- if (IS_PF(bp)) {
- if (bp->doorbells)
- iounmap(bp->doorbells);
+ /* For vfs, doorbells are part of the regview and were unmapped
+ * along with it. FW is only loaded by PF.
+ */
+ if (IS_PF(bp)) {
+ if (bp->doorbells)
+ iounmap(bp->doorbells);
- bnx2x_release_firmware(bp);
- }
- bnx2x_free_mem_bp(bp);
+ bnx2x_release_firmware(bp);
+ } else {
+ bnx2x_vf_pci_dealloc(bp);
+ }
+ bnx2x_free_mem_bp(bp);
- if (remove_netdev)
free_netdev(dev);
- if (atomic_read(&pdev->enable_cnt) == 1)
- pci_release_regions(pdev);
+ if (atomic_read(&pdev->enable_cnt) == 1)
+ pci_release_regions(pdev);
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+ }
}
static void bnx2x_remove_one(struct pci_dev *pdev)
@@ -12909,8 +13300,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
netdev_reset_tc(bp->dev);
del_timer_sync(&bp->timer);
- cancel_delayed_work(&bp->sp_task);
- cancel_delayed_work(&bp->period_task);
+ cancel_delayed_work_sync(&bp->sp_task);
+ cancel_delayed_work_sync(&bp->period_task);
spin_lock_bh(&bp->stats_lock);
bp->stats_state = STATS_STATE_DISABLED;
@@ -13031,6 +13422,14 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
rtnl_unlock();
+ /* If AER, perform cleanup of the PCIe registers */
+ if (bp->flags & AER_ENABLED) {
+ if (pci_cleanup_aer_uncorrect_error_status(pdev))
+ BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n");
+ else
+ DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n");
+ }
+
return PCI_ERS_RESULT_RECOVERED;
}
@@ -13118,11 +13517,18 @@ static int __init bnx2x_init(void)
pr_err("Cannot create workqueue\n");
return -ENOMEM;
}
+ bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov");
+ if (!bnx2x_iov_wq) {
+ pr_err("Cannot create iov workqueue\n");
+ destroy_workqueue(bnx2x_wq);
+ return -ENOMEM;
+ }
ret = pci_register_driver(&bnx2x_pci_driver);
if (ret) {
pr_err("Cannot register driver\n");
destroy_workqueue(bnx2x_wq);
+ destroy_workqueue(bnx2x_iov_wq);
}
return ret;
}
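With two workqueues, every failure path must unwind both. The same cleanup expressed with the conventional goto ladder (hypothetical names, for illustration only):

	static struct workqueue_struct *example_wq, *example_iov_wq;
	static struct pci_driver example_pci_driver;

	static int __init example_init(void)
	{
		int ret = -ENOMEM;

		example_wq = create_singlethread_workqueue("example");
		if (!example_wq)
			return ret;

		example_iov_wq = create_singlethread_workqueue("example_iov");
		if (!example_iov_wq)
			goto err_wq;

		ret = pci_register_driver(&example_pci_driver);
		if (ret)
			goto err_iov_wq;
		return 0;

	err_iov_wq:
		destroy_workqueue(example_iov_wq);
	err_wq:
		destroy_workqueue(example_wq);
		return ret;
	}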
@@ -13134,6 +13540,7 @@ static void __exit bnx2x_cleanup(void)
pci_unregister_driver(&bnx2x_pci_driver);
destroy_workqueue(bnx2x_wq);
+ destroy_workqueue(bnx2x_iov_wq);
/* Free globally allocated resources */
list_for_each_safe(pos, q, &bnx2x_prev_list) {
@@ -13485,9 +13892,9 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
int count = ctl->data.credit.credit_count;
- smp_mb__before_atomic_inc();
+ smp_mb__before_atomic();
atomic_add(count, &bp->cq_spq_left);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
break;
}
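smp_mb__before_atomic()/smp_mb__after_atomic() are the merged replacements for the old per-operation variants (_inc, _dec, _clear_bit); they order memory around atomic RMW operations that return no value and therefore imply no barrier. The pattern being preserved:

	smp_mb__before_atomic();		/* prior stores visible first */
	atomic_add(count, &bp->cq_spq_left);	/* RMW, no implied barrier */
	smp_mb__after_atomic();			/* update visible to later loads */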
case DRV_CTL_ULP_REGISTER_CMD: {
@@ -13527,6 +13934,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
REG_WR(bp, scratch_offset + i,
*(host_addr + i/4));
}
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
break;
}
@@ -13544,6 +13952,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
}
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
break;
}
@@ -13591,6 +14000,10 @@ void bnx2x_setup_cnic_info(struct bnx2x *bp)
cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
+ DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
+ BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
+ cp->iscsi_l2_cid);
+
if (NO_ISCSI_OOO(bp))
cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
}
@@ -13645,6 +14058,9 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
rcu_assign_pointer(bp->cnic_ops, ops);
+ /* Schedule driver to read CNIC driver versions */
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
+
return 0;
}
@@ -13665,7 +14081,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
return 0;
}
-struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
+static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
@@ -13715,7 +14131,7 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
return cp;
}
-u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
+static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
struct bnx2x *bp = fp->bp;
u32 offset = BAR_USTRORM_INTMEM;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 8e627b886d7..2beb5430b87 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -2864,6 +2864,17 @@
#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430
#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434
#define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438
+/* [W 7] Writing 1 to each bit in this register clears a corresponding error
+ * details register and enables logging new error details. Bit 0 - clears
+ * INCORRECT_RCV_DETAILS; Bit 1 - clears RX_ERR_DETAILS; Bit 2 - clears
+ * TX_ERR_WR_ADD_31_0 TX_ERR_WR_ADD_63_32 TX_ERR_WR_DETAILS
+ * TX_ERR_WR_DETAILS2 TX_ERR_RD_ADD_31_0 TX_ERR_RD_ADD_63_32
+ * TX_ERR_RD_DETAILS TX_ERR_RD_DETAILS2 TX_ERR_WR_DETAILS_ICPL; Bit 3 -
+ * clears VF_LENGTH_VIOLATION_DETAILS. Bit 4 - clears
+ * VF_GRC_SPACE_VIOLATION_DETAILS. Bit 5 - clears RX_TCPL_ERR_DETAILS. Bit 6
+ * - clears TCPL_IN_TWO_RCBS_DETAILS. */
+#define PGLUE_B_REG_LATCHED_ERRORS_CLR 0x943c
+
/* [R 9] Interrupt register #0 read */
#define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298
/* [RC 9] Interrupt register #0 read clear */
@@ -5921,6 +5932,7 @@
#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26)
#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27)
+#define MISC_REGISTERS_RESET_REG_1_RST_XSEM (0x1<<22)
#define MISC_REGISTERS_RESET_REG_1_SET 0x584
#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
#define MISC_REGISTERS_RESET_REG_2_MSTAT0 (0x1<<24)
@@ -6335,6 +6347,7 @@
#define PCI_ID_VAL2 0x438
#define PCI_ID_VAL3 0x43c
+#define GRC_CONFIG_REG_VF_MSIX_CONTROL 0x61C
#define GRC_CONFIG_REG_PF_INIT_VF 0x624
#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf
/* First VF_NUM for PF is encoded in this register.
@@ -7167,6 +7180,7 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca
#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da
#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea
+#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI 0x80fa
#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104
#define MDIO_WC_REG_XGXS_STATUS3 0x8129
#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 8f03c984550..b1936044767 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -12,7 +12,7 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Vladislav Zolotarov
*
*/
@@ -159,16 +159,6 @@ static inline void __bnx2x_exe_queue_reset_pending(
}
}
-static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
- struct bnx2x_exe_queue_obj *o)
-{
- spin_lock_bh(&o->lock);
-
- __bnx2x_exe_queue_reset_pending(bp, o);
-
- spin_unlock_bh(&o->lock);
-}
-
/**
* bnx2x_exe_queue_step - execute one execution chunk atomically
*
@@ -176,7 +166,7 @@ static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
* @o: queue
* @ramrod_flags: flags
*
- * (Atomicity is ensured using the exe_queue->lock).
+ * (Should be called while holding the exe_queue->lock).
*/
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
struct bnx2x_exe_queue_obj *o,
@@ -187,8 +177,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
memset(&spacer, 0, sizeof(spacer));
- spin_lock_bh(&o->lock);
-
/* Next step should not be performed until the current is finished,
* unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
* properly clear object internals without sending any command to the FW
@@ -200,7 +188,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
__bnx2x_exe_queue_reset_pending(bp, o);
} else {
- spin_unlock_bh(&o->lock);
return 1;
}
}
@@ -228,10 +215,8 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
}
/* Sanity check */
- if (!cur_len) {
- spin_unlock_bh(&o->lock);
+ if (!cur_len)
return 0;
- }
rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
if (rc < 0)
@@ -245,7 +230,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
*/
__bnx2x_exe_queue_reset_pending(bp, o);
- spin_unlock_bh(&o->lock);
return rc;
}
@@ -274,16 +258,16 @@ static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(o->state, o->pstate);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
}
static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
set_bit(o->state, o->pstate);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
}
/**
@@ -371,23 +355,6 @@ static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
return vp->get(vp, 1);
}
-
-static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
-{
- struct bnx2x_credit_pool_obj *mp = o->macs_pool;
- struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
-
- if (!mp->get(mp, 1))
- return false;
-
- if (!vp->get(vp, 1)) {
- mp->put(mp, 1);
- return false;
- }
-
- return true;
-}
-
static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
struct bnx2x_credit_pool_obj *mp = o->macs_pool;
@@ -416,20 +383,189 @@ static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
return vp->put(vp, 1);
}
-static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+/**
+ * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Non-blocking implementation; should be called under execution
+ * queue lock.
+ */
+static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
{
- struct bnx2x_credit_pool_obj *mp = o->macs_pool;
- struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+ if (o->head_reader) {
+ DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n");
+ return -EBUSY;
+ }
- if (!mp->put(mp, 1))
- return false;
+ DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n");
+ return 0;
+}
- if (!vp->put(vp, 1)) {
- mp->get(mp, 1);
- return false;
+/**
+ * __bnx2x_vlan_mac_h_exec_pending - execute step instead of a previous step
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Should be called under execution queue lock; notice it might release
+ * and reclaim it during its run.
+ */
+static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ int rc;
+ unsigned long ramrod_flags = o->saved_ramrod_flags;
+
+ DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
+ ramrod_flags);
+ o->head_exe_request = false;
+ o->saved_ramrod_flags = 0;
+ rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
+ if (rc != 0) {
+ BNX2X_ERR("execution of pending commands failed with rc %d\n",
+ rc);
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#endif
}
+}
- return true;
+/**
+ * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ * @ramrod_flags: ramrod flags of missed execution
+ *
+ * @details Should be called under execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ unsigned long ramrod_flags)
+{
+ o->head_exe_request = true;
+ o->saved_ramrod_flags = ramrod_flags;
+ DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n",
+ ramrod_flags);
+}
+
+/**
+ * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Should be called under execution queue lock. Note that if a pending
+ * execution exists, it will be performed - possibly releasing and
+ * reclaiming the execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ /* It's possible a new pending execution was added since this writer
+ * executed. If so, execute again. [Ad infinitum]
+ */
+ while (o->head_exe_request) {
+ DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n");
+ __bnx2x_vlan_mac_h_exec_pending(bp, o);
+ }
+}
+
+/**
+ * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Should be called under the execution queue lock. May sleep. May
+ * release and reclaim execution queue lock during its run.
+ */
+static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ /* If we got here, we're holding lock --> no WRITER exists */
+ o->head_reader++;
+ DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n",
+ o->head_reader);
+
+ return 0;
+}
+
+/**
+ * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details May sleep. Claims and releases execution queue lock during its run.
+ */
+int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ int rc;
+
+ spin_lock_bh(&o->exe_queue.lock);
+ rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
+ spin_unlock_bh(&o->exe_queue.lock);
+
+ return rc;
+}
+
+/**
+ * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Should be called under execution queue lock. Note that if a pending
+ * execution exists, it will be performed if this was the last
+ * reader, possibly releasing and reclaiming the execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ if (!o->head_reader) {
+ BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#endif
+ } else {
+ o->head_reader--;
+ DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n",
+ o->head_reader);
+ }
+
+ /* It's possible a new pending execution was added, and that this reader
+ * was last - if so we need to execute the command.
+ */
+ if (!o->head_reader && o->head_exe_request) {
+ DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n");
+
+ /* Writer release will do the trick */
+ __bnx2x_vlan_mac_h_write_unlock(bp, o);
+ }
+}
+
+/**
+ * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Note that if a pending execution exists, it will be performed if this
+ * was the last reader. Claims and releases the execution queue lock
+ * during its run.
+ */
+void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ spin_lock_bh(&o->exe_queue.lock);
+ __bnx2x_vlan_mac_h_read_unlock(bp, o);
+ spin_unlock_bh(&o->exe_queue.lock);
}
static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
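The head_reader / head_exe_request pair added above implements a small reader-preferring lock: a writer that finds readers pends its step, and the last reader out runs it. A self-contained userspace analogue of the scheme, with a pthread mutex standing in for the exe_queue spinlock:

	#include <pthread.h>
	#include <stdbool.h>

	struct pend_rwlock {
		pthread_mutex_t lock;		/* the exe_queue lock */
		int readers;			/* head_reader */
		bool exe_request;		/* head_exe_request */
		void (*exec)(void);		/* pended writer step */
	};

	static void writer_step(struct pend_rwlock *o, void (*step)(void))
	{
		pthread_mutex_lock(&o->lock);
		if (o->readers) {		/* trylock fails: pend it */
			o->exec = step;
			o->exe_request = true;
		} else {
			step();			/* no readers: run now */
		}
		pthread_mutex_unlock(&o->lock);
	}

	static void read_lock(struct pend_rwlock *o)
	{
		pthread_mutex_lock(&o->lock);
		o->readers++;			/* no writer can be inside */
		pthread_mutex_unlock(&o->lock);
	}

	static void read_unlock(struct pend_rwlock *o)
	{
		pthread_mutex_lock(&o->lock);
		if (!--o->readers && o->exe_request) {
			o->exe_request = false;	/* last reader runs it */
			o->exec();
		}
		pthread_mutex_unlock(&o->lock);
	}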
@@ -438,6 +574,12 @@ static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
struct bnx2x_vlan_mac_registry_elem *pos;
u8 *next = base;
int counter = 0;
+ int read_lock;
+
+ DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n");
+ read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
+ if (read_lock != 0)
+ BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
/* traverse list */
list_for_each_entry(pos, &o->head, link) {
@@ -449,6 +591,12 @@ static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
next += stride + size;
}
}
+
+ if (read_lock == 0) {
+ DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n");
+ bnx2x_vlan_mac_h_read_unlock(bp, o);
+ }
+
return counter * ETH_ALEN;
}
@@ -466,7 +614,7 @@ static int bnx2x_check_mac_add(struct bnx2x *bp,
/* Check if a requested MAC already exists */
list_for_each_entry(pos, &o->head, link)
- if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
+ if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
(data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
return -EEXIST;
@@ -488,26 +636,6 @@ static int bnx2x_check_vlan_add(struct bnx2x *bp,
return 0;
}
-static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o,
- union bnx2x_classification_ramrod_data *data)
-{
- struct bnx2x_vlan_mac_registry_elem *pos;
-
- DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
- data->vlan_mac.mac, data->vlan_mac.vlan);
-
- list_for_each_entry(pos, &o->head, link)
- if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
- (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
- ETH_ALEN)) &&
- (data->vlan_mac.is_inner_mac ==
- pos->u.vlan_mac.is_inner_mac))
- return -EEXIST;
-
- return 0;
-}
-
/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
bnx2x_check_mac_del(struct bnx2x *bp,
@@ -519,7 +647,7 @@ static struct bnx2x_vlan_mac_registry_elem *
DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
list_for_each_entry(pos, &o->head, link)
- if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
+ if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
(data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
return pos;
@@ -542,27 +670,6 @@ static struct bnx2x_vlan_mac_registry_elem *
return NULL;
}
-static struct bnx2x_vlan_mac_registry_elem *
- bnx2x_check_vlan_mac_del(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o,
- union bnx2x_classification_ramrod_data *data)
-{
- struct bnx2x_vlan_mac_registry_elem *pos;
-
- DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
- data->vlan_mac.mac, data->vlan_mac.vlan);
-
- list_for_each_entry(pos, &o->head, link)
- if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
- (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
- ETH_ALEN)) &&
- (data->vlan_mac.is_inner_mac ==
- pos->u.vlan_mac.is_inner_mac))
- return pos;
-
- return NULL;
-}
-
/* check_move() callback */
static bool bnx2x_check_move(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *src_o,
@@ -614,8 +721,8 @@ static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
return rx_tx_flag;
}
-void bnx2x_set_mac_in_nig(struct bnx2x *bp,
- bool add, unsigned char *dev_addr, int index)
+static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
+ bool add, unsigned char *dev_addr, int index)
{
u32 wb_data[2];
u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
@@ -929,100 +1036,6 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
rule_cnt);
}
-static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o,
- struct bnx2x_exeq_elem *elem,
- int rule_idx, int cam_offset)
-{
- struct bnx2x_raw_obj *raw = &o->raw;
- struct eth_classify_rules_ramrod_data *data =
- (struct eth_classify_rules_ramrod_data *)(raw->rdata);
- int rule_cnt = rule_idx + 1;
- union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
- enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
- bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
- u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
- u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
-
- /* Reset the ramrod data buffer for the first rule */
- if (rule_idx == 0)
- memset(data, 0, sizeof(*data));
-
- /* Set a rule header */
- bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
- &rule_entry->pair.header);
-
- /* Set VLAN and MAC themselves */
- rule_entry->pair.vlan = cpu_to_le16(vlan);
- bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
- &rule_entry->pair.mac_mid,
- &rule_entry->pair.mac_lsb, mac);
- rule_entry->pair.inner_mac =
- cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);
- /* MOVE: Add a rule that will add this MAC to the target Queue */
- if (cmd == BNX2X_VLAN_MAC_MOVE) {
- rule_entry++;
- rule_cnt++;
-
- /* Setup ramrod data */
- bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
- elem->cmd_data.vlan_mac.target_obj,
- true, CLASSIFY_RULE_OPCODE_PAIR,
- &rule_entry->pair.header);
-
- /* Set a VLAN itself */
- rule_entry->pair.vlan = cpu_to_le16(vlan);
- bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
- &rule_entry->pair.mac_mid,
- &rule_entry->pair.mac_lsb, mac);
- rule_entry->pair.inner_mac =
- cpu_to_le16(elem->cmd_data.vlan_mac.u.
- vlan_mac.is_inner_mac);
- }
-
- /* Set the ramrod data header */
- /* TODO: take this to the higher level in order to prevent multiple
- writing */
- bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
- rule_cnt);
-}
-
-/**
- * bnx2x_set_one_vlan_mac_e1h -
- *
- * @bp: device handle
- * @o: bnx2x_vlan_mac_obj
- * @elem: bnx2x_exeq_elem
- * @rule_idx: rule_idx
- * @cam_offset: cam_offset
- */
-static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o,
- struct bnx2x_exeq_elem *elem,
- int rule_idx, int cam_offset)
-{
- struct bnx2x_raw_obj *raw = &o->raw;
- struct mac_configuration_cmd *config =
- (struct mac_configuration_cmd *)(raw->rdata);
- /* 57710 and 57711 do not support MOVE command,
- * so it's either ADD or DEL
- */
- bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
- true : false;
-
- /* Reset the ramrod data buffer */
- memset(config, 0, sizeof(*config));
-
- bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
- cam_offset, add,
- elem->cmd_data.vlan_mac.u.vlan_mac.mac,
- elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
- ETH_VLAN_FILTER_CLASSIFY, config);
-}
-
-#define list_next_entry(pos, member) \
- list_entry((pos)->member.next, typeof(*(pos)), member)
-
/**
* bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
*
@@ -1122,24 +1135,6 @@ static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
return NULL;
}
-static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
- struct bnx2x_exe_queue_obj *o,
- struct bnx2x_exeq_elem *elem)
-{
- struct bnx2x_exeq_elem *pos;
- struct bnx2x_vlan_mac_ramrod_data *data =
- &elem->cmd_data.vlan_mac.u.vlan_mac;
-
- /* Check pending for execution commands */
- list_for_each_entry(pos, &o->exe_queue, link)
- if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
- sizeof(*data)) &&
- (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
- return pos;
-
- return NULL;
-}
-
/**
* bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
*
@@ -1397,6 +1392,32 @@ static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
return -EBUSY;
}
+static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ unsigned long *ramrod_flags)
+{
+ int rc = 0;
+
+ spin_lock_bh(&o->exe_queue.lock);
+
+ DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n");
+ rc = __bnx2x_vlan_mac_h_write_trylock(bp, o);
+
+ if (rc != 0) {
+ __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);
+
+ /* Calling function should not differentiate between this case
+ * and the case in which there is already a pending ramrod
+ */
+ rc = 1;
+ } else {
+ rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+ }
+ spin_unlock_bh(&o->exe_queue.lock);
+
+ return rc;
+}
+
/**
* bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
*
@@ -1414,19 +1435,27 @@ static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
struct bnx2x_raw_obj *r = &o->raw;
int rc;
+ /* Clearing the pending list & raw state should be made
+ * atomically (as execution flow assumes they represent the same).
+ */
+ spin_lock_bh(&o->exe_queue.lock);
+
/* Reset pending list */
- bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
+ __bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
/* Clear pending */
r->clear_pending(r);
+ spin_unlock_bh(&o->exe_queue.lock);
+
/* If ramrod failed this is most likely a SW bug */
if (cqe->message.error)
return -EINVAL;
/* Run the next bulk of pending commands if requested */
if (test_bit(RAMROD_CONT, ramrod_flags)) {
- rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+ rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags);
+
if (rc < 0)
return rc;
}
@@ -1719,9 +1748,8 @@ static inline int bnx2x_vlan_mac_push_new_cmd(
* @p:
*
*/
-int bnx2x_config_vlan_mac(
- struct bnx2x *bp,
- struct bnx2x_vlan_mac_ramrod_params *p)
+int bnx2x_config_vlan_mac(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *p)
{
int rc = 0;
struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
@@ -1752,7 +1780,8 @@ int bnx2x_config_vlan_mac(
/* Execute commands if required */
if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
- rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+ rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
+ &p->ramrod_flags);
if (rc < 0)
return rc;
}
@@ -1775,8 +1804,9 @@ int bnx2x_config_vlan_mac(
return rc;
/* Make a next step */
- rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
- ramrod_flags);
+ rc = __bnx2x_vlan_mac_execute_step(bp,
+ p->vlan_mac_obj,
+ &p->ramrod_flags);
if (rc < 0)
return rc;
}
@@ -1806,18 +1836,21 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
unsigned long *ramrod_flags)
{
struct bnx2x_vlan_mac_registry_elem *pos = NULL;
- int rc = 0;
struct bnx2x_vlan_mac_ramrod_params p;
struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
+ unsigned long flags;
+ int read_lock;
+ int rc = 0;
/* Clear pending commands first */
spin_lock_bh(&exeq->lock);
list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
- if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
- *vlan_mac_flags) {
+ flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
+ if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+ BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
rc = exeq->remove(bp, exeq->owner, exeq_pos);
if (rc) {
BNX2X_ERR("Failed to remove command\n");
@@ -1844,18 +1877,29 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
__clear_bit(RAMROD_CONT, &p.ramrod_flags);
+ DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
+ read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
+ if (read_lock != 0)
+ return read_lock;
+
list_for_each_entry(pos, &o->head, link) {
- if (pos->vlan_mac_flags == *vlan_mac_flags) {
+ flags = pos->vlan_mac_flags;
+ if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+ BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
rc = bnx2x_config_vlan_mac(bp, &p);
if (rc < 0) {
BNX2X_ERR("Failed to add a new DEL command\n");
+ bnx2x_vlan_mac_h_read_unlock(bp, o);
return rc;
}
}
}
+ DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
+ bnx2x_vlan_mac_h_read_unlock(bp, o);
+
p.ramrod_flags = *ramrod_flags;
__set_bit(RAMROD_CONT, &p.ramrod_flags);
@@ -1887,6 +1931,9 @@ static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
struct bnx2x_credit_pool_obj *vlans_pool)
{
INIT_LIST_HEAD(&o->head);
+ o->head_reader = 0;
+ o->head_exe_request = false;
+ o->saved_ramrod_flags = 0;
o->macs_pool = macs_pool;
o->vlans_pool = vlans_pool;
@@ -1995,69 +2042,6 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
}
}
-void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *vlan_mac_obj,
- u8 cl_id, u32 cid, u8 func_id, void *rdata,
- dma_addr_t rdata_mapping, int state,
- unsigned long *pstate, bnx2x_obj_type type,
- struct bnx2x_credit_pool_obj *macs_pool,
- struct bnx2x_credit_pool_obj *vlans_pool)
-{
- union bnx2x_qable_obj *qable_obj =
- (union bnx2x_qable_obj *)vlan_mac_obj;
-
- bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
- rdata_mapping, state, pstate, type,
- macs_pool, vlans_pool);
-
- /* CAM pool handling */
- vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
- vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
- /* CAM offset is relevant for 57710 and 57711 chips only which have a
- * single CAM for both MACs and VLAN-MAC pairs. So the offset
- * will be taken from MACs' pool object only.
- */
- vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
- vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
-
- if (CHIP_IS_E1(bp)) {
- BNX2X_ERR("Do not support chips others than E2\n");
- BUG();
- } else if (CHIP_IS_E1H(bp)) {
- vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
- vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
- vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
- vlan_mac_obj->check_move = bnx2x_check_move_always_err;
- vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
-
- /* Exe Queue */
- bnx2x_exe_queue_init(bp,
- &vlan_mac_obj->exe_queue, 1, qable_obj,
- bnx2x_validate_vlan_mac,
- bnx2x_remove_vlan_mac,
- bnx2x_optimize_vlan_mac,
- bnx2x_execute_vlan_mac,
- bnx2x_exeq_get_vlan_mac);
- } else {
- vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
- vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
- vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
- vlan_mac_obj->check_move = bnx2x_check_move;
- vlan_mac_obj->ramrod_cmd =
- RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
-
- /* Exe Queue */
- bnx2x_exe_queue_init(bp,
- &vlan_mac_obj->exe_queue,
- CLASSIFY_RULES_COUNT,
- qable_obj, bnx2x_validate_vlan_mac,
- bnx2x_remove_vlan_mac,
- bnx2x_optimize_vlan_mac,
- bnx2x_execute_vlan_mac,
- bnx2x_exeq_get_vlan_mac);
- }
-}
-
/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
static inline void __storm_memset_mac_filters(struct bnx2x *bp,
struct tstorm_eth_mac_filter_config *mac_filters,
@@ -2147,7 +2131,7 @@ static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
/* The operation is completed */
clear_bit(p->state, p->pstate);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
return 0;
}
@@ -2293,11 +2277,11 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
data->header.rule_cnt, p->rx_accept_flags,
p->tx_accept_flags);
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
/* Send a ramrod */
@@ -2998,11 +2982,11 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
raw->clear_pending(raw);
return 0;
} else {
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
/* Send a ramrod */
@@ -3482,11 +3466,11 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
raw->clear_pending(raw);
return 0;
} else {
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
/* Send a ramrod */
@@ -3592,16 +3576,16 @@ error_exit1:
static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(o->sched_state, o->raw.pstate);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
}
static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
set_bit(o->sched_state, o->raw.pstate);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
}
static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
@@ -4107,11 +4091,11 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
}
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
/* Send a ramrod */
@@ -4140,8 +4124,11 @@ int bnx2x_config_rss(struct bnx2x *bp,
struct bnx2x_raw_obj *r = &o->raw;
/* Do nothing if only driver cleanup was requested */
- if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+ DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n",
+ p->ramrod_flags);
return 0;
+ }
r->set_pending(r);
@@ -4213,7 +4200,7 @@ int bnx2x_queue_state_change(struct bnx2x *bp,
if (rc) {
o->next_state = BNX2X_Q_STATE_MAX;
clear_bit(pending_bit, pending);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
return rc;
}
@@ -4301,7 +4288,7 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
wmb();
clear_bit(cmd, &o->pending);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
return 0;
}
@@ -4590,13 +4577,12 @@ static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
/* Fill the ramrod data */
bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
-
return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
U64_HI(data_mapping),
U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4618,13 +4604,12 @@ static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
bnx2x_q_fill_setup_data_e2(bp, params, rdata);
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
-
return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
U64_HI(data_mapping),
U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4662,13 +4647,12 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
o->cids[cid_index], rdata->general.client_id,
rdata->general.sp_client_id, rdata->general.cos);
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
-
return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
U64_HI(data_mapping),
U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4731,6 +4715,13 @@ static void bnx2x_q_fill_update_data(struct bnx2x *bp,
test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
+
+ /* tx switching */
+ data->tx_switching_flg =
+ test_bit(BNX2X_Q_UPDATE_TX_SWITCHING, &params->update_flags);
+ data->tx_switching_change_flg =
+ test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+ &params->update_flags);
}
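The two new flags follow the ramrod change-flag convention: a value bit plus a *_CHNG bit, so firmware only applies fields whose change bit is set. Caller-side sketch requesting TX switching on a queue update:

	__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
		  &update_params->update_flags);
	__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
		  &update_params->update_flags);
	/* fields without their *_CHNG bit set are left untouched by FW */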
static inline int bnx2x_q_send_update(struct bnx2x *bp,
@@ -4756,13 +4747,12 @@ static inline int bnx2x_q_send_update(struct bnx2x *bp,
/* Fill the ramrod data */
bnx2x_q_fill_update_data(bp, o, update_params, rdata);
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
-
return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
o->cids[cid_index], U64_HI(data_mapping),
U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4809,11 +4799,62 @@ static inline int bnx2x_q_send_activate(struct bnx2x *bp,
return bnx2x_q_send_update(bp, params);
}
+static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj,
+ struct bnx2x_queue_update_tpa_params *params,
+ struct tpa_update_ramrod_data *data)
+{
+ data->client_id = obj->cl_id;
+ data->complete_on_both_clients = params->complete_on_both_clients;
+ data->dont_verify_rings_pause_thr_flg =
+ params->dont_verify_thr;
+ data->max_agg_size = cpu_to_le16(params->max_agg_sz);
+ data->max_sges_for_packet = params->max_sges_pkt;
+ data->max_tpa_queues = params->max_tpa_queues;
+ data->sge_buff_size = cpu_to_le16(params->sge_buff_sz);
+ data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map));
+ data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map));
+ data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high);
+ data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low);
+ data->tpa_mode = params->tpa_mode;
+ data->update_ipv4 = params->update_ipv4;
+ data->update_ipv6 = params->update_ipv6;
+}
+
static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
struct bnx2x_queue_state_params *params)
{
- /* TODO: Not implemented yet. */
- return -1;
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct tpa_update_ramrod_data *rdata =
+ (struct tpa_update_ramrod_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ struct bnx2x_queue_update_tpa_params *update_tpa_params =
+ &params->params.update_tpa;
+ u16 type;
+
+ /* Clear the ramrod data */
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata);
+
+ /* Add the function id inside the type, so that sp post function
+ * doesn't automatically add the PF func-id, this is required
+ * for operations done by PFs on behalf of their VFs
+ */
+ type = ETH_CONNECTION_TYPE |
+ ((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT);
+
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE,
+ o->cids[BNX2X_PRIMARY_CID_INDEX],
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), type);
}
static inline int bnx2x_q_send_halt(struct bnx2x *bp,
@@ -5238,7 +5279,7 @@ static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
wmb();
clear_bit(cmd, &o->pending);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
return 0;
}
@@ -5643,6 +5684,12 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
rdata->tx_switch_suspend = switch_update_params->suspend;
rdata->echo = SWITCH_UPDATE;
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
U64_HI(data_mapping),
U64_LO(data_mapping), NONE_CONNECTION_TYPE);
@@ -5670,11 +5717,11 @@ static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
rdata->allowed_priorities = afex_update_params->allowed_priorities;
rdata->echo = AFEX_UPDATE;
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
- * and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
DP(BNX2X_MSG_SP,
"afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
@@ -5759,6 +5806,12 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
rdata->traffic_type_to_priority_cos[i] =
tx_start_params->traffic_type_to_priority_cos[i];
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
U64_HI(data_mapping),
U64_LO(data_mapping), NONE_CONNECTION_TYPE);
@@ -5873,7 +5926,7 @@ int bnx2x_func_state_change(struct bnx2x *bp,
if (rc) {
o->next_state = BNX2X_F_STATE_MAX;
clear_bit(cmd, pending);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 798dfe99673..718ecd29466 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -12,7 +12,7 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Vladislav Zolotarov
*
*/
@@ -266,6 +266,13 @@ enum {
BNX2X_DONT_CONSUME_CAM_CREDIT,
BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
};
+/* When looking for matching filters, some flags are not interesting */
+#define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \
+ 1 << BNX2X_ETH_MAC | \
+ 1 << BNX2X_ISCSI_ETH_MAC | \
+ 1 << BNX2X_NETQ_ETH_MAC)
+#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
+ ((flags) & BNX2X_VLAN_MAC_CMP_MASK)
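
The compare mask strips everything but the classification-source bits before two sets of vlan_mac_flags are matched, so bookkeeping bits such as the CAM-credit hints cannot prevent an otherwise identical filter from being found. A standalone sketch of the idea, with illustrative bit positions standing in for the enum above:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative bit positions standing in for the driver's enum. */
enum { UC_LIST_MAC, ETH_MAC, ISCSI_ETH_MAC, NETQ_ETH_MAC,
       DONT_CONSUME_CAM_CREDIT };

#define CMP_MASK ((1 << UC_LIST_MAC) | (1 << ETH_MAC) | \
		  (1 << ISCSI_ETH_MAC) | (1 << NETQ_ETH_MAC))
#define CMP_FLAGS(flags) ((flags) & CMP_MASK)

static bool flags_match(unsigned long a, unsigned long b)
{
	/* credit-handling bits are masked away; only source bits count */
	return CMP_FLAGS(a) == CMP_FLAGS(b);
}

int main(void)
{
	unsigned long stored = 1 << ETH_MAC;
	unsigned long query = (1 << ETH_MAC) | (1 << DONT_CONSUME_CAM_CREDIT);

	printf("match: %d\n", flags_match(stored, query));	/* prints 1 */
	return 0;
}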
struct bnx2x_vlan_mac_ramrod_params {
/* Object to run the command from */
@@ -285,6 +292,12 @@ struct bnx2x_vlan_mac_obj {
* entries.
*/
struct list_head head;
+ /* Implement a simple reader/writer lock on the head list.
+ * All these fields must only be accessed under the exe_queue lock
+ */
+ u8 head_reader; /* Num. of readers accessing head list */
+ bool head_exe_request; /* Pending execution request. */
+ unsigned long saved_ramrod_flags; /* Ramrods of pending execution */
/* TODO: Add its initialization in the init functions */
struct bnx2x_exe_queue_obj exe_queue;
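
head_reader, head_exe_request and saved_ramrod_flags form a small reader-preference lock: readers count themselves in and out under the exe_queue lock, and a writer that arrives while readers are active parks its ramrod flags and sets head_exe_request so the last departing reader replays the execution. A minimal userspace sketch of that protocol, with the replay reduced to a callback (all names hypothetical):

#include <pthread.h>
#include <stdbool.h>

struct head_lock {
	pthread_mutex_t lock;		/* stands in for the exe_queue lock */
	int head_reader;		/* active readers */
	bool head_exe_request;		/* writer deferred while readers ran */
	unsigned long saved_flags;	/* flags of the deferred execution */
};

static void reader_lock(struct head_lock *hl)
{
	pthread_mutex_lock(&hl->lock);
	hl->head_reader++;
	pthread_mutex_unlock(&hl->lock);
}

static void reader_unlock(struct head_lock *hl, void (*exec)(unsigned long))
{
	pthread_mutex_lock(&hl->lock);
	if (!--hl->head_reader && hl->head_exe_request) {
		/* last reader out replays the writer's deferred request */
		hl->head_exe_request = false;
		exec(hl->saved_flags);
	}
	pthread_mutex_unlock(&hl->lock);
}

static bool writer_try(struct head_lock *hl, unsigned long flags,
		       void (*exec)(unsigned long))
{
	bool ran = false;

	pthread_mutex_lock(&hl->lock);
	if (hl->head_reader) {
		/* defer: remember the request for the last reader */
		hl->head_exe_request = true;
		hl->saved_flags |= flags;
	} else {
		exec(flags);
		ran = true;
	}
	pthread_mutex_unlock(&hl->lock);
	return ran;
}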
@@ -435,9 +448,6 @@ enum {
BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
};
-void bnx2x_set_mac_in_nig(struct bnx2x *bp,
- bool add, unsigned char *dev_addr, int index);
-
/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
/* RX_MODE ramrod special flags: set in rx_mode_flags field in
@@ -757,7 +767,9 @@ enum {
BNX2X_Q_UPDATE_DEF_VLAN_EN,
BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
- BNX2X_Q_UPDATE_SILENT_VLAN_REM
+ BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+ BNX2X_Q_UPDATE_TX_SWITCHING
};
/* Allowed Queue states */
@@ -881,6 +893,24 @@ struct bnx2x_queue_update_params {
u8 cid_index;
};
+struct bnx2x_queue_update_tpa_params {
+ dma_addr_t sge_map;
+ u8 update_ipv4;
+ u8 update_ipv6;
+ u8 max_tpa_queues;
+ u8 max_sges_pkt;
+ u8 complete_on_both_clients;
+ u8 dont_verify_thr;
+ u8 tpa_mode;
+ u8 _pad;
+
+ u16 sge_buff_sz;
+ u16 max_agg_sz;
+
+ u16 sge_pause_thr_low;
+ u16 sge_pause_thr_high;
+};
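
Callers refresh a queue's TPA parameters through the same queue state machine as every other command: zero the state params, select BNX2X_Q_CMD_UPDATE_TPA, fill update_tpa and post. A hedged sketch of such a caller, mirroring the ramrod-issuing pattern used elsewhere in this patch (only a few fields filled, error handling trimmed; not part of the patch itself):

/* Sketch only: mirrors the patch's ramrod-issuing pattern. */
static int example_update_tpa(struct bnx2x *bp,
			      struct bnx2x_queue_sp_obj *q_obj,
			      dma_addr_t sge_map, u16 sge_buf_sz)
{
	struct bnx2x_queue_state_params qstate;

	memset(&qstate, 0, sizeof(qstate));
	qstate.q_obj = q_obj;
	qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
	set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);

	/* only the fields relevant to the illustration are filled */
	qstate.params.update_tpa.sge_map = sge_map;
	qstate.params.update_tpa.sge_buff_sz = sge_buf_sz;
	qstate.params.update_tpa.update_ipv4 = 1;
	qstate.params.update_tpa.update_ipv6 = 1;

	return bnx2x_queue_state_change(bp, &qstate);
}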
+
struct rxq_pause_params {
u16 bd_th_lo;
u16 bd_th_hi;
@@ -975,6 +1005,7 @@ struct bnx2x_queue_state_params {
/* Params according to the current command */
union {
struct bnx2x_queue_update_params update;
+ struct bnx2x_queue_update_tpa_params update_tpa;
struct bnx2x_queue_setup_params setup;
struct bnx2x_queue_init_params init;
struct bnx2x_queue_setup_tx_only_params tx_only;
@@ -1294,16 +1325,14 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
unsigned long *pstate, bnx2x_obj_type type,
struct bnx2x_credit_pool_obj *vlans_pool);
-void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *vlan_mac_obj,
- u8 cl_id, u32 cid, u8 func_id, void *rdata,
- dma_addr_t rdata_mapping, int state,
- unsigned long *pstate, bnx2x_obj_type type,
- struct bnx2x_credit_pool_obj *macs_pool,
- struct bnx2x_credit_pool_obj *vlans_pool);
-
+int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o);
+void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o);
+int bnx2x_vlan_mac_h_write_lock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o);
int bnx2x_config_vlan_mac(struct bnx2x *bp,
- struct bnx2x_vlan_mac_ramrod_params *p);
+ struct bnx2x_vlan_mac_ramrod_params *p);
int bnx2x_vlan_mac_move(struct bnx2x *bp,
struct bnx2x_vlan_mac_ramrod_params *p,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 95861efb505..eda8583f6fc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -12,9 +12,9 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Shmulik Ravid <shmulikr@broadcom.com>
- * Ariel Elior <ariele@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ * Ariel Elior <ariel.elior@qlogic.com>
*
*/
#include "bnx2x.h"
@@ -102,76 +102,22 @@ static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
mmiowb();
barrier();
}
-/* VFOP - VF slow-path operation support */
-#define BNX2X_VFOP_FILTER_ADD_CNT_MAX 0x10000
+static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ bool print_err)
+{
+ if (!bnx2x_leading_vfq(vf, sp_initialized)) {
+ if (print_err)
+ BNX2X_ERR("Slowpath objects not yet initialized!\n");
+ else
+ DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
+ return false;
+ }
+ return true;
+}
/* VFOP operations states */
-enum bnx2x_vfop_qctor_state {
- BNX2X_VFOP_QCTOR_INIT,
- BNX2X_VFOP_QCTOR_SETUP,
- BNX2X_VFOP_QCTOR_INT_EN
-};
-
-enum bnx2x_vfop_qdtor_state {
- BNX2X_VFOP_QDTOR_HALT,
- BNX2X_VFOP_QDTOR_TERMINATE,
- BNX2X_VFOP_QDTOR_CFCDEL,
- BNX2X_VFOP_QDTOR_DONE
-};
-
-enum bnx2x_vfop_vlan_mac_state {
- BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
- BNX2X_VFOP_VLAN_MAC_CLEAR,
- BNX2X_VFOP_VLAN_MAC_CHK_DONE,
- BNX2X_VFOP_MAC_CONFIG_LIST,
- BNX2X_VFOP_VLAN_CONFIG_LIST,
- BNX2X_VFOP_VLAN_CONFIG_LIST_0
-};
-
-enum bnx2x_vfop_qsetup_state {
- BNX2X_VFOP_QSETUP_CTOR,
- BNX2X_VFOP_QSETUP_VLAN0,
- BNX2X_VFOP_QSETUP_DONE
-};
-
-enum bnx2x_vfop_mcast_state {
- BNX2X_VFOP_MCAST_DEL,
- BNX2X_VFOP_MCAST_ADD,
- BNX2X_VFOP_MCAST_CHK_DONE
-};
-enum bnx2x_vfop_qflr_state {
- BNX2X_VFOP_QFLR_CLR_VLAN,
- BNX2X_VFOP_QFLR_CLR_MAC,
- BNX2X_VFOP_QFLR_TERMINATE,
- BNX2X_VFOP_QFLR_DONE
-};
-
-enum bnx2x_vfop_flr_state {
- BNX2X_VFOP_FLR_QUEUES,
- BNX2X_VFOP_FLR_HW
-};
-
-enum bnx2x_vfop_close_state {
- BNX2X_VFOP_CLOSE_QUEUES,
- BNX2X_VFOP_CLOSE_HW
-};
-
-enum bnx2x_vfop_rxmode_state {
- BNX2X_VFOP_RXMODE_CONFIG,
- BNX2X_VFOP_RXMODE_DONE
-};
-
-enum bnx2x_vfop_qteardown_state {
- BNX2X_VFOP_QTEARDOWN_RXMODE,
- BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
- BNX2X_VFOP_QTEARDOWN_CLR_MAC,
- BNX2X_VFOP_QTEARDOWN_QDTOR,
- BNX2X_VFOP_QTEARDOWN_DONE
-};
-
-#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)
-
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_queue_init_params *init_params,
struct bnx2x_queue_setup_params *setup_params,
@@ -215,7 +161,7 @@ void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vf_queue *q,
- struct bnx2x_vfop_qctor_params *p,
+ struct bnx2x_vf_queue_construct_params *p,
unsigned long q_type)
{
struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
@@ -265,11 +211,6 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
- if (vfq_is_leading(q)) {
- __set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
- __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
- }
-
/* Setup-op rx parameters */
if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
@@ -289,184 +230,85 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
}
}
-/* VFOP queue construction */
-static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static int bnx2x_vf_queue_create(struct bnx2x *bp,
+ struct bnx2x_virtf *vf, int qid,
+ struct bnx2x_vf_queue_construct_params *qctor)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
- struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
- enum bnx2x_vfop_qctor_state state = vfop->state;
-
- bnx2x_vfop_reset_wq(vf);
-
- if (vfop->rc < 0)
- goto op_err;
-
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
- switch (state) {
- case BNX2X_VFOP_QCTOR_INIT:
-
- /* has this queue already been opened? */
- if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
- BNX2X_Q_LOGICAL_STATE_ACTIVE) {
- DP(BNX2X_MSG_IOV,
- "Entered qctor but queue was already up. Aborting gracefully\n");
- goto op_done;
- }
-
- /* next state */
- vfop->state = BNX2X_VFOP_QCTOR_SETUP;
-
- q_params->cmd = BNX2X_Q_CMD_INIT;
- vfop->rc = bnx2x_queue_state_change(bp, q_params);
-
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
- case BNX2X_VFOP_QCTOR_SETUP:
- /* next state */
- vfop->state = BNX2X_VFOP_QCTOR_INT_EN;
-
- /* copy pre-prepared setup params to the queue-state params */
- vfop->op_p->qctor.qstate.params.setup =
- vfop->op_p->qctor.prep_qsetup;
-
- q_params->cmd = BNX2X_Q_CMD_SETUP;
- vfop->rc = bnx2x_queue_state_change(bp, q_params);
+ struct bnx2x_queue_state_params *q_params;
+ int rc = 0;
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+ DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
- case BNX2X_VFOP_QCTOR_INT_EN:
+ /* Prepare ramrod information */
+ q_params = &qctor->qstate;
+ q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);
- /* enable interrupts */
- bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
- USTORM_ID, 0, IGU_INT_ENABLE, 0);
- goto op_done;
- default:
- bnx2x_vfop_default(state);
+ if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
+ BNX2X_Q_LOGICAL_STATE_ACTIVE) {
+ DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");
+ goto out;
}
-op_err:
- BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
- vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
-op_done:
- bnx2x_vfop_end(bp, vf, vfop);
-op_pending:
- return;
-}
-
-static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- if (vfop) {
- vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ /* Run Queue 'construction' ramrods */
+ q_params->cmd = BNX2X_Q_CMD_INIT;
+ rc = bnx2x_queue_state_change(bp, q_params);
+ if (rc)
+ goto out;
- vfop->args.qctor.qid = qid;
- vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);
+ memcpy(&q_params->params.setup, &qctor->prep_qsetup,
+ sizeof(struct bnx2x_queue_setup_params));
+ q_params->cmd = BNX2X_Q_CMD_SETUP;
+ rc = bnx2x_queue_state_change(bp, q_params);
+ if (rc)
+ goto out;
- bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
- bnx2x_vfop_qctor, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
- cmd->block);
- }
- return -ENOMEM;
+ /* enable interrupts */
+ bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
+ USTORM_ID, 0, IGU_INT_ENABLE, 0);
+out:
+ return rc;
}
-/* VFOP queue destruction */
-static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ int qid)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
- struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
- enum bnx2x_vfop_qdtor_state state = vfop->state;
+ enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
+ BNX2X_Q_CMD_TERMINATE,
+ BNX2X_Q_CMD_CFC_DEL};
+ struct bnx2x_queue_state_params q_params;
+ int rc, i;
- bnx2x_vfop_reset_wq(vf);
-
- if (vfop->rc < 0)
- goto op_err;
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+ /* Prepare ramrod information */
+ memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
+ q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
- switch (state) {
- case BNX2X_VFOP_QDTOR_HALT:
+ if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
+ BNX2X_Q_LOGICAL_STATE_STOPPED) {
+ DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");
+ goto out;
+ }
- /* has this queue already been stopped? */
- if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
- BNX2X_Q_LOGICAL_STATE_STOPPED) {
- DP(BNX2X_MSG_IOV,
- "Entered qdtor but queue was already stopped. Aborting gracefully\n");
- goto op_done;
+ /* Run Queue 'destruction' ramrods */
+ for (i = 0; i < ARRAY_SIZE(cmds); i++) {
+ q_params.cmd = cmds[i];
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);
+ return rc;
}
-
- /* next state */
- vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;
-
- q_params->cmd = BNX2X_Q_CMD_HALT;
- vfop->rc = bnx2x_queue_state_change(bp, q_params);
-
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
- case BNX2X_VFOP_QDTOR_TERMINATE:
- /* next state */
- vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;
-
- q_params->cmd = BNX2X_Q_CMD_TERMINATE;
- vfop->rc = bnx2x_queue_state_change(bp, q_params);
-
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
- case BNX2X_VFOP_QDTOR_CFCDEL:
- /* next state */
- vfop->state = BNX2X_VFOP_QDTOR_DONE;
-
- q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
- vfop->rc = bnx2x_queue_state_change(bp, q_params);
-
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-op_err:
- BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
- vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
-op_done:
- case BNX2X_VFOP_QDTOR_DONE:
- /* invalidate the context */
- qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
- qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
- bnx2x_vfop_end(bp, vf, vfop);
- return;
- default:
- bnx2x_vfop_default(state);
}
-op_pending:
- return;
-}
-
-static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
- if (vfop) {
- struct bnx2x_queue_state_params *qstate =
- &vf->op_params.qctor.qstate;
-
- memset(qstate, 0, sizeof(*qstate));
- qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
-
- vfop->args.qdtor.qid = qid;
- vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);
-
- bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
- bnx2x_vfop_qdtor, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
- cmd->block);
+out:
+ /* Clean Context */
+ if (bnx2x_vfq(vf, qid, cxt)) {
+ bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
+ bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
}
- DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop.\n", vf->abs_vfid);
- return -ENOMEM;
+
+ return 0;
}
static void
@@ -474,865 +316,387 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
if (vf) {
+ /* the first igu entry belonging to VFs of this PF */
+ if (!BP_VFDB(bp)->first_vf_igu_entry)
+ BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;
+
+ /* the first igu entry belonging to this VF */
if (!vf_sb_count(vf))
vf->igu_base_id = igu_sb_id;
+
++vf_sb_count(vf);
+ ++vf->sb_count;
}
+ BP_VFDB(bp)->vf_sbs_pool++;
}
-/* VFOP MAC/VLAN helpers */
-static inline void bnx2x_vfop_credit(struct bnx2x *bp,
- struct bnx2x_vfop *vfop,
- struct bnx2x_vlan_mac_obj *obj)
+static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *obj,
+ atomic_t *counter)
{
- struct bnx2x_vfop_args_filters *args = &vfop->args.filters;
+ struct list_head *pos;
+ int read_lock;
+ int cnt = 0;
- /* update credit only if there is no error
- * and a valid credit counter
- */
- if (!vfop->rc && args->credit) {
- int cnt = 0;
- struct list_head *pos;
+ read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
+ if (read_lock)
+ DP(BNX2X_MSG_SP, "Failed to take vlan mac read lock; continuing anyway\n");
- list_for_each(pos, &obj->head)
- cnt++;
+ list_for_each(pos, &obj->head)
+ cnt++;
- atomic_set(args->credit, cnt);
- }
-}
-
-static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
- struct bnx2x_vfop_filter *pos,
- struct bnx2x_vlan_mac_data *user_req)
-{
- user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
- BNX2X_VLAN_MAC_DEL;
+ if (!read_lock)
+ bnx2x_vlan_mac_h_read_unlock(bp, obj);
- switch (pos->type) {
- case BNX2X_VFOP_FILTER_MAC:
- memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
- break;
- case BNX2X_VFOP_FILTER_VLAN:
- user_req->u.vlan.vlan = pos->vid;
- break;
- default:
- BNX2X_ERR("Invalid filter type, skipping\n");
- return 1;
- }
- return 0;
+ atomic_set(counter, cnt);
}
-static int
-bnx2x_vfop_config_vlan0(struct bnx2x *bp,
- struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
- bool add)
+static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ int qid, bool drv_only, bool mac)
{
+ struct bnx2x_vlan_mac_ramrod_params ramrod;
int rc;
- vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
- BNX2X_VLAN_MAC_DEL;
- vlan_mac->user_req.u.vlan.vlan = 0;
-
- rc = bnx2x_config_vlan_mac(bp, vlan_mac);
- if (rc == -EEXIST)
- rc = 0;
- return rc;
-}
-
-static int bnx2x_vfop_config_list(struct bnx2x *bp,
- struct bnx2x_vfop_filters *filters,
- struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
-{
- struct bnx2x_vfop_filter *pos, *tmp;
- struct list_head rollback_list, *filters_list = &filters->head;
- struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
- int rc = 0, cnt = 0;
-
- INIT_LIST_HEAD(&rollback_list);
-
- list_for_each_entry_safe(pos, tmp, filters_list, link) {
- if (bnx2x_vfop_set_user_req(bp, pos, user_req))
- continue;
-
- rc = bnx2x_config_vlan_mac(bp, vlan_mac);
- if (rc >= 0) {
- cnt += pos->add ? 1 : -1;
- list_move(&pos->link, &rollback_list);
- rc = 0;
- } else if (rc == -EEXIST) {
- rc = 0;
- } else {
- BNX2X_ERR("Failed to add a new vlan_mac command\n");
- break;
- }
- }
+ DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
+ mac ? "MACs" : "VLANs");
- /* rollback if error or too many rules added */
- if (rc || cnt > filters->add_cnt) {
- BNX2X_ERR("error or too many rules added. Performing rollback\n");
- list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
- pos->add = !pos->add; /* reverse op */
- bnx2x_vfop_set_user_req(bp, pos, user_req);
- bnx2x_config_vlan_mac(bp, vlan_mac);
- list_del(&pos->link);
- }
- cnt = 0;
- if (!rc)
- rc = -EINVAL;
+ /* Prepare ramrod params */
+ memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
+ if (mac) {
+ set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
+ } else {
+ set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &ramrod.user_req.vlan_mac_flags);
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+ }
+ ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+
+ set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
+ if (drv_only)
+ set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
+ else
+ set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
+
+ /* Start deleting */
+ rc = ramrod.vlan_mac_obj->delete_all(bp,
+ ramrod.vlan_mac_obj,
+ &ramrod.user_req.vlan_mac_flags,
+ &ramrod.ramrod_flags);
+ if (rc) {
+ BNX2X_ERR("Failed to delete all %s\n",
+ mac ? "MACs" : "VLANs");
+ return rc;
}
- filters->add_cnt = cnt;
- return rc;
-}
-
-/* VFOP set VLAN/MAC */
-static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
- struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
- struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;
-
- enum bnx2x_vfop_vlan_mac_state state = vfop->state;
-
- if (vfop->rc < 0)
- goto op_err;
-
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
- bnx2x_vfop_reset_wq(vf);
-
- switch (state) {
- case BNX2X_VFOP_VLAN_MAC_CLEAR:
- /* next state */
- vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
-
- /* do delete */
- vfop->rc = obj->delete_all(bp, obj,
- &vlan_mac->user_req.vlan_mac_flags,
- &vlan_mac->ramrod_flags);
-
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-
- case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
- /* next state */
- vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
-
- /* do config */
- vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
- if (vfop->rc == -EEXIST)
- vfop->rc = 0;
-
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-
- case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
- vfop->rc = !!obj->raw.check_pending(&obj->raw);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-
- case BNX2X_VFOP_MAC_CONFIG_LIST:
- /* next state */
- vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
-
- /* do list config */
- vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
- if (vfop->rc)
- goto op_err;
-
- set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
- vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-
- case BNX2X_VFOP_VLAN_CONFIG_LIST:
- /* next state */
- vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0;
-
- /* remove vlan0 - could be no-op */
- vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
- if (vfop->rc)
- goto op_err;
-
- /* Do vlan list config. if this operation fails we try to
- * restore vlan0 to keep the queue is working order
- */
- vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
- if (!vfop->rc) {
- set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
- vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
- }
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */
- case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
- /* next state */
- vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
+ /* Clear the vlan counters */
+ if (!mac)
+ atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0);
- if (list_empty(&obj->head))
- /* add vlan0 */
- vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-
- default:
- bnx2x_vfop_default(state);
- }
-op_err:
- BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
-op_done:
- kfree(filters);
- bnx2x_vfop_credit(bp, vfop, obj);
- bnx2x_vfop_end(bp, vf, vfop);
-op_pending:
- return;
+ return 0;
}
-struct bnx2x_vfop_vlan_mac_flags {
- bool drv_only;
- bool dont_consume;
- bool single_cmd;
- bool add;
-};
-
-static void
-bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
- struct bnx2x_vfop_vlan_mac_flags *flags)
+static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
+ struct bnx2x_virtf *vf, int qid,
+ struct bnx2x_vf_mac_vlan_filter *filter,
+ bool drv_only)
{
- struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;
+ struct bnx2x_vlan_mac_ramrod_params ramrod;
+ int rc;
- memset(ramrod, 0, sizeof(*ramrod));
+ DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
+ vf->abs_vfid, filter->add ? "Adding" : "Deleting",
+ filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN");
+
+ /* Prepare ramrod params */
+ memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
+ if (filter->type == BNX2X_VF_FILTER_VLAN) {
+ set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &ramrod.user_req.vlan_mac_flags);
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+ ramrod.user_req.u.vlan.vlan = filter->vid;
+ } else {
+ set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
+ memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
+ }
+ ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
+ BNX2X_VLAN_MAC_DEL;
+
+ /* Verify there are available vlan credits */
+ if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
+ (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
+ vf_vlan_rules_cnt(vf))) {
+ BNX2X_ERR("No credits for vlan [%d >= %d]\n",
+ atomic_read(&bnx2x_vfq(vf, qid, vlan_count)),
+ vf_vlan_rules_cnt(vf));
+ return -ENOMEM;
+ }
- /* ramrod flags */
- if (flags->drv_only)
- set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
- if (flags->single_cmd)
- set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);
+ set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
+ if (drv_only)
+ set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
+ else
+ set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
- /* mac_vlan flags */
- if (flags->dont_consume)
- set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);
+ /* Add/Remove the filter */
+ rc = bnx2x_config_vlan_mac(bp, &ramrod);
+ if (rc && rc != -EEXIST) {
+ BNX2X_ERR("Failed to %s %s\n",
+ filter->add ? "add" : "delete",
+ filter->type == BNX2X_VF_FILTER_MAC ? "MAC" :
+ "VLAN");
+ return rc;
+ }
- /* cmd */
- ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
-}
+ /* Update the vlan counters */
+ if (filter->type == BNX2X_VF_FILTER_VLAN)
+ bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj,
+ &bnx2x_vfq(vf, qid, vlan_count));
-static inline void
-bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
- struct bnx2x_vfop_vlan_mac_flags *flags)
-{
- bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
- set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
+ return 0;
}
-static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid, bool drv_only)
+int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mac_vlan_filters *filters,
+ int qid, bool drv_only)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ int rc = 0, i;
- if (vfop) {
- struct bnx2x_vfop_args_filters filters = {
- .multi_filter = NULL, /* single */
- .credit = NULL, /* consume credit */
- };
- struct bnx2x_vfop_vlan_mac_flags flags = {
- .drv_only = drv_only,
- .dont_consume = (filters.credit != NULL),
- .single_cmd = true,
- .add = false /* don't care */,
- };
- struct bnx2x_vlan_mac_ramrod_params *ramrod =
- &vf->op_params.vlan_mac;
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
- /* set ramrod params */
- bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
-
- /* set object */
- ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
-
- /* set extra args */
- vfop->args.filters = filters;
-
- bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
- bnx2x_vfop_vlan_mac, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
- cmd->block);
- }
- return -ENOMEM;
-}
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
-int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- struct bnx2x_vfop_filters *macs,
- int qid, bool drv_only)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
- if (vfop) {
- struct bnx2x_vfop_args_filters filters = {
- .multi_filter = macs,
- .credit = NULL, /* consume credit */
- };
- struct bnx2x_vfop_vlan_mac_flags flags = {
- .drv_only = drv_only,
- .dont_consume = (filters.credit != NULL),
- .single_cmd = false,
- .add = false, /* don't care since only the items in the
- * filters list affect the sp operation,
- * not the list itself
- */
- };
- struct bnx2x_vlan_mac_ramrod_params *ramrod =
- &vf->op_params.vlan_mac;
-
- /* set ramrod params */
- bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
-
- /* set object */
- ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
-
- /* set extra args */
- filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
- vfop->args.filters = filters;
-
- bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
- bnx2x_vfop_vlan_mac, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
- cmd->block);
+ /* Prepare ramrod params */
+ for (i = 0; i < filters->count; i++) {
+ rc = bnx2x_vf_mac_vlan_config(bp, vf, qid,
+ &filters->filters[i], drv_only);
+ if (rc)
+ break;
}
- return -ENOMEM;
-}
-int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid, u16 vid, bool add)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
- if (vfop) {
- struct bnx2x_vfop_args_filters filters = {
- .multi_filter = NULL, /* single command */
- .credit = &bnx2x_vfq(vf, qid, vlan_count),
- };
- struct bnx2x_vfop_vlan_mac_flags flags = {
- .drv_only = false,
- .dont_consume = (filters.credit != NULL),
- .single_cmd = true,
- .add = add,
- };
- struct bnx2x_vlan_mac_ramrod_params *ramrod =
- &vf->op_params.vlan_mac;
-
- /* set ramrod params */
- bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
- ramrod->user_req.u.vlan.vlan = vid;
-
- /* set object */
- ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
-
- /* set extra args */
- vfop->args.filters = filters;
-
- bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
- bnx2x_vfop_vlan_mac, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
- cmd->block);
+ /* Rollback if needed */
+ if (i != filters->count) {
+ BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
+ i, filters->count);
+ while (--i >= 0) {
+ filters->filters[i].add = !filters->filters[i].add;
+ bnx2x_vf_mac_vlan_config(bp, vf, qid,
+ &filters->filters[i],
+ drv_only);
+ }
}
- return -ENOMEM;
-}
-
-static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid, bool drv_only)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
- if (vfop) {
- struct bnx2x_vfop_args_filters filters = {
- .multi_filter = NULL, /* single command */
- .credit = &bnx2x_vfq(vf, qid, vlan_count),
- };
- struct bnx2x_vfop_vlan_mac_flags flags = {
- .drv_only = drv_only,
- .dont_consume = (filters.credit != NULL),
- .single_cmd = true,
- .add = false, /* don't care */
- };
- struct bnx2x_vlan_mac_ramrod_params *ramrod =
- &vf->op_params.vlan_mac;
-
- /* set ramrod params */
- bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
- /* set object */
- ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
-
- /* set extra args */
- vfop->args.filters = filters;
+ /* It's our responsibility to free the filters */
+ kfree(filters);
- bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
- bnx2x_vfop_vlan_mac, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
- cmd->block);
- }
- return -ENOMEM;
+ return rc;
}
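
The caller (in practice the PF's mailbox handler) builds the filter array once and transfers ownership; this routine frees it on every path. A hedged sketch of such a caller, assuming the container pairs a count with a flexible array of filters, as the loop above implies:

/* Sketch only: the container layout (count + flexible array) is assumed. */
static int example_add_mac_and_vlan(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	static u8 mac[ETH_ALEN] = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01};
	struct bnx2x_vf_mac_vlan_filters *fl;

	fl = kzalloc(sizeof(*fl) + 2 * sizeof(fl->filters[0]), GFP_KERNEL);
	if (!fl)
		return -ENOMEM;

	fl->count = 2;
	fl->filters[0].type = BNX2X_VF_FILTER_MAC;
	fl->filters[0].add = true;
	fl->filters[0].mac = mac;
	fl->filters[1].type = BNX2X_VF_FILTER_VLAN;
	fl->filters[1].add = true;
	fl->filters[1].vid = 100;

	/* the list routine owns and frees 'fl' on every path */
	return bnx2x_vf_mac_vlan_config_list(bp, vf, fl, LEADING_IDX, false);
}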
-int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- struct bnx2x_vfop_filters *vlans,
- int qid, bool drv_only)
+int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
+ struct bnx2x_vf_queue_construct_params *qctor)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
- if (vfop) {
- struct bnx2x_vfop_args_filters filters = {
- .multi_filter = vlans,
- .credit = &bnx2x_vfq(vf, qid, vlan_count),
- };
- struct bnx2x_vfop_vlan_mac_flags flags = {
- .drv_only = drv_only,
- .dont_consume = (filters.credit != NULL),
- .single_cmd = false,
- .add = false, /* don't care */
- };
- struct bnx2x_vlan_mac_ramrod_params *ramrod =
- &vf->op_params.vlan_mac;
-
- /* set ramrod params */
- bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
-
- /* set object */
- ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
-
- /* set extra args */
- filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
- atomic_read(filters.credit);
-
- vfop->args.filters = filters;
-
- bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
- bnx2x_vfop_vlan_mac, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
- cmd->block);
- }
- return -ENOMEM;
-}
+ int rc;
-/* VFOP queue setup (queue constructor + set vlan 0) */
-static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- int qid = vfop->args.qctor.qid;
- enum bnx2x_vfop_qsetup_state state = vfop->state;
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vfop_qsetup,
- .block = false,
- };
+ DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
- if (vfop->rc < 0)
+ rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);
+ if (rc)
goto op_err;
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+ /* Configure vlan0 for leading queue */
+ if (!qid) {
+ struct bnx2x_vf_mac_vlan_filter filter;
- switch (state) {
- case BNX2X_VFOP_QSETUP_CTOR:
- /* init the queue ctor command */
- vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
- vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
- if (vfop->rc)
+ memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter));
+ filter.type = BNX2X_VF_FILTER_VLAN;
+ filter.add = true;
+ filter.vid = 0;
+ rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false);
+ if (rc)
goto op_err;
- return;
-
- case BNX2X_VFOP_QSETUP_VLAN0:
- /* skip if non-leading or FPGA/EMU*/
- if (qid)
- goto op_done;
-
- /* init the queue set-vlan command (for vlan 0) */
- vfop->state = BNX2X_VFOP_QSETUP_DONE;
- vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
- if (vfop->rc)
- goto op_err;
- return;
-op_err:
- BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
-op_done:
- case BNX2X_VFOP_QSETUP_DONE:
- vf->cfg_flags |= VF_CFG_VLAN;
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
- bnx2x_vfop_end(bp, vf, vfop);
- return;
- default:
- bnx2x_vfop_default(state);
}
-}
-
-int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
- if (vfop) {
- vfop->args.qctor.qid = qid;
- bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
- bnx2x_vfop_qsetup, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
- cmd->block);
- }
- return -ENOMEM;
+ /* Schedule the configuration of any pending vlan filters */
+ vf->cfg_flags |= VF_CFG_VLAN;
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ BNX2X_MSG_IOV);
+ return 0;
+op_err:
+ BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
+ return rc;
}
-/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
-static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ int qid)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- int qid = vfop->args.qx.qid;
- enum bnx2x_vfop_qflr_state state = vfop->state;
- struct bnx2x_queue_state_params *qstate;
- struct bnx2x_vfop_cmd cmd;
-
- bnx2x_vfop_reset_wq(vf);
-
- if (vfop->rc < 0)
- goto op_err;
-
- DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);
+ int rc;
- cmd.done = bnx2x_vfop_qflr;
- cmd.block = false;
+ DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
- switch (state) {
- case BNX2X_VFOP_QFLR_CLR_VLAN:
- /* vlan-clear-all: driver-only, don't consume credit */
- vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
- vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
- if (vfop->rc)
+ /* If needed, clean the filtering data base */
+ if ((qid == LEADING_IDX) &&
+ bnx2x_validate_vf_sp_objs(bp, vf, false)) {
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false);
+ if (rc)
goto op_err;
- return;
-
- case BNX2X_VFOP_QFLR_CLR_MAC:
- /* mac-clear-all: driver only consume credit */
- vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
- vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
- DP(BNX2X_MSG_IOV,
- "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d",
- vf->abs_vfid, vfop->rc);
- if (vfop->rc)
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true);
+ if (rc)
goto op_err;
- return;
-
- case BNX2X_VFOP_QFLR_TERMINATE:
- qstate = &vfop->op_p->qctor.qstate;
- memset(qstate , 0, sizeof(*qstate));
- qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
- vfop->state = BNX2X_VFOP_QFLR_DONE;
-
- DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
- vf->abs_vfid, qstate->q_obj->state);
-
- if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
- qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
- qstate->cmd = BNX2X_Q_CMD_TERMINATE;
- vfop->rc = bnx2x_queue_state_change(bp, qstate);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
- } else {
- goto op_done;
- }
-
-op_err:
- BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
- vf->abs_vfid, qid, vfop->rc);
-op_done:
- case BNX2X_VFOP_QFLR_DONE:
- bnx2x_vfop_end(bp, vf, vfop);
- return;
- default:
- bnx2x_vfop_default(state);
}
-op_pending:
- return;
-}
-static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ /* Terminate queue */
+ if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) {
+ struct bnx2x_queue_state_params qstate;
- if (vfop) {
- vfop->args.qx.qid = qid;
- bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
- bnx2x_vfop_qflr, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
- cmd->block);
+ memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
+ qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ qstate.q_obj->state = BNX2X_Q_STATE_STOPPED;
+ qstate.cmd = BNX2X_Q_CMD_TERMINATE;
+ set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
+ rc = bnx2x_queue_state_change(bp, &qstate);
+ if (rc)
+ goto op_err;
}
- return -ENOMEM;
-}
-
-/* VFOP multi-casts */
-static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
- struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
- struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
- enum bnx2x_vfop_mcast_state state = vfop->state;
- int i;
-
- bnx2x_vfop_reset_wq(vf);
-
- if (vfop->rc < 0)
- goto op_err;
-
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
- switch (state) {
- case BNX2X_VFOP_MCAST_DEL:
- /* clear existing mcasts */
- vfop->state = BNX2X_VFOP_MCAST_ADD;
- vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
- case BNX2X_VFOP_MCAST_ADD:
- if (raw->check_pending(raw))
- goto op_pending;
-
- if (args->mc_num) {
- /* update mcast list on the ramrod params */
- INIT_LIST_HEAD(&mcast->mcast_list);
- for (i = 0; i < args->mc_num; i++)
- list_add_tail(&(args->mc[i].link),
- &mcast->mcast_list);
- /* add new mcasts */
- vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
- vfop->rc = bnx2x_config_mcast(bp, mcast,
- BNX2X_MCAST_CMD_ADD);
- }
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
- case BNX2X_VFOP_MCAST_CHK_DONE:
- vfop->rc = raw->check_pending(raw) ? 1 : 0;
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
- default:
- bnx2x_vfop_default(state);
- }
+ return 0;
op_err:
- BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
-op_done:
- kfree(args->mc);
- bnx2x_vfop_end(bp, vf, vfop);
-op_pending:
- return;
+ BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
+ return rc;
}
-int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- bnx2x_mac_addr_t *mcasts,
- int mcast_num, bool drv_only)
-{
- struct bnx2x_vfop *vfop = NULL;
- size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
- struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
- NULL;
-
- if (!mc_sz || mc) {
- vfop = bnx2x_vfop_add(bp, vf);
- if (vfop) {
- int i;
- struct bnx2x_mcast_ramrod_params *ramrod =
- &vf->op_params.mcast;
-
- /* set ramrod params */
- memset(ramrod, 0, sizeof(*ramrod));
- ramrod->mcast_obj = &vf->mcast_obj;
- if (drv_only)
- set_bit(RAMROD_DRV_CLR_ONLY,
- &ramrod->ramrod_flags);
-
- /* copy mcasts pointers */
- vfop->args.mc_list.mc_num = mcast_num;
- vfop->args.mc_list.mc = mc;
- for (i = 0; i < mcast_num; i++)
- mc[i].mac = mcasts[i];
-
- bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
- bnx2x_vfop_mcast, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
- cmd->block);
- } else {
- kfree(mc);
+int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only)
+{
+ struct bnx2x_mcast_list_elem *mc = NULL;
+ struct bnx2x_mcast_ramrod_params mcast;
+ int rc, i;
+
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+ /* Prepare Multicast command */
+ memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
+ mcast.mcast_obj = &vf->mcast_obj;
+ if (drv_only)
+ set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
+ else
+ set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
+ if (mc_num) {
+ mc = kcalloc(mc_num, sizeof(struct bnx2x_mcast_list_elem),
+ GFP_KERNEL);
+ if (!mc) {
+ BNX2X_ERR("Cannot Configure mulicasts due to lack of memory\n");
+ return -ENOMEM;
}
}
- return -ENOMEM;
-}
-/* VFOP rx-mode */
-static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
- enum bnx2x_vfop_rxmode_state state = vfop->state;
-
- bnx2x_vfop_reset_wq(vf);
-
- if (vfop->rc < 0)
- goto op_err;
-
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+ /* clear existing mcasts */
+ mcast.mcast_list_len = vf->mcast_list_len;
+ vf->mcast_list_len = mc_num;
+ rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
+ if (rc) {
+ BNX2X_ERR("Failed to remove multicasts\n");
+ kfree(mc);
+ return rc;
+ }
- switch (state) {
- case BNX2X_VFOP_RXMODE_CONFIG:
- /* next state */
- vfop->state = BNX2X_VFOP_RXMODE_DONE;
+ /* update mcast list on the ramrod params */
+ if (mc_num) {
+ INIT_LIST_HEAD(&mcast.mcast_list);
+ for (i = 0; i < mc_num; i++) {
+ mc[i].mac = mcasts[i];
+ list_add_tail(&mc[i].link,
+ &mcast.mcast_list);
+ }
- vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-op_err:
- BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
-op_done:
- case BNX2X_VFOP_RXMODE_DONE:
- bnx2x_vfop_end(bp, vf, vfop);
- return;
- default:
- bnx2x_vfop_default(state);
+ /* add new mcasts */
+ mcast.mcast_list_len = mc_num;
+ rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
+ if (rc)
+ BNX2X_ERR("Faled to add multicasts\n");
+ kfree(mc);
}
-op_pending:
- return;
+
+ return rc;
}
-int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid, unsigned long accept_flags)
+static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
+ struct bnx2x_rx_mode_ramrod_params *ramrod,
+ struct bnx2x_virtf *vf,
+ unsigned long accept_flags)
{
struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
- if (vfop) {
- struct bnx2x_rx_mode_ramrod_params *ramrod =
- &vf->op_params.rx_mode;
-
- memset(ramrod, 0, sizeof(*ramrod));
-
- /* Prepare ramrod parameters */
- ramrod->cid = vfq->cid;
- ramrod->cl_id = vfq_cl_id(vf, vfq);
- ramrod->rx_mode_obj = &bp->rx_mode_obj;
- ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
-
- ramrod->rx_accept_flags = accept_flags;
- ramrod->tx_accept_flags = accept_flags;
- ramrod->pstate = &vf->filter_state;
- ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
- set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
- set_bit(RAMROD_RX, &ramrod->ramrod_flags);
- set_bit(RAMROD_TX, &ramrod->ramrod_flags);
+ memset(ramrod, 0, sizeof(*ramrod));
+ ramrod->cid = vfq->cid;
+ ramrod->cl_id = vfq_cl_id(vf, vfq);
+ ramrod->rx_mode_obj = &bp->rx_mode_obj;
+ ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
+ ramrod->rx_accept_flags = accept_flags;
+ ramrod->tx_accept_flags = accept_flags;
+ ramrod->pstate = &vf->filter_state;
+ ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
- ramrod->rdata =
- bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
- ramrod->rdata_mapping =
- bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+ set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
+ set_bit(RAMROD_RX, &ramrod->ramrod_flags);
+ set_bit(RAMROD_TX, &ramrod->ramrod_flags);
- bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
- bnx2x_vfop_rxmode, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
- cmd->block);
- }
- return -ENOMEM;
+ ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
+ ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
}
-/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
- * queue destructor)
- */
-static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
+int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ int qid, unsigned long accept_flags)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- int qid = vfop->args.qx.qid;
- enum bnx2x_vfop_qteardown_state state = vfop->state;
- struct bnx2x_vfop_cmd cmd;
-
- if (vfop->rc < 0)
- goto op_err;
-
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+ struct bnx2x_rx_mode_ramrod_params ramrod;
- cmd.done = bnx2x_vfop_qdown;
- cmd.block = false;
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
- switch (state) {
- case BNX2X_VFOP_QTEARDOWN_RXMODE:
- /* Drop all */
- vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
- vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
- if (vfop->rc)
- goto op_err;
- return;
+ bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
+ set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
+ vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
+ return bnx2x_config_rx_mode(bp, &ramrod);
+}
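
accept_flags is the ordinary rx_mode bit set from bnx2x_sp.h, so a caller granting a VF normal reception on its leading queue would look roughly like this (a sketch, not taken from the patch):

/* Sketch only: enabling ordinary reception on the VF's leading queue. */
unsigned long accept = 0;
int rc;

__set_bit(BNX2X_ACCEPT_UNICAST, &accept);
__set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
rc = bnx2x_vf_rxmode(bp, vf, LEADING_IDX, accept);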
- case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
- /* vlan-clear-all: don't consume credit */
- vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
- vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
- if (vfop->rc)
- goto op_err;
- return;
+int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
+{
+ int rc;
- case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
- /* mac-clear-all: consume credit */
- vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
- vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
- if (vfop->rc)
- goto op_err;
- return;
+ DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
- case BNX2X_VFOP_QTEARDOWN_QDTOR:
- /* run the queue destruction flow */
- DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
- vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
- DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
- vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
- DP(BNX2X_MSG_IOV, "returned from cmd\n");
- if (vfop->rc)
+ /* Remove all classification configuration for leading queue */
+ if (qid == LEADING_IDX) {
+ rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
+ if (rc)
goto op_err;
- return;
-op_err:
- BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
- vf->abs_vfid, qid, vfop->rc);
- case BNX2X_VFOP_QTEARDOWN_DONE:
- bnx2x_vfop_end(bp, vf, vfop);
- return;
- default:
- bnx2x_vfop_default(state);
- }
-}
-
-int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
- if (vfop) {
- vfop->args.qx.qid = qid;
- bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE,
- bnx2x_vfop_qdown, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
- cmd->block);
+ /* Remove filtering if feasible */
+ if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+ false, false);
+ if (rc)
+ goto op_err;
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+ false, true);
+ if (rc)
+ goto op_err;
+ rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
+ if (rc)
+ goto op_err;
+ }
}
- return -ENOMEM;
+ /* Destroy queue */
+ rc = bnx2x_vf_queue_destroy(bp, vf, qid);
+ if (rc)
+ goto op_err;
+ return rc;
+op_err:
+ BNX2X_ERR("vf[%d:%d] error: rc %d\n",
+ vf->abs_vfid, qid, rc);
+ return rc;
}
/* VF enable primitives
@@ -1399,12 +763,12 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
if (vf->cfg_flags & VF_CFG_INT_SIMD)
val |= IGU_VF_CONF_SINGLE_ISR_EN;
val &= ~IGU_VF_CONF_PARENT_MASK;
- val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT; /* parent PF */
+ val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
DP(BNX2X_MSG_IOV,
- "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
- vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));
+ "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
+ vf->abs_vfid, val);
bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
@@ -1476,37 +840,64 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
return 0;
}
+static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ int new)
+{
+ int num = vf_vlan_rules_cnt(vf);
+ int diff = new - num;
+ bool rc = true;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n",
+ vf->abs_vfid, new, num);
+
+ if (diff > 0)
+ rc = bp->vlans_pool.get(&bp->vlans_pool, diff);
+ else if (diff < 0)
+ rc = bp->vlans_pool.put(&bp->vlans_pool, -diff);
+
+ if (rc)
+ vf_vlan_rules_cnt(vf) = new;
+ else
+ DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n",
+ vf->abs_vfid);
+}
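
Resizing a VF's vlan allowance is a delta against the shared pool: grow by drawing from it, shrink by returning to it, and keep the old count whenever the pool refuses. A standalone sketch of the same bookkeeping with a hypothetical pool type:

#include <stdbool.h>

struct credit_pool {
	int available;
};

static bool pool_get(struct credit_pool *p, int n)
{
	if (p->available < n)
		return false;
	p->available -= n;
	return true;
}

static bool pool_put(struct credit_pool *p, int n)
{
	p->available += n;
	return true;
}

/* Returns the count actually in effect after the resize attempt. */
static int resize_credits(struct credit_pool *p, int cur, int want)
{
	int diff = want - cur;
	bool ok = true;

	if (diff > 0)
		ok = pool_get(p, diff);		/* grow: take from the pool */
	else if (diff < 0)
		ok = pool_put(p, -diff);	/* shrink: give back */

	return ok ? want : cur;
}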
+
/* must be called after the number of PF queues and the number of VFs are
* both known
*/
static void
-bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
+bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
+ struct vf_pf_resc_request *resc = &vf->alloc_resc;
u16 vlan_count = 0;
/* will be set only during VF-ACQUIRE */
resc->num_rxqs = 0;
resc->num_txqs = 0;
- /* no credit calculcis for macs (just yet) */
+ /* no credit calculations for macs (just yet) */
resc->num_mac_filters = 1;
/* divvy up vlan rules */
+ bnx2x_iov_re_set_vlan_filters(bp, vf, 0);
vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
vlan_count = 1 << ilog2(vlan_count);
- resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);
+ bnx2x_iov_re_set_vlan_filters(bp, vf,
+ vlan_count / BNX2X_NR_VIRTFN(bp));
/* no real limitation */
resc->num_mc_filters = 0;
/* num_sbs already set */
+ resc->num_sbs = vf->sb_count;
}
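
Rounding the pool down to a power of two before dividing keeps the per-VF share stable; for example, a hypothetical 100-entry pool shared by 8 VFs becomes 1 << ilog2(100) = 64 entries, or 8 vlan rules per VF. A tiny standalone check of that arithmetic:

#include <stdio.h>

/* ilog2 as in the kernel: floor of log2 for x > 0 */
static unsigned int ilog2_u(unsigned int x)
{
	unsigned int l = 0;

	while (x >>= 1)
		l++;
	return l;
}

int main(void)
{
	unsigned int pool = 100, nr_vf = 8;	/* hypothetical sizes */
	unsigned int rounded = 1u << ilog2_u(pool);

	printf("%u vlan rules per VF\n", rounded / nr_vf);	/* prints 8 */
	return 0;
}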
/* FLR routines: */
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
/* reset the state variables */
- bnx2x_iov_static_resc(bp, &vf->alloc_resc);
+ bnx2x_iov_static_resc(bp, vf);
vf->state = VF_FREE;
}
@@ -1530,120 +921,63 @@ static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
bnx2x_tx_hw_flushed(bp, poll_cnt);
}
-static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
- enum bnx2x_vfop_flr_state state = vfop->state;
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vfop_flr,
- .block = false,
- };
-
- if (vfop->rc < 0)
- goto op_err;
-
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+ int rc, i;
- switch (state) {
- case BNX2X_VFOP_FLR_QUEUES:
- /* the cleanup operations are valid if and only if the VF
- * was first acquired.
- */
- if (++(qx->qid) < vf_rxq_count(vf)) {
- vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
- qx->qid);
- if (vfop->rc)
- goto op_err;
- return;
- }
- /* remove multicasts */
- vfop->state = BNX2X_VFOP_FLR_HW;
- vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
- 0, true);
- if (vfop->rc)
- goto op_err;
- return;
- case BNX2X_VFOP_FLR_HW:
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
- /* dispatch final cleanup and wait for HW queues to flush */
- bnx2x_vf_flr_clnup_hw(bp, vf);
+ /* the cleanup operations are valid if and only if the VF
+ * was first acquired.
+ */
+ for (i = 0; i < vf_rxq_count(vf); i++) {
+ rc = bnx2x_vf_queue_flr(bp, vf, i);
+ if (rc)
+ goto out;
+ }
- /* release VF resources */
- bnx2x_vf_free_resc(bp, vf);
+ /* remove multicasts */
+ bnx2x_vf_mcast(bp, vf, NULL, 0, true);
- /* re-open the mailbox */
- bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
+ /* dispatch final cleanup and wait for HW queues to flush */
+ bnx2x_vf_flr_clnup_hw(bp, vf);
- goto op_done;
- default:
- bnx2x_vfop_default(state);
- }
-op_err:
- BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
-op_done:
- vf->flr_clnup_stage = VF_FLR_ACK;
- bnx2x_vfop_end(bp, vf, vfop);
- bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
-}
+ /* release VF resources */
+ bnx2x_vf_free_resc(bp, vf);
-static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- vfop_handler_t done)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- if (vfop) {
- vfop->args.qx.qid = -1; /* loop */
- bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
- bnx2x_vfop_flr, done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
- }
- return -ENOMEM;
+ /* re-open the mailbox */
+ bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
+ return;
+out:
+ BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
+ vf->abs_vfid, i, rc);
}
-static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
+static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
{
- int i = prev_vf ? prev_vf->index + 1 : 0;
struct bnx2x_virtf *vf;
+ int i;
- /* find next VF to cleanup */
-next_vf_to_clean:
- for (;
- i < BNX2X_NR_VIRTFN(bp) &&
- (bnx2x_vf(bp, i, state) != VF_RESET ||
- bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
- i++)
- ;
+ for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
+ /* VF should be RESET & in FLR cleanup states */
+ if (bnx2x_vf(bp, i, state) != VF_RESET ||
+ !bnx2x_vf(bp, i, flr_clnup_stage))
+ continue;
- DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i,
- BNX2X_NR_VIRTFN(bp));
+ DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
+ i, BNX2X_NR_VIRTFN(bp));
- if (i < BNX2X_NR_VIRTFN(bp)) {
vf = BP_VF(bp, i);
/* lock the vf pf channel */
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
/* invoke the VF FLR SM */
- if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
- BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
- vf->abs_vfid);
-
- /* mark the VF to be ACKED and continue */
- vf->flr_clnup_stage = VF_FLR_ACK;
- goto next_vf_to_clean;
- }
- return;
- }
-
- /* we are done, update vf records */
- for_each_vf(bp, i) {
- vf = BP_VF(bp, i);
-
- if (vf->flr_clnup_stage != VF_FLR_ACK)
- continue;
+ bnx2x_vf_flr(bp, vf);
- vf->flr_clnup_stage = VF_FLR_EPILOG;
+ /* mark the VF to be ACKED and continue */
+ vf->flr_clnup_stage = false;
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
}
/* Acknowledge the handled VFs.
@@ -1693,7 +1027,7 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
if (reset) {
/* set as reset and ready for cleanup */
vf->state = VF_RESET;
- vf->flr_clnup_stage = VF_FLR_CLN;
+ vf->flr_clnup_stage = true;
DP(BNX2X_MSG_IOV,
"Initiating Final cleanup for VF %d\n",
@@ -1702,7 +1036,7 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
}
/* do the FLR cleanup for all marked VFs*/
- bnx2x_vf_flr_clnup(bp, NULL);
+ bnx2x_vf_flr_clnup(bp);
}
/* IOV global initialization routines */
@@ -1726,8 +1060,7 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
/* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match
* the Pf doorbell size although the 2 are independent.
*/
- REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
- BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);
+ REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);
/* No security checks for now -
* configure single rule (out of 16) mask = 0x1, value = 0x0,
@@ -1738,20 +1071,16 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
- /* set the number of VF allowed doorbells to the full DQ range */
- REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);
-
- /* set the VF doorbell threshold */
- REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
+ /* set the VF doorbell threshold. This threshold represents the number
+ * of doorbells allowed in the main DORQ fifo for a specific VF.
+ */
+ REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64);
}
void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
- DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
- if (!IS_SRIOV(bp))
- return;
-
- REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
+ if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
+ REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}
static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
@@ -1797,7 +1126,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
int sb_id;
u32 val;
- u8 fid;
+ u8 fid, current_pf = 0;
/* IGU in normal mode - read CAM */
for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
@@ -1805,16 +1134,18 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
continue;
fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
- if (!(fid & IGU_FID_ENCODE_IS_PF))
+ if (fid & IGU_FID_ENCODE_IS_PF)
+ current_pf = fid & IGU_FID_PF_NUM_MASK;
+ else if (current_pf == BP_FUNC(bp))
bnx2x_vf_set_igu_info(bp, sb_id,
(fid & IGU_FID_VF_NUM_MASK));
-
DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
(fid & IGU_FID_VF_NUM_MASK)), sb_id,
GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
}
+ DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
}
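The loop above records a status block for a VF only when the most recently seen PF entry in the CAM matches the current PF. Below is a minimal userspace sketch of that decode; the masks and shifts are illustrative stand-ins, not the real IGU_REG_MAPPING_MEMORY_* definitions.

    #include <stdint.h>
    #include <stdio.h>

    #define CAM_VALID       (1u << 0)          /* assumed bit layout */
    #define CAM_FID_SHIFT   1
    #define CAM_FID_MASK    (0x7fu << CAM_FID_SHIFT)
    #define FID_IS_PF       (1u << 6)          /* inside the FID field */
    #define FID_NUM_MASK    0x3fu

    int main(void)
    {
        uint32_t cam[] = { 0x81, 0x05, 0x07 }; /* one PF entry, two VF entries */
        uint32_t current_pf = 0, my_pf = 0;

        for (unsigned i = 0; i < sizeof(cam) / sizeof(cam[0]); i++) {
            uint32_t fid;

            if (!(cam[i] & CAM_VALID))
                continue;
            fid = (cam[i] & CAM_FID_MASK) >> CAM_FID_SHIFT;
            if (fid & FID_IS_PF)
                current_pf = fid & FID_NUM_MASK;   /* remember the owner PF */
            else if (current_pf == my_pf)          /* a VF of our PF */
                printf("sb %u -> vf %u\n", i,
                       (unsigned)(fid & FID_NUM_MASK));
        }
        return 0;
    }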
static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
@@ -1880,23 +1211,11 @@ static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
return 0;
}
-static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
-{
- int i;
- u8 queue_count = 0;
-
- if (IS_SRIOV(bp))
- for_each_vf(bp, i)
- queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);
-
- return queue_count;
-}
-
/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
- int num_vfs_param)
+ int num_vfs_param)
{
- int err, i, qcount;
+ int err, i;
struct bnx2x_sriov *iov;
struct pci_dev *dev = bp->pdev;
@@ -1986,7 +1305,6 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
bnx2x_vf(bp, i, index) = i;
bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
bnx2x_vf(bp, i, state) = VF_FREE;
- INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
mutex_init(&bnx2x_vf(bp, i, op_mutex));
bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
}
@@ -1994,18 +1312,22 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
bnx2x_get_vf_igu_cam_info(bp);
- /* get the total queue count and allocate the global queue arrays */
- qcount = bnx2x_iov_get_max_queue_count(bp);
-
/* allocate the queue arrays for all VFs */
- bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
- GFP_KERNEL);
+ bp->vfdb->vfqs = kzalloc(
+ BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
+ GFP_KERNEL);
+
+ DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);
+
if (!bp->vfdb->vfqs) {
BNX2X_ERR("failed to allocate vf queue array\n");
err = -ENOMEM;
goto failed;
}
+ /* Prepare the VFs event synchronization mechanism */
+ mutex_init(&bp->vfdb->event_mutex);
+
return 0;
failed:
DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
@@ -2015,6 +1337,8 @@ failed:
void bnx2x_iov_remove_one(struct bnx2x *bp)
{
+ int vf_idx;
+
/* if SRIOV is not enabled there's nothing to do */
if (!IS_SRIOV(bp))
return;
@@ -2023,6 +1347,18 @@ void bnx2x_iov_remove_one(struct bnx2x *bp)
pci_disable_sriov(bp->pdev);
DP(BNX2X_MSG_IOV, "sriov disabled\n");
+ /* disable access to all VFs */
+ for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
+ bnx2x_pretend_func(bp,
+ HW_VF_HANDLE(bp,
+ bp->vfdb->sriov.first_vf_in_pf +
+ vf_idx));
+ DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
+ bp->vfdb->sriov.first_vf_in_pf + vf_idx);
+ bnx2x_vf_enable_internal(bp, 0);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+ }
+
/* free vf database */
__bnx2x_iov_free_vfdb(bp);
}
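The pretend mechanism used above follows a strict save/act/restore shape: assume the VF's identity, perform the register access, and always return to the PF's own function before proceeding. A self-contained sketch of that discipline; the types and helper names here are stand-ins, not driver API.

    #include <stdio.h>

    struct dev { unsigned cur_func; };

    static void set_func(struct dev *d, unsigned f) /* ~ bnx2x_pretend_func */
    {
        d->cur_func = f;
    }

    static void act_as(struct dev *d, unsigned tmp, void (*act)(struct dev *))
    {
        unsigned saved = d->cur_func;

        set_func(d, tmp);   /* register accesses now hit the VF's context */
        act(d);
        set_func(d, saved); /* never leave the device pretending */
    }

    static void disable_internal(struct dev *d)
    {
        printf("disabling internal access for func %u\n", d->cur_func);
    }

    int main(void)
    {
        struct dev d = { .cur_func = 0 };

        for (unsigned vf = 8; vf < 10; vf++) /* e.g. first_vf_in_pf + idx */
            act_as(&d, vf, disable_internal);
        return 0;
    }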
@@ -2070,7 +1406,9 @@ int bnx2x_iov_alloc_mem(struct bnx2x *bp)
cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
if (cxt->size) {
- BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
+ cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
+ if (!cxt->addr)
+ goto alloc_mem_err;
} else {
cxt->addr = NULL;
cxt->mapping = 0;
@@ -2080,20 +1418,28 @@ int bnx2x_iov_alloc_mem(struct bnx2x *bp)
/* allocate vfs ramrods dma memory - client_init and set_mac */
tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
- BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
- tot_size);
+ BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
+ tot_size);
+ if (!BP_VFDB(bp)->sp_dma.addr)
+ goto alloc_mem_err;
BP_VFDB(bp)->sp_dma.size = tot_size;
/* allocate mailboxes */
tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
- BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
- tot_size);
+ BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
+ tot_size);
+ if (!BP_VF_MBX_DMA(bp)->addr)
+ goto alloc_mem_err;
+
BP_VF_MBX_DMA(bp)->size = tot_size;
/* allocate local bulletin boards */
tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
- BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr,
- &BP_VF_BULLETIN_DMA(bp)->mapping, tot_size);
+ BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
+ tot_size);
+ if (!BP_VF_BULLETIN_DMA(bp)->addr)
+ goto alloc_mem_err;
+
BP_VF_BULLETIN_DMA(bp)->size = tot_size;
return 0;
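The allocation pattern changes here from a macro that jumped to alloc_mem_err internally to a helper that returns the mapped address, leaving the NULL check and the goto visible at the call site. A sketch of the resulting shape, with plain calloc() standing in for the DMA allocator:

    #include <stdlib.h>
    #include <string.h>

    struct buf { void *addr; size_t size; };

    static void *buf_alloc(struct buf *b, size_t size)
    {
        b->addr = calloc(1, size);  /* stand-in for a DMA allocation */
        b->size = b->addr ? size : 0;
        return b->addr;
    }

    static int alloc_all(struct buf *sp, struct buf *mbx)
    {
        memset(sp, 0, sizeof(*sp));
        memset(mbx, 0, sizeof(*mbx));

        if (!buf_alloc(sp, 128))
            goto alloc_mem_err;
        if (!buf_alloc(mbx, 256))
            goto alloc_mem_err;
        return 0;

    alloc_mem_err:
        free(sp->addr);  /* free(NULL) is a no-op, one unwind path suffices */
        free(mbx->addr);
        return -1;
    }

    int main(void)
    {
        struct buf a, b;

        return alloc_all(&a, &b);
    }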
@@ -2119,50 +1465,18 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
bnx2x_vf_sp_map(bp, vf, q_data),
q_type);
- DP(BNX2X_MSG_IOV,
- "initialized vf %d's queue object. func id set to %d\n",
- vf->abs_vfid, q->sp_obj.func_id);
-
- /* mac/vlan objects are per queue, but only those
- * that belong to the leading queue are initialized
- */
- if (vfq_is_leading(q)) {
- /* mac */
- bnx2x_init_mac_obj(bp, &q->mac_obj,
- cl_id, q->cid, func_id,
- bnx2x_vf_sp(bp, vf, mac_rdata),
- bnx2x_vf_sp_map(bp, vf, mac_rdata),
- BNX2X_FILTER_MAC_PENDING,
- &vf->filter_state,
- BNX2X_OBJ_TYPE_RX_TX,
- &bp->macs_pool);
- /* vlan */
- bnx2x_init_vlan_obj(bp, &q->vlan_obj,
- cl_id, q->cid, func_id,
- bnx2x_vf_sp(bp, vf, vlan_rdata),
- bnx2x_vf_sp_map(bp, vf, vlan_rdata),
- BNX2X_FILTER_VLAN_PENDING,
- &vf->filter_state,
- BNX2X_OBJ_TYPE_RX_TX,
- &bp->vlans_pool);
-
- /* mcast */
- bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
- q->cid, func_id, func_id,
- bnx2x_vf_sp(bp, vf, mcast_rdata),
- bnx2x_vf_sp_map(bp, vf, mcast_rdata),
- BNX2X_FILTER_MCAST_PENDING,
- &vf->filter_state,
- BNX2X_OBJ_TYPE_RX_TX);
+ /* sp indication is set only when vlan/mac/etc. are initialized */
+ q->sp_initialized = false;
- vf->leading_rss = cl_id;
- }
+ DP(BNX2X_MSG_IOV,
+ "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
+ vf->abs_vfid, q->sp_obj.func_id, q->cid);
}
/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
- int vfid, qcount, i;
+ int vfid;
if (!IS_SRIOV(bp)) {
DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
@@ -2191,13 +1505,9 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
/* init statically provisioned resources */
- bnx2x_iov_static_resc(bp, &vf->alloc_resc);
+ bnx2x_iov_static_resc(bp, vf);
/* queues are initialized during VF-ACQUIRE */
-
- /* reserve the vf vlan credit */
- bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));
-
vf->filter_state = 0;
vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
@@ -2206,6 +1516,7 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
* It needs to be initialized here so that it can be safely
* handled by a subsequent FLR flow.
*/
+ vf->mcast_list_len = 0;
bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
0xFF, 0xFF, 0xFF,
bnx2x_vf_sp(bp, vf, mcast_rdata),
@@ -2227,13 +1538,12 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
}
/* Final VF init */
- qcount = 0;
- for_each_vf(bp, i) {
- struct bnx2x_virtf *vf = BP_VF(bp, i);
+ for_each_vf(bp, vfid) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vfid);
/* fill in the BDF and bars */
- vf->bus = bnx2x_vf_bus(bp, i);
- vf->devfn = bnx2x_vf_devfn(bp, i);
+ vf->bus = bnx2x_vf_bus(bp, vfid);
+ vf->devfn = bnx2x_vf_devfn(bp, vfid);
bnx2x_vf_set_bars(bp, vf);
DP(BNX2X_MSG_IOV,
@@ -2242,10 +1552,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
(unsigned)vf->bars[0].bar, vf->bars[0].size,
(unsigned)vf->bars[1].bar, vf->bars[1].size,
(unsigned)vf->bars[2].bar, vf->bars[2].size);
-
- /* set local queue arrays */
- vf->vfqs = &bp->vfdb->vfqs[qcount];
- qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
}
return 0;
@@ -2261,7 +1567,7 @@ int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
/* release all the VFs */
for_each_vf(bp, i)
- bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */
+ bnx2x_vf_release(bp, BP_VF(bp, i));
return 0;
}
@@ -2346,9 +1652,15 @@ static
void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
struct bnx2x_virtf *vf)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
+}
+
+static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);
}
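clear_bit() on its own implies no memory barrier, which is why the filters handler brackets it with smp_mb__before_atomic()/smp_mb__after_atomic(). In userspace C11 the equivalent full ordering can be expressed as a single sequentially consistent read-modify-write; a minimal sketch, with the flag name mirroring (not taken from) the driver:

    #include <stdatomic.h>
    #include <stdio.h>

    #define FILTER_RX_MODE_PENDING (1u << 0)

    static atomic_uint filter_state = FILTER_RX_MODE_PENDING;

    int main(void)
    {
        /* one seq_cst RMW orders everything before and after the clear,
         * mirroring smp_mb__before_atomic(); clear_bit();
         * smp_mb__after_atomic();
         */
        atomic_fetch_and(&filter_state, ~FILTER_RX_MODE_PENDING);
        printf("state %u\n", atomic_load(&filter_state));
        return 0;
    }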
int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
@@ -2375,6 +1687,7 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
case EVENT_RING_OPCODE_MULTICAST_RULES:
case EVENT_RING_OPCODE_FILTERS_RULES:
+ case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
cid = (elem->message.data.eth_event.echo &
BNX2X_SWCID_MASK);
DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
@@ -2386,8 +1699,9 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
goto get_vf;
case EVENT_RING_OPCODE_MALICIOUS_VF:
abs_vfid = elem->message.data.malicious_vf_event.vf_id;
- DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
- abs_vfid, elem->message.data.malicious_vf_event.err_id);
+ BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
+ abs_vfid,
+ elem->message.data.malicious_vf_event.err_id);
goto get_vf;
default:
return 1;
@@ -2438,19 +1752,15 @@ get_vf:
vf->abs_vfid, qidx);
bnx2x_vf_handle_filters_eqe(bp, vf);
break;
+ case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
+ DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
+ vf->abs_vfid, qidx);
+ bnx2x_vf_handle_rss_update_eqe(bp, vf);
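+ /* fall through - no extra handling is needed for the events below */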
case EVENT_RING_OPCODE_VF_FLR:
- DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
- vf->abs_vfid);
- /* Do nothing for now */
- break;
case EVENT_RING_OPCODE_MALICIOUS_VF:
- DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d error id %x\n",
- abs_vfid, elem->message.data.malicious_vf_event.err_id);
/* Do nothing for now */
- break;
+ return 0;
}
- /* SRIOV: reschedule any 'in_progress' operations */
- bnx2x_iov_sp_event(bp, cid, false);
return 0;
}
@@ -2487,23 +1797,6 @@ void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
}
}
-void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
-{
- struct bnx2x_virtf *vf;
-
- /* check if the cid is the VF range */
- if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
- return;
-
- vf = bnx2x_vf_by_cid(bp, vf_cid);
- if (vf) {
- /* set in_progress flag */
- atomic_set(&vf->op_in_progress, 1);
- if (queue_work)
- queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
- }
-}
-
void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
{
int i;
@@ -2524,10 +1817,10 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
(is_fcoe ? 0 : 1);
- DP(BNX2X_MSG_IOV,
- "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
- BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
- first_queue_query_index + num_queues_req);
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+ "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
+ BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
+ first_queue_query_index + num_queues_req);
cur_data_offset = bp->fw_stats_data_mapping +
offsetof(struct bnx2x_fw_stats_data, queue_stats) +
@@ -2541,9 +1834,9 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
struct bnx2x_virtf *vf = BP_VF(bp, i);
if (vf->state != VF_ENABLED) {
- DP(BNX2X_MSG_IOV,
- "vf %d not enabled so no stats for it\n",
- vf->abs_vfid);
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+ "vf %d not enabled so no stats for it\n",
+ vf->abs_vfid);
continue;
}
@@ -2551,6 +1844,9 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
for_each_vfq(vf, j) {
struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
+ dma_addr_t q_stats_addr =
+ vf->fw_stat_map + j * vf->stats_stride;
+
/* collect stats from active queues only */
if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
BNX2X_Q_LOGICAL_STATE_STOPPED)
@@ -2558,13 +1854,13 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
/* create stats query entry for this queue */
cur_query_entry->kind = STATS_TYPE_QUEUE;
- cur_query_entry->index = vfq_cl_id(vf, rxq);
+ cur_query_entry->index = vfq_stat_id(vf, rxq);
cur_query_entry->funcID =
cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
cur_query_entry->address.hi =
- cpu_to_le32(U64_HI(vf->fw_stat_map));
+ cpu_to_le32(U64_HI(q_stats_addr));
cur_query_entry->address.lo =
- cpu_to_le32(U64_LO(vf->fw_stat_map));
+ cpu_to_le32(U64_LO(q_stats_addr));
DP(BNX2X_MSG_IOV,
"added address %x %x for vf %d queue %d client %d\n",
cur_query_entry->address.hi,
@@ -2573,30 +1869,13 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
cur_query_entry++;
cur_data_offset += sizeof(struct per_queue_stats);
stats_count++;
- }
- }
- bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
-}
-
-void bnx2x_iov_sp_task(struct bnx2x *bp)
-{
- int i;
-
- if (!IS_SRIOV(bp))
- return;
- /* Iterate over all VFs and invoke state transition for VFs with
- * 'in-progress' slow-path operations
- */
- DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
- for_each_vf(bp, i) {
- struct bnx2x_virtf *vf = BP_VF(bp, i);
- if (!list_empty(&vf->op_list_head) &&
- atomic_read(&vf->op_in_progress)) {
- DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
- bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
+ /* all stats are coalesced to the leading queue */
+ if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
+ break;
}
}
+ bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
}
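Each Rx queue's statistics buffer is now addressed individually as fw_stat_map + qid * stats_stride, and a VF flagged VF_CFG_STATS_COALESCE contributes only its leading queue. A small arithmetic sketch; the base and stride values are examples, not hardware constants.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t fw_stat_map = 0x1f400000; /* example DMA base */
        uint64_t stats_stride = 0x200;     /* example per-queue stride */
        int rxq_count = 4, coalesce = 0;

        for (int j = 0; j < rxq_count; j++) {
            printf("queue %d stats buffer at 0x%llx\n", j,
                   (unsigned long long)(fw_stat_map + j * stats_stride));
            if (coalesce) /* ~ VF_CFG_STATS_COALESCE: single buffer */
                break;
        }
        return 0;
    }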
static inline
@@ -2659,11 +1938,12 @@ int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
+ /* Save a vlan filter for the Hypervisor */
return ((req_resc->num_rxqs <= rxq_cnt) &&
(req_resc->num_txqs <= txq_cnt) &&
(req_resc->num_sbs <= vf_sb_count(vf)) &&
(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
- (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
+ (req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));
}
/* CORE VF API */
@@ -2719,14 +1999,14 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
if (resc->num_mac_filters)
vf_mac_rules_cnt(vf) = resc->num_mac_filters;
- if (resc->num_vlan_filters)
- vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;
+ /* Add an additional vlan filter credit for the hypervisor */
+ bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1);
DP(BNX2X_MSG_IOV,
"Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
vf_sb_count(vf), vf_rxq_count(vf),
vf_txq_count(vf), vf_mac_rules_cnt(vf),
- vf_vlan_rules_cnt(vf));
+ vf_vlan_rules_visible_cnt(vf));
/* Initialize the queues */
if (!vf->vfqs) {
@@ -2738,7 +2018,7 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_queue *q = vfq_get(vf, i);
if (!q) {
- DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
+ BNX2X_ERR("q number %d was not allocated\n", i);
return -EINVAL;
}
@@ -2822,92 +2102,66 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
return 0;
}
-/* VFOP close (teardown the queues, delete mcasts and close HW) */
-static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
- enum bnx2x_vfop_close_state state = vfop->state;
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vfop_close,
- .block = false,
- };
+struct set_vf_state_cookie {
+ struct bnx2x_virtf *vf;
+ u8 state;
+};
- if (vfop->rc < 0)
- goto op_err;
+static void bnx2x_set_vf_state(void *cookie)
+{
+ struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+ p->vf->state = p->state;
+}
- switch (state) {
- case BNX2X_VFOP_CLOSE_QUEUES:
+int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int rc = 0, i;
- if (++(qx->qid) < vf_rxq_count(vf)) {
- vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
- if (vfop->rc)
- goto op_err;
- return;
- }
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
- /* remove multicasts */
- vfop->state = BNX2X_VFOP_CLOSE_HW;
- vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
- if (vfop->rc)
+ /* Close all queues */
+ for (i = 0; i < vf_rxq_count(vf); i++) {
+ rc = bnx2x_vf_queue_teardown(bp, vf, i);
+ if (rc)
goto op_err;
- return;
+ }
- case BNX2X_VFOP_CLOSE_HW:
+ /* disable the interrupts */
+ DP(BNX2X_MSG_IOV, "disabling igu\n");
+ bnx2x_vf_igu_disable(bp, vf);
- /* disable the interrupts */
- DP(BNX2X_MSG_IOV, "disabling igu\n");
- bnx2x_vf_igu_disable(bp, vf);
+ /* disable the VF */
+ DP(BNX2X_MSG_IOV, "clearing qtbl\n");
+ bnx2x_vf_clr_qtbl(bp, vf);
- /* disable the VF */
- DP(BNX2X_MSG_IOV, "clearing qtbl\n");
- bnx2x_vf_clr_qtbl(bp, vf);
+ /* need to make sure there are no outstanding stats ramrods which may
+ * cause the device to access the VF's stats buffer, which the VF will
+ * free as soon as we return from the close flow.
+ */
+ {
+ struct set_vf_state_cookie cookie;
- goto op_done;
- default:
- bnx2x_vfop_default(state);
+ cookie.vf = vf;
+ cookie.state = VF_ACQUIRED;
+ bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
}
-op_err:
- BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
-op_done:
- vf->state = VF_ACQUIRED;
+
DP(BNX2X_MSG_IOV, "set state to acquired\n");
- bnx2x_vfop_end(bp, vf, vfop);
-}
-int bnx2x_vfop_close_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- if (vfop) {
- vfop->args.qx.qid = -1; /* loop */
- bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
- bnx2x_vfop_close, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
- cmd->block);
- }
- return -ENOMEM;
+ return 0;
+op_err:
+ BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc);
+ return rc;
}
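bnx2x_stats_safe_exec() runs a callback while the statistics machinery is quiesced; since the callback takes only a void *, its arguments travel in a cookie struct. The idiom in isolation, where safe_exec() below is a plain stand-in for the driver helper:

    #include <stdio.h>

    struct set_state_cookie {
        int *state;
        int value;
    };

    static void set_state(void *cookie)
    {
        struct set_state_cookie *p = cookie;

        *p->state = p->value;
    }

    /* stand-in: the real helper serializes against in-flight stats DMA */
    static void safe_exec(void (*func)(void *), void *cookie)
    {
        func(cookie);
    }

    int main(void)
    {
        int vf_state = 2;                             /* ~ VF_ENABLED */
        struct set_state_cookie c = { &vf_state, 1 }; /* ~ VF_ACQUIRED */

        safe_exec(set_state, &c);
        printf("vf state is now %d\n", vf_state);
        return 0;
    }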
/* VF release can be called either: 1. The VF was acquired but
* not enabled 2. the vf was enabled or in the process of being
* enabled
*/
-static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
+int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vfop_release,
- .block = false,
- };
-
- DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
-
- if (vfop->rc < 0)
- goto op_err;
+ int rc;
DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
vf->state == VF_FREE ? "Free" :
@@ -2918,62 +2172,87 @@ static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
switch (vf->state) {
case VF_ENABLED:
- vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
- if (vfop->rc)
+ rc = bnx2x_vf_close(bp, vf);
+ if (rc)
goto op_err;
- return;
-
+ /* Fallthrough to release resources */
case VF_ACQUIRED:
DP(BNX2X_MSG_IOV, "about to free resources\n");
bnx2x_vf_free_resc(bp, vf);
- DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
- goto op_done;
+ break;
case VF_FREE:
case VF_RESET:
- /* do nothing */
- goto op_done;
default:
- bnx2x_vfop_default(vf->state);
+ break;
}
+ return 0;
op_err:
- BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc);
-op_done:
- bnx2x_vfop_end(bp, vf, vfop);
+ BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc);
+ return rc;
}
-int bnx2x_vfop_release_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd)
+int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_config_rss_params *rss)
+{
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+ set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags);
+ return bnx2x_config_rss(bp, rss);
+}
+
+int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct vfpf_tpa_tlv *tlv,
+ struct bnx2x_queue_update_tpa_params *params)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- if (vfop) {
- bnx2x_vfop_opset(-1, /* use vf->state */
- bnx2x_vfop_release, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release,
- cmd->block);
+ aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr;
+ struct bnx2x_queue_state_params qstate;
+ int qid, rc = 0;
+
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+ /* Set ramrod params */
+ memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
+ memcpy(&qstate.params.update_tpa, params,
+ sizeof(struct bnx2x_queue_update_tpa_params));
+ qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
+ set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
+
+ for (qid = 0; qid < vf_rxq_count(vf); qid++) {
+ qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ qstate.params.update_tpa.sge_map = sge_addr[qid];
+ DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n",
+ vf->abs_vfid, qid, U64_HI(sge_addr[qid]),
+ U64_LO(sge_addr[qid]));
+ rc = bnx2x_queue_state_change(bp, &qstate);
+ if (rc) {
+ BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n",
+ U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]),
+ vf->abs_vfid, qid);
+ return rc;
+ }
}
- return -ENOMEM;
+
+ return rc;
}
/* VF release ~ VF close + VF release-resources
* Release is the ultimate SW shutdown and is called whenever an
* irrecoverable error is encountered.
*/
-void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
+int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
- struct bnx2x_vfop_cmd cmd = {
- .done = NULL,
- .block = block,
- };
int rc;
+
+ DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
- rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
+ rc = bnx2x_vf_free(bp, vf);
if (rc)
WARN(rc,
"VF[%d] Failed to allocate resources for release op- rc=%d\n",
vf->abs_vfid, rc);
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
+ return rc;
}
static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
@@ -2982,19 +2261,15 @@ static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
*sbdf = vf->devfn | (vf->bus << 8);
}
-static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
- struct bnx2x_vf_bar_info *bar_info)
-{
- int n;
-
- bar_info->nr_bars = bp->vfdb->sriov.nres;
- for (n = 0; n < bar_info->nr_bars; n++)
- bar_info->bars[n] = vf->bars[n];
-}
-
void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
enum channel_tlvs tlv)
{
+ /* we don't lock the channel for unsupported tlvs */
+ if (!bnx2x_tlv_supported(tlv)) {
+ BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
+ return;
+ }
+
/* lock the channel */
mutex_lock(&vf->op_mutex);
@@ -3009,25 +2284,97 @@ void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
enum channel_tlvs expected_tlv)
{
+ enum channel_tlvs current_tlv;
+
+ if (!vf) {
+ BNX2X_ERR("VF was %p\n", vf);
+ return;
+ }
+
+ current_tlv = vf->op_current;
+
+ /* we don't unlock the channel for unsupported tlvs */
+ if (!bnx2x_tlv_supported(expected_tlv))
+ return;
+
WARN(expected_tlv != vf->op_current,
"lock mismatch: expected %d found %d", expected_tlv,
vf->op_current);
+ /* clear the record of the locking op */
+ vf->op_current = CHANNEL_TLV_NONE;
+
/* unlock the channel */
mutex_unlock(&vf->op_mutex);
/* log the unlock */
DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
vf->abs_vfid, current_tlv);
+}
- /* record the locking op */
- vf->op_current = CHANNEL_TLV_NONE;
+static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
+{
+ struct bnx2x_queue_state_params q_params;
+ u32 prev_flags;
+ int i, rc;
+
+ /* Verify changes are needed and record current Tx switching state */
+ prev_flags = bp->flags;
+ if (enable)
+ bp->flags |= TX_SWITCHING;
+ else
+ bp->flags &= ~TX_SWITCHING;
+ if (prev_flags == bp->flags)
+ return 0;
+
+ /* Verify state enables the sending of queue ramrods */
+ if ((bp->state != BNX2X_STATE_OPEN) ||
+ (bnx2x_get_q_logical_state(bp,
+ &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
+ BNX2X_Q_LOGICAL_STATE_ACTIVE))
+ return 0;
+
+ /* send q. update ramrod to configure Tx switching */
+ memset(&q_params, 0, sizeof(q_params));
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+ &q_params.params.update.update_flags);
+ if (enable)
+ __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
+ &q_params.params.update.update_flags);
+ else
+ __clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
+ &q_params.params.update.update_flags);
+
+ /* send the ramrod on all the queues of the PF */
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ /* Set the appropriate Queue object */
+ q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure Tx switching\n");
+ return rc;
+ }
+ }
+
+ DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
+ return 0;
}
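Recording prev_flags and comparing after the update makes the Tx-switching toggle idempotent: repeated calls with the same value send no ramrods. The control-flow skeleton on its own, with an illustrative flag bit:

    #include <stdbool.h>
    #include <stdio.h>

    #define TX_SWITCHING (1u << 0) /* illustrative bit, not the driver's */

    static unsigned flags;

    static int set_tx_switching(bool enable)
    {
        unsigned prev_flags = flags;

        if (enable)
            flags |= TX_SWITCHING;
        else
            flags &= ~TX_SWITCHING;
        if (prev_flags == flags)
            return 0;            /* no change: skip the queue updates */

        printf("updating all queues: tx switching %s\n",
               enable ? "on" : "off");
        return 0;
    }

    int main(void)
    {
        set_tx_switching(true);
        set_tx_switching(true);  /* second call is a silent no-op */
        return 0;
    }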
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
+ if (!IS_SRIOV(bp)) {
+ BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
+ return -EINVAL;
+ }
+
DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
num_vfs_param, BNX2X_NR_VIRTFN(bp));
@@ -3046,6 +2393,7 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
bp->requested_nr_virtfn = num_vfs_param;
if (num_vfs_param == 0) {
+ bnx2x_set_pf_tx_switching(bp, false);
pci_disable_sriov(dev);
return 0;
} else {
@@ -3053,9 +2401,87 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
}
}
+#define IGU_ENTRY_SIZE 4
+
int bnx2x_enable_sriov(struct bnx2x *bp)
{
int rc = 0, req_vfs = bp->requested_nr_virtfn;
+ int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
+ u32 igu_entry, address;
+ u16 num_vf_queues;
+
+ if (req_vfs == 0)
+ return 0;
+
+ first_vf = bp->vfdb->sriov.first_vf_in_pf;
+
+ /* statically distribute vf sb pool between VFs */
+ num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
+ BP_VFDB(bp)->vf_sbs_pool / req_vfs);
+
+ /* zero previous values learned from igu cam */
+ for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+ vf->sb_count = 0;
+ vf_sb_count(BP_VF(bp, vf_idx)) = 0;
+ }
+ bp->vfdb->vf_sbs_pool = 0;
+
+ /* prepare IGU cam */
+ sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
+ address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
+ for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
+ for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
+ igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
+ vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
+ IGU_REG_MAPPING_MEMORY_VALID;
+ DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
+ sb_idx, vf_idx);
+ REG_WR(bp, address, igu_entry);
+ sb_idx++;
+ address += IGU_ENTRY_SIZE;
+ }
+ }
+
+ /* Reinitialize vf database according to igu cam */
+ bnx2x_get_vf_igu_cam_info(bp);
+
+ DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
+ BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);
+
+ qcount = 0;
+ for_each_vf(bp, vf_idx) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+ /* set local queue arrays */
+ vf->vfqs = &bp->vfdb->vfqs[qcount];
+ qcount += vf_sb_count(vf);
+ bnx2x_iov_static_resc(bp, vf);
+ }
+
+ /* prepare msix vectors in VF configuration space - the value in the
+ * PCI configuration space should be the index of the last entry,
+ * namely one less than the actual size of the table
+ */
+ for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
+ REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
+ num_vf_queues - 1);
+ DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
+ vf_idx, num_vf_queues - 1);
+ }
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ /* enable sriov. This will probe all the VFs, and consequently cause
+ * the "acquire" messages to appear on the VF PF channel.
+ */
+ DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
+ bnx2x_disable_sriov(bp);
+
+ rc = bnx2x_set_pf_tx_switching(bp, true);
+ if (rc)
+ return rc;
rc = pci_enable_sriov(bp->pdev, req_vfs);
if (rc) {
@@ -3084,8 +2510,9 @@ void bnx2x_disable_sriov(struct bnx2x *bp)
pci_disable_sriov(bp->pdev);
}
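The static distribution in bnx2x_enable_sriov() gives each requested VF an equal share of the PF's status-block pool, capped at the per-VF queue maximum; any remainder simply stays unused. Worked numerically, with an illustrative cap value:

    #include <stdio.h>

    #define VF_MAX_QUEUES 16 /* illustrative cap, ~ BNX2X_VF_MAX_QUEUES */

    int main(void)
    {
        unsigned vf_sbs_pool = 64, req_vfs = 6;
        unsigned num_vf_queues = vf_sbs_pool / req_vfs;

        if (num_vf_queues > VF_MAX_QUEUES)
            num_vf_queues = VF_MAX_QUEUES;

        /* 64 / 6 = 10 queues per VF; 64 - 6 * 10 = 4 SBs left over */
        printf("%u VFs get %u queues each, %u SBs unused\n",
               req_vfs, num_vf_queues,
               vf_sbs_pool - req_vfs * num_vf_queues);
        return 0;
    }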
-static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
- struct bnx2x_virtf *vf)
+static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
+ struct bnx2x_virtf **vf,
+ struct pf_vf_bulletin_content **bulletin)
{
if (bp->state != BNX2X_STATE_OPEN) {
BNX2X_ERR("vf ndo called though PF is down\n");
@@ -3103,8 +2530,24 @@ static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
return -EINVAL;
}
- if (!vf) {
- BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
+ /* init members */
+ *vf = BP_VF(bp, vfidx);
+ *bulletin = BP_VF_BULLETIN(bp, vfidx);
+
+ if (!*vf) {
+ BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n",
+ vfidx);
+ return -EINVAL;
+ }
+
+ if (!(*vf)->vfqs) {
+ BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n",
+ vfidx);
+ return -EINVAL;
+ }
+
+ if (!*bulletin) {
+ BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n",
vfidx);
return -EINVAL;
}
@@ -3116,31 +2559,37 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
struct ifla_vf_info *ivi)
{
struct bnx2x *bp = netdev_priv(dev);
- struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
- struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
- struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
- struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+ struct bnx2x_virtf *vf = NULL;
+ struct pf_vf_bulletin_content *bulletin = NULL;
+ struct bnx2x_vlan_mac_obj *mac_obj;
+ struct bnx2x_vlan_mac_obj *vlan_obj;
int rc;
- /* sanity */
- rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+ /* sanity and init */
+ rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
if (rc)
return rc;
- if (!mac_obj || !vlan_obj || !bulletin) {
+ mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
+ vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+ if (!mac_obj || !vlan_obj) {
BNX2X_ERR("VF partially initialized\n");
return -EINVAL;
}
ivi->vf = vfidx;
ivi->qos = 0;
- ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */
+ ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */
+ ivi->min_tx_rate = 0;
ivi->spoofchk = 1; /*always enabled */
if (vf->state == VF_ENABLED) {
/* mac and vlan are in vlan_mac objects */
- mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
- 0, ETH_ALEN);
- vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan,
- 0, VLAN_HLEN);
+ if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
+ mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
+ 0, ETH_ALEN);
+ vlan_obj->get_n_elements(bp, vlan_obj, 1,
+ (u8 *)&ivi->vlan, 0,
+ VLAN_HLEN);
+ }
} else {
/* mac */
if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
@@ -3183,11 +2632,11 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
{
struct bnx2x *bp = netdev_priv(dev);
int rc, q_logical_state;
- struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
- struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+ struct bnx2x_virtf *vf = NULL;
+ struct pf_vf_bulletin_content *bulletin = NULL;
- /* sanity */
- rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+ /* sanity and init */
+ rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
if (rc)
return rc;
if (!is_valid_ether_addr(mac)) {
@@ -3208,30 +2657,36 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
return rc;
}
- /* is vf initialized and queue set up? */
q_logical_state =
- bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
+ bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
if (vf->state == VF_ENABLED &&
q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
/* configure the mac in device on this vf's queue */
unsigned long ramrod_flags = 0;
- struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
+ struct bnx2x_vlan_mac_obj *mac_obj;
+
+ /* User should be able to see failure reason in system logs */
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
/* must lock vfpf channel to protect against vf flows */
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
/* remove existing eth macs */
+ mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
if (rc) {
BNX2X_ERR("failed to delete eth macs\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
/* remove existing uc list macs */
rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
if (rc) {
BNX2X_ERR("failed to delete uc_list macs\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
/* configure the new mac to device */
@@ -3239,21 +2694,30 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
BNX2X_ETH_MAC, &ramrod_flags);
+out:
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
}
- return 0;
+ return rc;
}
int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
{
+ struct bnx2x_queue_state_params q_params = {NULL};
+ struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+ struct bnx2x_queue_update_params *update_params;
+ struct pf_vf_bulletin_content *bulletin = NULL;
+ struct bnx2x_rx_mode_ramrod_params rx_ramrod;
struct bnx2x *bp = netdev_priv(dev);
- int rc, q_logical_state;
- struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
- struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+ struct bnx2x_vlan_mac_obj *vlan_obj;
+ unsigned long vlan_mac_flags = 0;
+ unsigned long ramrod_flags = 0;
+ struct bnx2x_virtf *vf = NULL;
+ unsigned long accept_flags;
+ int rc;
- /* sanity */
- rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+ /* sanity and init */
+ rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
if (rc)
return rc;
@@ -3268,100 +2732,118 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
/* update PF's copy of the VF's bulletin. No point in posting the vlan
* to the VF since it doesn't have anything to do with it. But it is useful
* to store it here in case the VF is not up yet and we can only
- * configure the vlan later when it does.
+ * configure the vlan later when it does. Treat vlan id 0 as a request to
+ * remove the host tag.
*/
- bulletin->valid_bitmap |= 1 << VLAN_VALID;
+ if (vlan > 0)
+ bulletin->valid_bitmap |= 1 << VLAN_VALID;
+ else
+ bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
bulletin->vlan = vlan;
/* is vf initialized and queue set up? */
- q_logical_state =
- bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
- if (vf->state == VF_ENABLED &&
- q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
- /* configure the vlan in device on this vf's queue */
- unsigned long ramrod_flags = 0;
- unsigned long vlan_mac_flags = 0;
- struct bnx2x_vlan_mac_obj *vlan_obj =
- &bnx2x_vfq(vf, 0, vlan_obj);
- struct bnx2x_vlan_mac_ramrod_params ramrod_param;
- struct bnx2x_queue_state_params q_params = {NULL};
- struct bnx2x_queue_update_params *update_params;
+ if (vf->state != VF_ENABLED ||
+ bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
+ BNX2X_Q_LOGICAL_STATE_ACTIVE)
+ return rc;
- memset(&ramrod_param, 0, sizeof(ramrod_param));
+ /* User should be able to see error in system logs */
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
- /* must lock vfpf channel to protect against vf flows */
- bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+ /* must lock vfpf channel to protect against vf flows */
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
- /* remove existing vlans */
- __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
- rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
- &ramrod_flags);
- if (rc) {
- BNX2X_ERR("failed to delete vlans\n");
- return -EINVAL;
- }
+ /* remove existing vlans */
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+ rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
+ &ramrod_flags);
+ if (rc) {
+ BNX2X_ERR("failed to delete vlans\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* need to remove/add the VF's accept_any_vlan bit */
+ accept_flags = bnx2x_leading_vfq(vf, accept_flags);
+ if (vlan)
+ clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+ else
+ set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+ bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
+ accept_flags);
+ bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
+ bnx2x_config_rx_mode(bp, &rx_ramrod);
+
+ /* configure the new vlan to device */
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ ramrod_param.vlan_mac_obj = vlan_obj;
+ ramrod_param.ramrod_flags = ramrod_flags;
+ set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &ramrod_param.user_req.vlan_mac_flags);
+ ramrod_param.user_req.u.vlan.vlan = vlan;
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+ if (rc) {
+ BNX2X_ERR("failed to configure vlan\n");
+ rc = -EINVAL;
+ goto out;
+ }
- /* send queue update ramrod to configure default vlan and silent
- * vlan removal
+ /* send queue update ramrod to configure default vlan and silent
+ * vlan removal
+ */
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
+ update_params = &q_params.params.update;
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ &update_params->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ &update_params->update_flags);
+ if (vlan == 0) {
+ /* if vlan is 0 then we want to leave the VF traffic
+ * untagged, and leave the incoming traffic untouched
+ * (i.e. do not remove any vlan tags).
+ */
+ __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ &update_params->update_flags);
+ __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &update_params->update_flags);
+ } else {
+ /* configure default vlan to vf queue and set silent
+ * vlan removal (the vf remains unaware of this vlan).
*/
- __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
- q_params.cmd = BNX2X_Q_CMD_UPDATE;
- q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj);
- update_params = &q_params.params.update;
- __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
&update_params->update_flags);
- __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
&update_params->update_flags);
+ update_params->def_vlan = vlan;
+ update_params->silent_removal_value =
+ vlan & VLAN_VID_MASK;
+ update_params->silent_removal_mask = VLAN_VID_MASK;
+ }
- if (vlan == 0) {
- /* if vlan is 0 then we want to leave the VF traffic
- * untagged, and leave the incoming traffic untouched
- * (i.e. do not remove any vlan tags).
- */
- __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
- &update_params->update_flags);
- __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
- &update_params->update_flags);
- } else {
- /* configure the new vlan to device */
- __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
- ramrod_param.vlan_mac_obj = vlan_obj;
- ramrod_param.ramrod_flags = ramrod_flags;
- ramrod_param.user_req.u.vlan.vlan = vlan;
- ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
- rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
- if (rc) {
- BNX2X_ERR("failed to configure vlan\n");
- return -EINVAL;
- }
-
- /* configure default vlan to vf queue and set silent
- * vlan removal (the vf remains unaware of this vlan).
- */
- update_params = &q_params.params.update;
- __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
- &update_params->update_flags);
- __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
- &update_params->update_flags);
- update_params->def_vlan = vlan;
- }
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure default VLAN\n");
+ goto out;
+ }
- /* Update the Queue state */
- rc = bnx2x_queue_state_change(bp, &q_params);
- if (rc) {
- BNX2X_ERR("Failed to configure default VLAN\n");
- return rc;
- }
- /* clear the flag indicating that this VF needs its vlan
- * (will only be set if the HV configured th Vlan before vf was
- * and we were called because the VF came up later
- */
- vf->cfg_flags &= ~VF_CFG_VLAN;
+ /* clear the flag indicating that this VF needs its vlan
+ * (will only be set if the HV configured the vlan before the vf was
+ * up and we were called because the VF came up later)
+ */
+out:
+ vf->cfg_flags &= ~VF_CFG_VLAN;
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
- bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
- }
- return 0;
+ return rc;
}
/* crc is the first field in the bulletin board. Compute the crc over the
@@ -3411,7 +2893,7 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
/* the mac address in bulletin board is valid and is new */
if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
- memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) {
+ !ether_addr_equal(bulletin.mac, bp->old_bulletin.mac)) {
/* update new mac to net device */
memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
}
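The bulletin board is written by the PF and read by the VF without a lock; the reader copies it and trusts the copy only when the embedded crc matches, retrying otherwise. A userspace sketch of that snapshot-and-verify loop, with a toy checksum standing in for the crc32 the driver computes:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    struct bulletin {
        uint32_t crc;       /* must stay the first field */
        uint32_t version;
        uint8_t  mac[6];
    };

    static uint32_t body_sum(const struct bulletin *b) /* toy crc stand-in */
    {
        const uint8_t *p = (const uint8_t *)b + sizeof(b->crc);
        uint32_t s = 0;

        for (size_t i = 0; i < sizeof(*b) - sizeof(b->crc); i++)
            s += p[i];
        return s;
    }

    static int sample(const struct bulletin *shared, struct bulletin *out)
    {
        for (int tries = 0; tries < 5; tries++) { /* bounded retries */
            memcpy(out, shared, sizeof(*out));
            if (out->crc == body_sum(out))
                return 0;                         /* consistent snapshot */
        }
        return -1;                                /* channel looks dead */
    }

    int main(void)
    {
        struct bulletin shared = { .version = 1,
                                   .mac = { 0, 1, 2, 3, 4, 5 } };
        struct bulletin copy;

        shared.crc = body_sum(&shared);
        printf("sample %s\n", sample(&shared, &copy) ? "failed" : "ok");
        return 0;
    }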
@@ -3431,13 +2913,9 @@ void bnx2x_timer_sriov(struct bnx2x *bp)
bnx2x_sample_bulletin(bp);
/* if channel is down we need to self destruct */
- if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
- }
+ if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
+ BNX2X_MSG_IOV);
}
void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
@@ -3446,51 +2924,37 @@ void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
return bp->regview + PXP_VF_ADDR_DB_START;
}
+void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
+{
+ BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
+ sizeof(struct bnx2x_vf_mbx_msg));
+ BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
+ sizeof(union pf_vf_bulletin));
+}
+
int bnx2x_vf_pci_alloc(struct bnx2x *bp)
{
mutex_init(&bp->vf2pf_mutex);
/* allocate vf2pf mailbox for vf to pf channel */
- BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
- sizeof(struct bnx2x_vf_mbx_msg));
+ bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping,
+ sizeof(struct bnx2x_vf_mbx_msg));
+ if (!bp->vf2pf_mbox)
+ goto alloc_mem_err;
/* allocate pf 2 vf bulletin board */
- BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping,
- sizeof(union pf_vf_bulletin));
+ bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping,
+ sizeof(union pf_vf_bulletin));
+ if (!bp->pf2vf_bulletin)
+ goto alloc_mem_err;
return 0;
alloc_mem_err:
- BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
- sizeof(struct bnx2x_vf_mbx_msg));
- BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
- sizeof(union pf_vf_bulletin));
+ bnx2x_vf_pci_dealloc(bp);
return -ENOMEM;
}
-int bnx2x_open_epilog(struct bnx2x *bp)
-{
- /* Enable sriov via delayed work. This must be done via delayed work
- * because it causes the probe of the vf devices to be run, which invoke
- * register_netdevice which must have rtnl lock taken. As we are holding
- * the lock right now, that could only work if the probe would not take
- * the lock. However, as the probe of the vf may be called from other
- * contexts as well (such as passthrough to vm fails) it can't assume
- * the lock is being held for it. Using delayed work here allows the
- * probe code to simply take the lock (i.e. wait for it to be released
- * if it is being held). We only want to do this if the number of VFs
- * was set before PF driver was loaded.
- */
- if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
- }
-
- return 0;
-}
-
void bnx2x_iov_channel_down(struct bnx2x *bp)
{
int vf_idx;
@@ -3510,3 +2974,28 @@ void bnx2x_iov_channel_down(struct bnx2x *bp)
bnx2x_post_vf_bulletin(bp, vf_idx);
}
}
+
+void bnx2x_iov_task(struct work_struct *work)
+{
+ struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);
+
+ if (!netif_running(bp->dev))
+ return;
+
+ if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
+ &bp->iov_task_state))
+ bnx2x_vf_handle_flr_event(bp);
+
+ if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
+ &bp->iov_task_state))
+ bnx2x_vf_mbx(bp);
+}
+
+void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
+{
+ smp_mb__before_atomic();
+ set_bit(flag, &bp->iov_task_state);
+ smp_mb__after_atomic();
+ DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
+ queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
+}
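bnx2x_schedule_iov_task() publishes a request bit and queues the worker; the worker consumes each bit with test_and_clear_bit so that multiple schedulings collapse into one run. The flag protocol by itself in C11 atomics; the enum values are stand-ins for the driver's bnx2x_iov_flag:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum iov_flag { IOV_HANDLE_FLR, IOV_HANDLE_VF_MSG };

    static atomic_ulong iov_task_state;

    static void schedule_iov(enum iov_flag flag)
    {
        atomic_fetch_or(&iov_task_state, 1ul << flag); /* ~ set_bit + mb */
        /* ...then queue the work; re-queueing while set is harmless */
    }

    static bool test_and_clear(enum iov_flag flag)
    {
        return atomic_fetch_and(&iov_task_state, ~(1ul << flag)) &
               (1ul << flag);
    }

    static void iov_task(void)
    {
        if (test_and_clear(IOV_HANDLE_FLR))
            printf("handle FLR\n");        /* ~ bnx2x_vf_handle_flr_event */
        if (test_and_clear(IOV_HANDLE_VF_MSG))
            printf("handle VF message\n"); /* ~ bnx2x_vf_mbx */
    }

    int main(void)
    {
        schedule_iov(IOV_HANDLE_VF_MSG);
        schedule_iov(IOV_HANDLE_VF_MSG); /* coalesces with the first */
        iov_task();
        return 0;
    }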
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index d143a7cdbbb..96c575e147a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -12,9 +12,9 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Shmulik Ravid <shmulikr@broadcom.com>
- * Ariel Elior <ariele@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ * Ariel Elior <ariel.elior@qlogic.com>
*/
#ifndef BNX2X_SRIOV_H
#define BNX2X_SRIOV_H
@@ -30,6 +30,8 @@ enum sample_bulletin_result {
#ifdef CONFIG_BNX2X_SRIOV
+extern struct workqueue_struct *bnx2x_iov_wq;
+
/* The bnx2x device structure holds vfdb structure described below.
* The VF array is indexed by the relative vfid.
*/
@@ -74,6 +76,7 @@ struct bnx2x_vf_queue {
/* VLANs object */
struct bnx2x_vlan_mac_obj vlan_obj;
atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */
+ unsigned long accept_flags; /* last accept flags configured */
/* Queue Slow-path State object */
struct bnx2x_queue_sp_obj sp_obj;
@@ -81,108 +84,36 @@ struct bnx2x_vf_queue {
u32 cid;
u16 index;
u16 sb_idx;
+ bool is_leading;
+ bool sp_initialized;
};
-/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
- * q-init, q-setup and SB index
+/* struct bnx2x_vf_queue_construct_params - prepare queue construction
+ * parameters: q-init, q-setup and SB index
*/
-struct bnx2x_vfop_qctor_params {
+struct bnx2x_vf_queue_construct_params {
struct bnx2x_queue_state_params qstate;
struct bnx2x_queue_setup_params prep_qsetup;
};
-/* VFOP parameters (one copy per VF) */
-union bnx2x_vfop_params {
- struct bnx2x_vlan_mac_ramrod_params vlan_mac;
- struct bnx2x_rx_mode_ramrod_params rx_mode;
- struct bnx2x_mcast_ramrod_params mcast;
- struct bnx2x_config_rss_params rss;
- struct bnx2x_vfop_qctor_params qctor;
-};
-
/* forward */
struct bnx2x_virtf;
/* VFOP definitions */
-typedef void (*vfop_handler_t)(struct bnx2x *bp, struct bnx2x_virtf *vf);
-
-struct bnx2x_vfop_cmd {
- vfop_handler_t done;
- bool block;
-};
-/* VFOP queue filters command additional arguments */
-struct bnx2x_vfop_filter {
- struct list_head link;
+struct bnx2x_vf_mac_vlan_filter {
int type;
-#define BNX2X_VFOP_FILTER_MAC 1
-#define BNX2X_VFOP_FILTER_VLAN 2
+#define BNX2X_VF_FILTER_MAC 1
+#define BNX2X_VF_FILTER_VLAN 2
bool add;
u8 *mac;
u16 vid;
};
-struct bnx2x_vfop_filters {
- int add_cnt;
- struct list_head head;
- struct bnx2x_vfop_filter filters[];
-};
-
-/* transient list allocated, built and saved until its
- * passed to the SP-VERBs layer.
- */
-struct bnx2x_vfop_args_mcast {
- int mc_num;
- struct bnx2x_mcast_list_elem *mc;
-};
-
-struct bnx2x_vfop_args_qctor {
- int qid;
- u16 sb_idx;
-};
-
-struct bnx2x_vfop_args_qdtor {
- int qid;
- struct eth_context *cxt;
-};
-
-struct bnx2x_vfop_args_defvlan {
- int qid;
- bool enable;
- u16 vid;
- u8 prio;
-};
-
-struct bnx2x_vfop_args_qx {
- int qid;
- bool en_add;
-};
-
-struct bnx2x_vfop_args_filters {
- struct bnx2x_vfop_filters *multi_filter;
- atomic_t *credit; /* non NULL means 'don't consume credit' */
-};
-
-union bnx2x_vfop_args {
- struct bnx2x_vfop_args_mcast mc_list;
- struct bnx2x_vfop_args_qctor qctor;
- struct bnx2x_vfop_args_qdtor qdtor;
- struct bnx2x_vfop_args_defvlan defvlan;
- struct bnx2x_vfop_args_qx qx;
- struct bnx2x_vfop_args_filters filters;
-};
-
-struct bnx2x_vfop {
- struct list_head link;
- int rc; /* return code */
- int state; /* next state */
- union bnx2x_vfop_args args; /* extra arguments */
- union bnx2x_vfop_params *op_p; /* ramrod params */
-
- /* state machine callbacks */
- vfop_handler_t transition;
- vfop_handler_t done;
+struct bnx2x_vf_mac_vlan_filters {
+ int count;
+ struct bnx2x_vf_mac_vlan_filter filters[];
};
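bnx2x_vf_mac_vlan_filters ends in a C99 flexible array member, so a list of n filters is allocated as one block sized header plus n elements. A sketch of that sizing, with calloc standing in for kzalloc:

    #include <stdlib.h>

    struct mv_filter { int type; int add; unsigned short vid; };

    struct mv_filters {
        int count;
        struct mv_filter filters[]; /* flexible array member */
    };

    static struct mv_filters *filters_alloc(int n)
    {
        struct mv_filters *fl =
            calloc(1, sizeof(*fl) + n * sizeof(struct mv_filter));

        if (fl)
            fl->count = n;
        return fl;
    }

    int main(void)
    {
        struct mv_filters *fl = filters_alloc(4);

        free(fl);
        return 0;
    }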
/* vf context */
@@ -194,6 +125,7 @@ struct bnx2x_virtf {
#define VF_CFG_INT_SIMD 0x0008
#define VF_CACHE_LINE 0x0010
#define VF_CFG_VLAN 0x0020
+#define VF_CFG_STATS_COALESCE 0x0040
u8 state;
#define VF_FREE 0 /* VF ready to be acquired holds no resc */
@@ -201,18 +133,11 @@ struct bnx2x_virtf {
#define VF_ENABLED 2 /* VF Enabled */
#define VF_RESET 3 /* VF FLR'd, pending cleanup */
- /* non 0 during flr cleanup */
- u8 flr_clnup_stage;
-#define VF_FLR_CLN 1 /* reclaim resources and do 'final cleanup'
- * sans the end-wait
- */
-#define VF_FLR_ACK 2 /* ACK flr notification */
-#define VF_FLR_EPILOG 3 /* wait for VF remnants to dissipate in the HW
- * ~ final cleanup' end wait
- */
+ bool flr_clnup_stage; /* true during flr cleanup */
/* dma */
dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */
+ u16 stats_stride;
dma_addr_t spq_map;
dma_addr_t bulletin_map;
@@ -234,12 +159,17 @@ struct bnx2x_virtf {
#define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters)
#define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters)
#define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters)
+ /* Hide a single vlan filter credit for the hypervisor */
+#define vf_vlan_rules_visible_cnt(vf) (vf_vlan_rules_cnt(vf) - 1)
u8 sb_count; /* actual number of SBs */
u8 igu_base_id; /* base igu status block id */
struct bnx2x_vf_queue *vfqs;
-#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var)
+#define LEADING_IDX 0
+#define bnx2x_vfq_is_leading(vfq) ((vfq)->index == LEADING_IDX)
+#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var)
+#define bnx2x_leading_vfq(vf, var) ((vf)->vfqs[LEADING_IDX].var)
u8 index; /* index in the vf array */
u8 abs_vfid;
@@ -262,17 +192,13 @@ struct bnx2x_virtf {
int leading_rss;
/* MCAST object */
+ int mcast_list_len;
struct bnx2x_mcast_obj mcast_obj;
/* RSS configuration object */
struct bnx2x_rss_config_obj rss_conf_obj;
/* slow-path operations */
- atomic_t op_in_progress;
- int op_rc;
- bool op_wait_blocking;
- struct list_head op_list_head;
- union bnx2x_vfop_params op_params;
struct mutex op_mutex; /* one vfop at a time mutex */
enum channel_tlvs op_current;
};
@@ -330,11 +256,6 @@ struct bnx2x_vf_mbx {
u32 vf_addr_hi;
struct vfpf_first_tlv first_tlv; /* saved VF request header */
-
- u8 flags;
-#define VF_MSG_INPROCESS 0x1 /* failsafe - the FW should prevent
- * more then one pending msg
- */
};
struct bnx2x_vf_sp {
@@ -358,6 +279,10 @@ struct bnx2x_vf_sp {
struct client_init_ramrod_data init_data;
struct client_update_ramrod_data update_data;
} q_data;
+
+ union {
+ struct eth_rss_update_ramrod_data e2;
+ } rss_rdata;
};
struct hw_dma {
@@ -403,6 +328,14 @@ struct bnx2x_vfdb {
#define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32)
u32 flrd_vfs[FLRD_VFS_DWORDS];
+
+ /* the number of msix vectors belonging to this PF designated for VFs */
+ u16 vf_sbs_pool;
+ u16 first_vf_igu_entry;
+
+ /* sp_rtnl synchronization */
+ struct mutex event_mutex;
+ u64 event_occur;
};
/* queue access */
@@ -411,11 +344,6 @@ static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index)
return &(vf->vfqs[index]);
}
-static inline bool vfq_is_leading(struct bnx2x_vf_queue *vfq)
-{
- return (vfq->index == 0);
-}
-
/* FW ids */
static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx)
{
@@ -434,7 +362,10 @@ static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
{
- return vfq_cl_id(vf, q);
+ if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
+ return vf->leading_rss;
+ else
+ return vfq_cl_id(vf, q);
}
static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
@@ -454,13 +385,13 @@ void bnx2x_iov_init_dq(struct bnx2x *bp);
void bnx2x_iov_init_dmae(struct bnx2x *bp);
void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
struct bnx2x_queue_sp_obj **q_obj);
-void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work);
int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem);
void bnx2x_iov_adjust_stats_req(struct bnx2x *bp);
void bnx2x_iov_storm_stats_update(struct bnx2x *bp);
-void bnx2x_iov_sp_task(struct bnx2x *bp);
/* global vf mailbox routines */
-void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event);
+void bnx2x_vf_mbx(struct bnx2x *bp);
+void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
+ struct vf_pf_event_data *vfpf_event);
void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid);
/* CORE VF API */
@@ -473,162 +404,6 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
dma_addr_t *sb_map);
-/* VFOP generic helpers */
-#define bnx2x_vfop_default(state) do { \
- BNX2X_ERR("Bad state %d\n", (state)); \
- vfop->rc = -EINVAL; \
- goto op_err; \
- } while (0)
-
-enum {
- VFOP_DONE,
- VFOP_CONT,
- VFOP_VERIFY_PEND,
-};
-
-#define bnx2x_vfop_finalize(vf, rc, next) do { \
- if ((rc) < 0) \
- goto op_err; \
- else if ((rc) > 0) \
- goto op_pending; \
- else if ((next) == VFOP_DONE) \
- goto op_done; \
- else if ((next) == VFOP_VERIFY_PEND) \
- BNX2X_ERR("expected pending\n"); \
- else { \
- DP(BNX2X_MSG_IOV, "no ramrod. Scheduling\n"); \
- atomic_set(&vf->op_in_progress, 1); \
- queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); \
- return; \
- } \
- } while (0)
-
-#define bnx2x_vfop_opset(first_state, trans_hndlr, done_hndlr) \
- do { \
- vfop->state = first_state; \
- vfop->op_p = &vf->op_params; \
- vfop->transition = trans_hndlr; \
- vfop->done = done_hndlr; \
- } while (0)
-
-static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp,
- struct bnx2x_virtf *vf)
-{
- WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
- WARN_ON(list_empty(&vf->op_list_head));
- return list_first_entry(&vf->op_list_head, struct bnx2x_vfop, link);
-}
-
-static inline struct bnx2x_vfop *bnx2x_vfop_add(struct bnx2x *bp,
- struct bnx2x_virtf *vf)
-{
- struct bnx2x_vfop *vfop = kzalloc(sizeof(*vfop), GFP_KERNEL);
-
- WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
- if (vfop) {
- INIT_LIST_HEAD(&vfop->link);
- list_add(&vfop->link, &vf->op_list_head);
- }
- return vfop;
-}
-
-static inline void bnx2x_vfop_end(struct bnx2x *bp, struct bnx2x_virtf *vf,
- struct bnx2x_vfop *vfop)
-{
- /* rc < 0 - error, otherwise set to 0 */
- DP(BNX2X_MSG_IOV, "rc was %d\n", vfop->rc);
- if (vfop->rc >= 0)
- vfop->rc = 0;
- DP(BNX2X_MSG_IOV, "rc is now %d\n", vfop->rc);
-
- /* unlink the current op context and propagate error code
- * must be done before invoking the 'done()' handler
- */
- WARN(!mutex_is_locked(&vf->op_mutex),
- "about to access vf op linked list but mutex was not locked!");
- list_del(&vfop->link);
-
- if (list_empty(&vf->op_list_head)) {
- DP(BNX2X_MSG_IOV, "list was empty %d\n", vfop->rc);
- vf->op_rc = vfop->rc;
- DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
- vf->op_rc, vfop->rc);
- } else {
- struct bnx2x_vfop *cur_vfop;
-
- DP(BNX2X_MSG_IOV, "list not empty %d\n", vfop->rc);
- cur_vfop = bnx2x_vfop_cur(bp, vf);
- cur_vfop->rc = vfop->rc;
- DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
- vf->op_rc, vfop->rc);
- }
-
- /* invoke done handler */
- if (vfop->done) {
- DP(BNX2X_MSG_IOV, "calling done handler\n");
- vfop->done(bp, vf);
- } else {
- /* there is no done handler for the operation to unlock
- * the mutex. Must have gotten here from PF initiated VF RELEASE
- */
- bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
- }
-
- DP(BNX2X_MSG_IOV, "done handler complete. vf->op_rc %d, vfop->rc %d\n",
- vf->op_rc, vfop->rc);
-
- /* if this is the last nested op reset the wait_blocking flag
- * to release any blocking wrappers, only after 'done()' is invoked
- */
- if (list_empty(&vf->op_list_head)) {
- DP(BNX2X_MSG_IOV, "list was empty after done %d\n", vfop->rc);
- vf->op_wait_blocking = false;
- }
-
- kfree(vfop);
-}
-
-static inline int bnx2x_vfop_wait_blocking(struct bnx2x *bp,
- struct bnx2x_virtf *vf)
-{
- /* can take a while if any port is running */
- int cnt = 5000;
-
- might_sleep();
- while (cnt--) {
- if (vf->op_wait_blocking == false) {
-#ifdef BNX2X_STOP_ON_ERROR
- DP(BNX2X_MSG_IOV, "exit (cnt %d)\n", 5000 - cnt);
-#endif
- return 0;
- }
- usleep_range(1000, 2000);
-
- if (bp->panic)
- return -EIO;
- }
-
- /* timeout! */
-#ifdef BNX2X_STOP_ON_ERROR
- bnx2x_panic();
-#endif
-
- return -EBUSY;
-}
-
-static inline int bnx2x_vfop_transition(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- vfop_handler_t transition,
- bool block)
-{
- if (block)
- vf->op_wait_blocking = true;
- transition(bp, vf);
- if (block)
- return bnx2x_vfop_wait_blocking(bp, vf);
- return 0;
-}
-
/* VFOP queue construction helpers */
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_queue_init_params *init_params,
@@ -643,60 +418,41 @@ void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vf_queue *q,
- struct bnx2x_vfop_qctor_params *p,
+ struct bnx2x_vf_queue_construct_params *p,
unsigned long q_type);
-int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- struct bnx2x_vfop_filters *macs,
- int qid, bool drv_only);
-
-int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid, u16 vid, bool add);
-
-int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- struct bnx2x_vfop_filters *vlans,
- int qid, bool drv_only);
-
-int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid);
-
-int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid);
-
-int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- bnx2x_mac_addr_t *mcasts,
- int mcast_num, bool drv_only);
-
-int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid, unsigned long accept_flags);
-
-int bnx2x_vfop_close_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd);
-
-int bnx2x_vfop_release_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd);
+
+int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mac_vlan_filters *filters,
+ int qid, bool drv_only);
+
+int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
+ struct bnx2x_vf_queue_construct_params *qctor);
+
+int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid);
+
+int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only);
+
+int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ int qid, unsigned long accept_flags);
+
+int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf);
+
+int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf);
+
+int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_config_rss_params *rss);
+
+int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct vfpf_tpa_tlv *tlv,
+ struct bnx2x_queue_update_tpa_params *params);
/* VF release ~ VF close + VF release-resources
*
* Release is the ultimate SW shutdown and is called whenever an
* irrecoverable error is encountered.
*/
-void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block);
+int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf);
int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid);
u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf);
@@ -709,13 +465,6 @@ void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid);
/* Handles an FLR (or VF_DISABLE) notification from the MCP */
void bnx2x_vf_handle_flr_event(struct bnx2x *bp);
-void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
- u16 length);
-void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
- u16 type, u16 length);
-void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv);
-void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list);
-
bool bnx2x_tlv_supported(u16 tlvtype);
u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
@@ -730,9 +479,11 @@ int bnx2x_vfpf_release(struct bnx2x *bp);
int bnx2x_vfpf_release(struct bnx2x *bp);
int bnx2x_vfpf_init(struct bnx2x *bp);
void bnx2x_vfpf_close_vf(struct bnx2x *bp);
-int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx);
-int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
+int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ bool is_leading);
int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set);
+int bnx2x_vfpf_config_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *params);
int bnx2x_vfpf_set_mcast(struct net_device *dev);
int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
@@ -753,30 +504,32 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
void bnx2x_timer_sriov(struct bnx2x *bp);
void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp);
+void bnx2x_vf_pci_dealloc(struct bnx2x *bp);
int bnx2x_vf_pci_alloc(struct bnx2x *bp);
int bnx2x_enable_sriov(struct bnx2x *bp);
void bnx2x_disable_sriov(struct bnx2x *bp);
static inline int bnx2x_vf_headroom(struct bnx2x *bp)
{
- return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
+ return bp->vfdb->sriov.nr_virtfn * BNX2X_CIDS_PER_VF;
}
void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
void bnx2x_iov_channel_down(struct bnx2x *bp);
-int bnx2x_open_epilog(struct bnx2x *bp);
+
+void bnx2x_iov_task(struct work_struct *work);
+
+void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag);
#else /* CONFIG_BNX2X_SRIOV */
static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
struct bnx2x_queue_sp_obj **q_obj) {}
-static inline void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid,
- bool queue_work) {}
static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp,
union event_ring_elem *elem) {return 1; }
-static inline void bnx2x_iov_sp_task(struct bnx2x *bp) {}
-static inline void bnx2x_vf_mbx(struct bnx2x *bp,
- struct vf_pf_event_data *vfpf_event) {}
+static inline void bnx2x_vf_mbx(struct bnx2x *bp) {}
+static inline void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
+ struct vf_pf_event_data *vfpf_event) {}
static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; }
static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {}
static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; }
@@ -793,10 +546,11 @@ static inline int bnx2x_vfpf_acquire(struct bnx2x *bp,
static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
-static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) {return 0; }
-static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; }
+static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; }
static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
u8 vf_qid, bool set) {return 0; }
+static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *params) {return 0; }
static inline int bnx2x_vfpf_set_mcast(struct net_device *dev) {return 0; }
static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; }
static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; }
@@ -817,11 +571,14 @@ static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
return NULL;
}
+static inline void bnx2x_vf_pci_dealloc(struct bnx2x *bp) {}
static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {}
-static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; }
+
+static inline void bnx2x_iov_task(struct work_struct *work) {}
+static inline void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) {}
#endif /* CONFIG_BNX2X_SRIOV */
#endif /* bnx2x_sriov.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 98366abd02b..ca47665f94b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
@@ -196,7 +196,7 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp)
} else if (bp->func_stx) {
*stats_comp = 0;
- bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
+ bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp);
}
}
@@ -221,7 +221,8 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
* Statistics service functions
*/
-static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+/* should be called under stats_sema */
+static void __bnx2x_stats_pmf_update(struct bnx2x *bp)
{
struct dmae_command *dmae;
u32 opcode;
@@ -518,29 +519,47 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
*stats_comp = 0;
}
-static void bnx2x_stats_start(struct bnx2x *bp)
+/* should be called under stats_sema */
+static void __bnx2x_stats_start(struct bnx2x *bp)
{
- /* vfs travel through here as part of the statistics FSM, but no action
- * is required
- */
- if (IS_VF(bp))
- return;
+ if (IS_PF(bp)) {
+ if (bp->port.pmf)
+ bnx2x_port_stats_init(bp);
- if (bp->port.pmf)
- bnx2x_port_stats_init(bp);
+ else if (bp->func_stx)
+ bnx2x_func_stats_init(bp);
- else if (bp->func_stx)
- bnx2x_func_stats_init(bp);
+ bnx2x_hw_stats_post(bp);
+ bnx2x_storm_stats_post(bp);
+ }
- bnx2x_hw_stats_post(bp);
- bnx2x_storm_stats_post(bp);
+ bp->stats_started = true;
+}
+
+static void bnx2x_stats_start(struct bnx2x *bp)
+{
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
+ __bnx2x_stats_start(bp);
+ up(&bp->stats_sema);
}
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
bnx2x_stats_comp(bp);
- bnx2x_stats_pmf_update(bp);
- bnx2x_stats_start(bp);
+ __bnx2x_stats_pmf_update(bp);
+ __bnx2x_stats_start(bp);
+ up(&bp->stats_sema);
+}
+
+static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+{
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
+ __bnx2x_stats_pmf_update(bp);
+ up(&bp->stats_sema);
}
static void bnx2x_stats_restart(struct bnx2x *bp)
@@ -550,8 +569,11 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
*/
if (IS_VF(bp))
return;
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
bnx2x_stats_comp(bp);
- bnx2x_stats_start(bp);
+ __bnx2x_stats_start(bp);
+ up(&bp->stats_sema);
}
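
The hunks above establish the patch's stats locking convention: double-underscore helpers assume bp->stats_sema is already held, and each public entry point takes the semaphore with a bounded wait so a stuck completion cannot hang the caller. A minimal sketch of the pattern, using only fields this patch adds (the demo_* names are hypothetical, not driver functions):

	/* helper: caller must hold bp->stats_sema */
	static void __demo_transition(struct bnx2x *bp)
	{
		bp->stats_started = true;
	}

	/* public entry point: bounded wait, then delegate */
	static void demo_transition(struct bnx2x *bp)
	{
		if (down_timeout(&bp->stats_sema, HZ / 10))
			BNX2X_ERR("Unable to acquire stats lock\n");
		__demo_transition(bp);
		up(&bp->stats_sema);
	}

Note the patch deliberately proceeds even when down_timeout() fails, preferring a loud error over a wedged state machine.
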
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
@@ -888,9 +910,7 @@ static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp)
/* Make sure we use the value of the counter
* used for sending the last stats ramrod.
*/
- spin_lock_bh(&bp->stats_lock);
cur_stats_counter = bp->stats_counter - 1;
- spin_unlock_bh(&bp->stats_lock);
/* are storm stats valid? */
if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
@@ -1227,12 +1247,18 @@ static void bnx2x_stats_update(struct bnx2x *bp)
{
u32 *stats_comp = bnx2x_sp(bp, stats_comp);
- if (bnx2x_edebug_stats_stopped(bp))
+ /* we run update from timer context, so give up
+ * if somebody is in the middle of transition
+ */
+ if (down_trylock(&bp->stats_sema))
return;
+ if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started)
+ goto out;
+
if (IS_PF(bp)) {
if (*stats_comp != DMAE_COMP_VAL)
- return;
+ goto out;
if (bp->port.pmf)
bnx2x_hw_stats_update(bp);
@@ -1242,7 +1268,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
BNX2X_ERR("storm stats were not updated for 3 times\n");
bnx2x_panic();
}
- return;
+ goto out;
}
} else {
/* vf doesn't collect HW statistics, and doesn't get completions
@@ -1256,7 +1282,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
/* vf is done */
if (IS_VF(bp))
- return;
+ goto out;
if (netif_msg_timer(bp)) {
struct bnx2x_eth_stats *estats = &bp->eth_stats;
@@ -1267,6 +1293,9 @@ static void bnx2x_stats_update(struct bnx2x *bp)
bnx2x_hw_stats_post(bp);
bnx2x_storm_stats_post(bp);
+
+out:
+ up(&bp->stats_sema);
}
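
bnx2x_stats_update() above runs from timer context, where sleeping is forbidden, so it uses down_trylock() instead of a blocking acquire: if a state transition currently holds the semaphore, this tick is simply skipped. A sketch of that idiom (demo_update is hypothetical):

	static void demo_update(struct bnx2x *bp)
	{
		/* timer/softirq context must not sleep: bail if contended */
		if (down_trylock(&bp->stats_sema))
			return;

		if (!bp->stats_started)
			goto out;

		/* ... gather and post statistics ... */
	out:
		up(&bp->stats_sema);
	}
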
static void bnx2x_port_stats_stop(struct bnx2x *bp)
@@ -1332,6 +1361,11 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
{
int update = 0;
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
+
+ bp->stats_started = false;
+
bnx2x_stats_comp(bp);
if (bp->port.pmf)
@@ -1348,6 +1382,8 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
bnx2x_hw_stats_post(bp);
bnx2x_stats_comp(bp);
}
+
+ up(&bp->stats_sema);
}
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
@@ -1376,15 +1412,17 @@ static const struct {
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
enum bnx2x_stats_state state;
+ void (*action)(struct bnx2x *bp);
if (unlikely(bp->panic))
return;
spin_lock_bh(&bp->stats_lock);
state = bp->stats_state;
bp->stats_state = bnx2x_stats_stm[state][event].next_state;
+ action = bnx2x_stats_stm[state][event].action;
spin_unlock_bh(&bp->stats_lock);
- bnx2x_stats_stm[state][event].action(bp);
+ action(bp);
if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
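
The hunk above reads the handler pointer under stats_lock together with the state transition, so the lookup and the transition are taken from the same table-entry snapshot; the handler is then invoked outside the lock, since handlers may sleep on stats_sema. Restated in isolation (a sketch, not additional patch content):

	spin_lock_bh(&bp->stats_lock);
	state = bp->stats_state;
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
	action = bnx2x_stats_stm[state][event].action;	/* latch under lock */
	spin_unlock_bh(&bp->stats_lock);

	action(bp);	/* outside the lock: may sleep on stats_sema */
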
@@ -1955,3 +1993,14 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
estats->mac_discard);
}
}
+
+void bnx2x_stats_safe_exec(struct bnx2x *bp,
+ void (func_to_exec)(void *cookie),
+ void *cookie)
+{
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
+ bnx2x_stats_comp(bp);
+ func_to_exec(cookie);
+ __bnx2x_stats_start(bp);
+ up(&bp->stats_sema);
+}
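
bnx2x_stats_safe_exec() above quiesces statistics (waits for the outstanding DMAE completion), runs a caller-supplied callback, then restarts collection. A hypothetical usage sketch (the demo_* names and the field touched are illustrative only):

	struct demo_ctx {
		struct bnx2x *bp;
		int value;
	};

	static void demo_cb(void *cookie)
	{
		struct demo_ctx *ctx = cookie;

		/* runs with stats DMA quiesced and stats_sema held */
		ctx->bp->msg_enable = ctx->value;	/* illustrative field */
	}

	/* caller: */
	struct demo_ctx ctx = { .bp = bp, .value = 0 };

	bnx2x_stats_safe_exec(bp, demo_cb, &ctx);
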
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 853824d258e..2beceaefdee 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
@@ -539,6 +539,9 @@ struct bnx2x;
void bnx2x_memset_stats(struct bnx2x *bp);
void bnx2x_stats_init(struct bnx2x *bp);
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
+void bnx2x_stats_safe_exec(struct bnx2x *bp,
+ void (func_to_exec)(void *cookie),
+ void *cookie);
/**
* bnx2x_save_statistics - save statistics when unloading.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 2088063151d..d712d0ddd71 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -12,18 +12,20 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Shmulik Ravid <shmulikr@broadcom.com>
- * Ariel Elior <ariele@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ * Ariel Elior <ariel.elior@qlogic.com>
*/
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include <linux/crc32.h>
+static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
+
/* place a given tlv on the tlv buffer at a given offset */
-void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
- u16 length)
+static void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list,
+ u16 offset, u16 type, u16 length)
{
struct channel_tlv *tl =
(struct channel_tlv *)(tlvs_list + offset);
@@ -33,8 +35,8 @@ void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
}
/* Clear the mailbox and init the header of the first tlv */
-void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
- u16 type, u16 length)
+static void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
+ u16 type, u16 length)
{
mutex_lock(&bp->vf2pf_mutex);
@@ -52,7 +54,8 @@ void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
}
/* releases the mailbox */
-void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv)
+static void bnx2x_vfpf_finalize(struct bnx2x *bp,
+ struct vfpf_first_tlv *first_tlv)
{
DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
first_tlv->tl.type);
@@ -60,8 +63,32 @@ void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv)
mutex_unlock(&bp->vf2pf_mutex);
}
+/* Finds a TLV by type in a TLV buffer; if found, returns a pointer to the TLV */
+static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list,
+ enum channel_tlvs req_tlv)
+{
+ struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
+
+ do {
+ if (tlv->type == req_tlv)
+ return tlv;
+
+ if (!tlv->length) {
+ BNX2X_ERR("Found TLV with length 0\n");
+ return NULL;
+ }
+
+ tlvs_list += tlv->length;
+ tlv = (struct channel_tlv *)tlvs_list;
+ } while (tlv->type != CHANNEL_TLV_LIST_END);
+
+ DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv);
+
+ return NULL;
+}
+
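
bnx2x_search_tlv_list() above walks the channel's TLV chain: each channel_tlv header's length field is the byte distance to the next header, and CHANNEL_TLV_LIST_END terminates the chain. A zero length is rejected early because following it would spin on the same header forever. Illustrative layout (offsets L0/L1 are assumptions, not wire constants):

	offset 0       : { type = <request tlv>,             length = L0 }
	offset L0      : { type = CHANNEL_TLV_PHYS_PORT_ID,  length = L1 }
	offset L0 + L1 : { type = CHANNEL_TLV_LIST_END }
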
/* list the types and lengths of the tlvs on the buffer */
-void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
+static void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
{
int i = 1;
struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
@@ -128,7 +155,7 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n");
*done = PFVF_STATUS_SUCCESS;
- return 0;
+ return -EINVAL;
}
/* Write message address */
@@ -184,7 +211,7 @@ static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
return -EINVAL;
}
- BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg);
+ DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);
*vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
@@ -196,6 +223,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
int rc = 0, attempts = 0;
struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
+ struct vfpf_port_phys_id_resp_tlv *phys_port_resp;
u32 vf_id;
bool resources_acquired = false;
@@ -219,8 +247,14 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
/* pf 2 vf bulletin board address */
req->bulletin_addr = bp->pf2vf_bulletin_mapping;
+ /* Request physical port identifier */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,
+ CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv));
+
/* add list termination tlv */
- bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ bnx2x_add_tlv(bp, req,
+ req->first_tlv.tl.length + sizeof(struct channel_tlv),
+ CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
/* output tlvs list */
@@ -257,17 +291,23 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
/* humble our request */
req->resc_request.num_txqs =
- bp->acquire_resp.resc.num_txqs;
+ min(req->resc_request.num_txqs,
+ bp->acquire_resp.resc.num_txqs);
req->resc_request.num_rxqs =
- bp->acquire_resp.resc.num_rxqs;
+ min(req->resc_request.num_rxqs,
+ bp->acquire_resp.resc.num_rxqs);
req->resc_request.num_sbs =
- bp->acquire_resp.resc.num_sbs;
+ min(req->resc_request.num_sbs,
+ bp->acquire_resp.resc.num_sbs);
req->resc_request.num_mac_filters =
- bp->acquire_resp.resc.num_mac_filters;
+ min(req->resc_request.num_mac_filters,
+ bp->acquire_resp.resc.num_mac_filters);
req->resc_request.num_vlan_filters =
- bp->acquire_resp.resc.num_vlan_filters;
+ min(req->resc_request.num_vlan_filters,
+ bp->acquire_resp.resc.num_vlan_filters);
req->resc_request.num_mc_filters =
- bp->acquire_resp.resc.num_mc_filters;
+ min(req->resc_request.num_mc_filters,
+ bp->acquire_resp.resc.num_mc_filters);
/* Clear response buffer */
memset(&bp->vf2pf_mbox->resp, 0,
@@ -281,6 +321,15 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
}
}
+ /* Retrieve physical port id (if possible) */
+ phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *)
+ bnx2x_search_tlv_list(bp, resp,
+ CHANNEL_TLV_PHYS_PORT_ID);
+ if (phys_port_resp) {
+ memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN);
+ bp->flags |= HAS_PHYS_PORT_ID;
+ }
+
/* get HW info */
bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
bp->link_params.chip_id = bp->common.chip_id;
@@ -293,7 +342,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
bp->common.flash_size = 0;
bp->flags |=
NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
- bp->igu_sb_cnt = 1;
+ bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
sizeof(bp->fw_ver));
@@ -373,6 +422,8 @@ int bnx2x_vfpf_init(struct bnx2x *bp)
req->stats_addr = bp->fw_stats_data_mapping +
offsetof(struct bnx2x_fw_stats_data, queue_stats);
+ req->stats_stride = sizeof(struct per_queue_stats);
+
/* add list termination tlv */
bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -452,12 +503,61 @@ free_irq:
bnx2x_free_irq(bp);
}
+static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_queue *q)
+{
+ u8 cl_id = vfq_cl_id(vf, q);
+ u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
+
+ /* mac */
+ bnx2x_init_mac_obj(bp, &q->mac_obj,
+ cl_id, q->cid, func_id,
+ bnx2x_vf_sp(bp, vf, mac_rdata),
+ bnx2x_vf_sp_map(bp, vf, mac_rdata),
+ BNX2X_FILTER_MAC_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX,
+ &bp->macs_pool);
+ /* vlan */
+ bnx2x_init_vlan_obj(bp, &q->vlan_obj,
+ cl_id, q->cid, func_id,
+ bnx2x_vf_sp(bp, vf, vlan_rdata),
+ bnx2x_vf_sp_map(bp, vf, vlan_rdata),
+ BNX2X_FILTER_VLAN_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX,
+ &bp->vlans_pool);
+
+ /* mcast */
+ bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
+ q->cid, func_id, func_id,
+ bnx2x_vf_sp(bp, vf, mcast_rdata),
+ bnx2x_vf_sp_map(bp, vf, mcast_rdata),
+ BNX2X_FILTER_MCAST_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX);
+
+ /* rss */
+ bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
+ func_id, func_id,
+ bnx2x_vf_sp(bp, vf, rss_rdata),
+ bnx2x_vf_sp_map(bp, vf, rss_rdata),
+ BNX2X_FILTER_RSS_CONF_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX);
+
+ vf->leading_rss = cl_id;
+ q->is_leading = true;
+ q->sp_initialized = true;
+}
+
/* ask the pf to open a queue for the vf */
-int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
+int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ bool is_leading)
{
struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
- struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
+ u8 fp_idx = fp->index;
u16 tpa_agg_size = 0, flags = 0;
int rc;
@@ -473,6 +573,9 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
tpa_agg_size = TPA_AGG_SIZE;
}
+ if (is_leading)
+ flags |= VFPF_QUEUE_FLG_LEADING_RSS;
+
/* calculate queue flags */
flags |= VFPF_QUEUE_FLG_STATS;
flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
@@ -534,7 +637,7 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
return rc;
}
-int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
+static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
{
struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
@@ -570,6 +673,7 @@ int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
out:
bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
return rc;
}
@@ -643,7 +747,76 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
out:
bnx2x_vfpf_finalize(bp, &req->first_tlv);
- return 0;
+ return rc;
+}
+
+/* request pf to config rss table for vf queues */
+int bnx2x_vfpf_config_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *params)
+{
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss;
+ int rc = 0;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS,
+ sizeof(*req));
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
+ memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key));
+ req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
+ req->rss_key_size = T_ETH_RSS_KEY;
+ req->rss_result_mask = params->rss_result_mask;
+
+ /* flags handled individually for backward/forward compatibility */
+ if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED))
+ req->rss_flags |= VFPF_RSS_MODE_DISABLED;
+ if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR))
+ req->rss_flags |= VFPF_RSS_MODE_REGULAR;
+ if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH))
+ req->rss_flags |= VFPF_RSS_SET_SRCH;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV4))
+ req->rss_flags |= VFPF_RSS_IPV4;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP))
+ req->rss_flags |= VFPF_RSS_IPV4_TCP;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP))
+ req->rss_flags |= VFPF_RSS_IPV4_UDP;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV6))
+ req->rss_flags |= VFPF_RSS_IPV6;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP))
+ req->rss_flags |= VFPF_RSS_IPV6_TCP;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP))
+ req->rss_flags |= VFPF_RSS_IPV6_UDP;
+
+ DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags);
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ /* send message to pf */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc) {
+ BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+ goto out;
+ }
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ /* Since older drivers don't support this feature (and VF has
+ * no way of knowing other than failing this), don't propagate
+ * an error in this case.
+ */
+ DP(BNX2X_MSG_IOV,
+ "Failed to send rss message to PF over VF-PF channel [%d]\n",
+ resp->hdr.status);
+ }
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
}
int bnx2x_vfpf_set_mcast(struct net_device *dev)
@@ -723,29 +896,16 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);
- switch (mode) {
- case BNX2X_RX_MODE_NONE: /* no Rx */
+ /* Ignore everything except MODE_NONE */
+ if (mode == BNX2X_RX_MODE_NONE) {
req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
- break;
- case BNX2X_RX_MODE_NORMAL:
+ } else {
+ /* Current PF driver will not look at the specific flags,
+ * but they are required when working with older drivers on hv.
+ */
req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
- break;
- case BNX2X_RX_MODE_ALLMULTI:
- req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
- req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
- req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
- break;
- case BNX2X_RX_MODE_PROMISC:
- req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST;
- req->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
- req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
- break;
- default:
- BNX2X_ERR("BAD rx mode (%d)\n", mode);
- rc = -EINVAL;
- goto out;
}
req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
@@ -766,7 +926,7 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
rc = -EINVAL;
}
-out:
+
bnx2x_vfpf_finalize(bp, &req->first_tlv);
return rc;
@@ -856,64 +1016,68 @@ static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
dmae.len = len32;
/* issue the command and wait for completion */
- return bnx2x_issue_dmae_with_comp(bp, &dmae);
+ return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
}
-static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
{
struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
- u64 vf_addr;
- dma_addr_t pf_addr;
u16 length, type;
- int rc;
- struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
/* prepare response */
type = mbx->first_tlv.tl.type;
length = type == CHANNEL_TLV_ACQUIRE ?
sizeof(struct pfvf_acquire_resp_tlv) :
sizeof(struct pfvf_general_resp_tlv);
- bnx2x_add_tlv(bp, resp, 0, type, length);
- resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
- bnx2x_add_tlv(bp, resp, length, CHANNEL_TLV_LIST_END,
+ bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length);
+ bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
+}
+
+static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ int vf_rc)
+{
+ struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
+ struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
+ dma_addr_t pf_addr;
+ u64 vf_addr;
+ int rc;
+
bnx2x_dp_tlv_list(bp, resp);
DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
+ resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc);
+
/* send response */
vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
mbx->first_tlv.resp_msg_offset;
pf_addr = mbx->msg_mapping +
offsetof(struct bnx2x_vf_mbx_msg, resp);
- /* copy the response body, if there is one, before the header, as the vf
- * is sensitive to the header being written
+ /* Copy the response buffer. The first u64 is written afterwards, as
+ * the vf is sensitive to the header being written
*/
- if (resp->hdr.tl.length > sizeof(u64)) {
- length = resp->hdr.tl.length - sizeof(u64);
- vf_addr += sizeof(u64);
- pf_addr += sizeof(u64);
- rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
- U64_HI(vf_addr),
- U64_LO(vf_addr),
- length/4);
- if (rc) {
- BNX2X_ERR("Failed to copy response body to VF %d\n",
- vf->abs_vfid);
- goto mbx_error;
- }
- vf_addr -= sizeof(u64);
- pf_addr -= sizeof(u64);
+ vf_addr += sizeof(u64);
+ pf_addr += sizeof(u64);
+ rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
+ U64_HI(vf_addr),
+ U64_LO(vf_addr),
+ (sizeof(union pfvf_tlvs) - sizeof(u64))/4);
+ if (rc) {
+ BNX2X_ERR("Failed to copy response body to VF %d\n",
+ vf->abs_vfid);
+ goto mbx_error;
}
+ vf_addr -= sizeof(u64);
+ pf_addr -= sizeof(u64);
/* ack the FW */
storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
mmiowb();
- /* initiate dmae to send the response */
- mbx->flags &= ~VF_MSG_INPROCESS;
-
/* copy the response header including status-done field,
* must be last dmae, must be after FW is acked
*/
@@ -933,7 +1097,38 @@ static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
return;
mbx_error:
- bnx2x_vf_release(bp, vf, false); /* non blocking */
+ bnx2x_vf_release(bp, vf);
+}
+
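
The ordering in bnx2x_vf_mbx_resp_send_msg() above is a small publication protocol: the VF polls the first u64 of the response (the header carrying the status/done field), so the PF must make everything else visible before writing it. In sequence:

	1. DMA-copy the response body (everything past the first u64);
	2. ack the FW mailbox (storm_memset_vf_mbx_ack() + mmiowb());
	3. DMA-copy the first u64 last, releasing the polling VF.

Copying the header first could let the VF consume a half-written body.
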
+static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ int rc)
+{
+ bnx2x_vf_mbx_resp_single_tlv(bp, vf);
+ bnx2x_vf_mbx_resp_send_msg(bp, vf, rc);
+}
+
+static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ void *buffer,
+ u16 *offset)
+{
+ struct vfpf_port_phys_id_resp_tlv *port_id;
+
+ if (!(bp->flags & HAS_PHYS_PORT_ID))
+ return;
+
+ bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID,
+ sizeof(struct vfpf_port_phys_id_resp_tlv));
+
+ port_id = (struct vfpf_port_phys_id_resp_tlv *)
+ (((u8 *)buffer) + *offset);
+ memcpy(port_id->id, bp->phys_port_id, ETH_ALEN);
+
+ /* Advance the offset so the caller's cursor keeps pointing at
+ * the tail of the TLV data
+ */
+ *offset += sizeof(struct vfpf_port_phys_id_resp_tlv);
}
static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -943,15 +1138,17 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
struct pf_vf_resc *resc = &resp->resc;
u8 status = bnx2x_pfvf_status_codes(vfop_status);
+ u16 length;
memset(resp, 0, sizeof(*resp));
/* fill in pfdev info */
resp->pfdev_info.chip_num = bp->common.chip_id;
- resp->pfdev_info.db_size = (1 << BNX2X_DB_SHIFT);
+ resp->pfdev_info.db_size = bp->db_size;
resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
- /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
+ PFVF_CAP_TPA |
+ PFVF_CAP_TPA_UPDATE);
bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
sizeof(resp->pfdev_info.fw_ver));
@@ -966,7 +1163,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
bnx2x_vf_max_queue_cnt(bp, vf);
resc->num_sbs = vf_sb_count(vf);
resc->num_mac_filters = vf_mac_rules_cnt(vf);
- resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
+ resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf);
resc->num_mc_filters = 0;
if (status == PFVF_STATUS_SUCCESS) {
@@ -1016,9 +1213,23 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
resc->hw_sbs[i].sb_qid);
DP_CONT(BNX2X_MSG_IOV, "]\n");
+ /* prepare response */
+ length = sizeof(struct pfvf_acquire_resp_tlv);
+ bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length);
+
+ /* Handle possible VF requests for physical port identifiers.
+ * 'length' should continue to indicate the offset of the first empty
+ * place in the buffer (i.e., where next TLV should be inserted)
+ */
+ if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
+ CHANNEL_TLV_PHYS_PORT_ID))
+ bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length);
+
+ bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
/* send the response */
- vf->op_rc = vfop_status;
- bnx2x_vf_mbx_resp(bp, vf);
+ bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status);
}
static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -1050,14 +1261,20 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
struct vfpf_init_tlv *init = &mbx->msg->req.init;
+ int rc;
/* record ghost addresses from vf message */
vf->spq_map = init->spq_addr;
vf->fw_stat_map = init->stats_addr;
- vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
+ vf->stats_stride = init->stats_stride;
+ rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
+
+ /* set VF multiqueue statistics collection mode */
+ if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
+ vf->cfg_flags |= VF_CFG_STATS_COALESCE;
/* response */
- bnx2x_vf_mbx_resp(bp, vf);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
}
/* convert MBX queue-flags to standard SP queue-flags */
@@ -1080,6 +1297,8 @@ static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
__set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
__set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
+ __set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);
/* outer vlan removal is set according to PF's multi function mode */
if (IS_MF_SD(bp))
@@ -1090,16 +1309,14 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vf_mbx_resp,
- .block = false,
- };
+ struct bnx2x_vf_queue_construct_params qctor;
+ int rc = 0;
/* verify vf_qid */
if (setup_q->vf_qid >= vf_rxq_count(vf)) {
BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
setup_q->vf_qid, vf_rxq_count(vf));
- vf->op_rc = -EINVAL;
+ rc = -EINVAL;
goto response;
}
@@ -1113,10 +1330,14 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_queue_init_params *init_p;
struct bnx2x_queue_setup_params *setup_p;
+ if (bnx2x_vfq_is_leading(q))
+ bnx2x_leading_vfq_init(bp, vf, q);
+
/* re-init the VF operation context */
- memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
- setup_p = &vf->op_params.qctor.prep_qsetup;
- init_p = &vf->op_params.qctor.qstate.params.init;
+ memset(&qctor, 0,
+ sizeof(struct bnx2x_vf_queue_construct_params));
+ setup_p = &qctor.prep_qsetup;
+ init_p = &qctor.qstate.params.init;
/* activate immediately */
__set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);
@@ -1190,48 +1411,46 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
setup_q->rxq.cache_line_log;
rxq_params->sb_cq_index = setup_q->rxq.sb_index;
+ /* rx setup - multicast engine */
+ if (bnx2x_vfq_is_leading(q)) {
+ u8 mcast_id = FW_VF_HANDLE(vf->abs_vfid);
+
+ rxq_params->mcast_engine_id = mcast_id;
+ __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
+ }
+
bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
q->index, q->sb_idx);
}
/* complete the preparations */
- bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type);
+ bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type);
- vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index);
- if (vf->op_rc)
+ rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor);
+ if (rc)
goto response;
- return;
}
response:
- bnx2x_vf_mbx_resp(bp, vf);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
}
-enum bnx2x_vfop_filters_state {
- BNX2X_VFOP_MBX_Q_FILTERS_MACS,
- BNX2X_VFOP_MBX_Q_FILTERS_VLANS,
- BNX2X_VFOP_MBX_Q_FILTERS_RXMODE,
- BNX2X_VFOP_MBX_Q_FILTERS_MCAST,
- BNX2X_VFOP_MBX_Q_FILTERS_DONE
-};
-
static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct vfpf_set_q_filters_tlv *tlv,
- struct bnx2x_vfop_filters **pfl,
+ struct bnx2x_vf_mac_vlan_filters **pfl,
u32 type_flag)
{
int i, j;
- struct bnx2x_vfop_filters *fl = NULL;
+ struct bnx2x_vf_mac_vlan_filters *fl = NULL;
size_t fsz;
- fsz = tlv->n_mac_vlan_filters * sizeof(struct bnx2x_vfop_filter) +
- sizeof(struct bnx2x_vfop_filters);
+ fsz = tlv->n_mac_vlan_filters *
+ sizeof(struct bnx2x_vf_mac_vlan_filter) +
+ sizeof(struct bnx2x_vf_mac_vlan_filters);
fl = kzalloc(fsz, GFP_KERNEL);
if (!fl)
return -ENOMEM;
- INIT_LIST_HEAD(&fl->head);
-
for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];
@@ -1239,17 +1458,17 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
continue;
if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
fl->filters[j].mac = msg_filter->mac;
- fl->filters[j].type = BNX2X_VFOP_FILTER_MAC;
+ fl->filters[j].type = BNX2X_VF_FILTER_MAC;
} else {
fl->filters[j].vid = msg_filter->vlan_tag;
- fl->filters[j].type = BNX2X_VFOP_FILTER_VLAN;
+ fl->filters[j].type = BNX2X_VF_FILTER_VLAN;
}
fl->filters[j].add =
(msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
true : false;
- list_add_tail(&fl->filters[j++].link, &fl->head);
+ fl->count++;
}
- if (list_empty(&fl->head))
+ if (!fl->count)
kfree(fl);
else
*pfl = fl;
@@ -1289,176 +1508,96 @@ static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
#define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID
#define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID
-static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
- int rc;
+ int rc = 0;
struct vfpf_set_q_filters_tlv *msg =
&BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- enum bnx2x_vfop_filters_state state = vfop->state;
+ /* check for any mac/vlan changes */
+ if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
+ /* build mac list */
+ struct bnx2x_vf_mac_vlan_filters *fl = NULL;
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vfop_mbx_qfilters,
- .block = false,
- };
-
- DP(BNX2X_MSG_IOV, "STATE: %d\n", state);
-
- if (vfop->rc < 0)
- goto op_err;
-
- switch (state) {
- case BNX2X_VFOP_MBX_Q_FILTERS_MACS:
- /* next state */
- vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_VLANS;
+ rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+ VFPF_MAC_FILTER);
+ if (rc)
+ goto op_err;
- /* check for any vlan/mac changes */
- if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
- /* build mac list */
- struct bnx2x_vfop_filters *fl = NULL;
+ if (fl) {
- vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
- VFPF_MAC_FILTER);
- if (vfop->rc)
+ /* set mac list */
+ rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
+ msg->vf_qid,
+ false);
+ if (rc)
goto op_err;
-
- if (fl) {
- /* set mac list */
- rc = bnx2x_vfop_mac_list_cmd(bp, vf, &cmd, fl,
- msg->vf_qid,
- false);
- if (rc) {
- vfop->rc = rc;
- goto op_err;
- }
- return;
- }
}
- /* fall through */
-
- case BNX2X_VFOP_MBX_Q_FILTERS_VLANS:
- /* next state */
- vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_RXMODE;
- /* check for any vlan/mac changes */
- if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
- /* build vlan list */
- struct bnx2x_vfop_filters *fl = NULL;
+ /* build vlan list */
+ fl = NULL;
- vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
- VFPF_VLAN_FILTER);
- if (vfop->rc)
+ rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+ VFPF_VLAN_FILTER);
+ if (rc)
+ goto op_err;
+
+ if (fl) {
+ /* set vlan list */
+ rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
+ msg->vf_qid,
+ false);
+ if (rc)
goto op_err;
-
- if (fl) {
- /* set vlan list */
- rc = bnx2x_vfop_vlan_list_cmd(bp, vf, &cmd, fl,
- msg->vf_qid,
- false);
- if (rc) {
- vfop->rc = rc;
- goto op_err;
- }
- return;
- }
}
- /* fall through */
-
- case BNX2X_VFOP_MBX_Q_FILTERS_RXMODE:
- /* next state */
- vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_MCAST;
-
- if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
- unsigned long accept = 0;
-
- /* covert VF-PF if mask to bnx2x accept flags */
- if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
- __set_bit(BNX2X_ACCEPT_UNICAST, &accept);
-
- if (msg->rx_mask &
- VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST)
- __set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
-
- if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST)
- __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept);
+ }
- if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST)
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept);
+ if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
+ unsigned long accept = 0;
+ struct pf_vf_bulletin_content *bulletin =
+ BP_VF_BULLETIN(bp, vf->index);
- if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST)
- __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
+ /* Ignore VF requested mode; instead set a regular mode */
+ if (msg->rx_mask != VFPF_RX_MASK_ACCEPT_NONE) {
+ __set_bit(BNX2X_ACCEPT_UNICAST, &accept);
+ __set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
+ }
- /* A packet arriving the vf's mac should be accepted
- * with any vlan
- */
+ /* A packet arriving at the vf's mac should be accepted
+ * with any vlan, unless a vlan has already been
+ * configured.
+ */
+ if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
- /* set rx-mode */
- rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
- msg->vf_qid, accept);
- if (rc) {
- vfop->rc = rc;
- goto op_err;
- }
- return;
- }
- /* fall through */
-
- case BNX2X_VFOP_MBX_Q_FILTERS_MCAST:
- /* next state */
- vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_DONE;
-
- if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
- /* set mcasts */
- rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, msg->multicast,
- msg->n_multicast, false);
- if (rc) {
- vfop->rc = rc;
- goto op_err;
- }
- return;
- }
- /* fall through */
-op_done:
- case BNX2X_VFOP_MBX_Q_FILTERS_DONE:
- bnx2x_vfop_end(bp, vf, vfop);
- return;
-op_err:
- BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
- vf->abs_vfid, msg->vf_qid, vfop->rc);
- goto op_done;
-
- default:
- bnx2x_vfop_default(state);
+ /* set rx-mode */
+ rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept);
+ if (rc)
+ goto op_err;
}
-}
-static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- if (vfop) {
- bnx2x_vfop_opset(BNX2X_VFOP_MBX_Q_FILTERS_MACS,
- bnx2x_vfop_mbx_qfilters, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mbx_qfilters,
- cmd->block);
+ if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
+ /* set mcasts */
+ rc = bnx2x_vf_mcast(bp, vf, msg->multicast,
+ msg->n_multicast, false);
+ if (rc)
+ goto op_err;
}
- return -ENOMEM;
+op_err:
+ if (rc)
+ BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
+ vf->abs_vfid, msg->vf_qid, rc);
+ return rc;
}
-static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vf_mbx *mbx)
+static int bnx2x_filters_validate_mac(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct vfpf_set_q_filters_tlv *filters)
{
- struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vf_mbx_resp,
- .block = false,
- };
+ int rc = 0;
/* if a mac was already set for this VF via the set vf mac ndo, we only
* accept mac configurations of that mac. Why accept them at all?
@@ -1470,23 +1609,71 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
if (filters->n_mac_vlan_filters > 1) {
BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
vf->abs_vfid);
- vf->op_rc = -EPERM;
+ rc = -EPERM;
goto response;
}
/* ...and only the mac set by the ndo */
if (filters->n_mac_vlan_filters == 1 &&
- memcmp(filters->filters->mac, bulletin->mac, ETH_ALEN)) {
+ !ether_addr_equal(filters->filters->mac, bulletin->mac)) {
BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
vf->abs_vfid);
- vf->op_rc = -EPERM;
+ rc = -EPERM;
goto response;
}
}
+response:
+ return rc;
+}
+
+static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct vfpf_set_q_filters_tlv *filters)
+{
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
+ int rc = 0;
+
+ /* if vlan was set by hypervisor we don't allow guest to config vlan */
+ if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
+ int i;
+
+ /* search for vlan filters */
+ for (i = 0; i < filters->n_mac_vlan_filters; i++) {
+ if (filters->filters[i].flags &
+ VFPF_Q_FILTER_VLAN_TAG_VALID) {
+ BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
+ vf->abs_vfid);
+ rc = -EPERM;
+ goto response;
+ }
+ }
+ }
+
/* verify vf_qid */
- if (filters->vf_qid > vf_rxq_count(vf))
+ if (filters->vf_qid > vf_rxq_count(vf)) {
+ rc = -EPERM;
+ goto response;
+ }
+
+response:
+ return rc;
+}
+
+static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
+ int rc;
+
+ rc = bnx2x_filters_validate_mac(bp, vf, filters);
+ if (rc)
+ goto response;
+
+ rc = bnx2x_filters_validate_vlan(bp, vf, filters);
+ if (rc)
goto response;
DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
@@ -1496,60 +1683,169 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
/* print q_filter message */
bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);
- vf->op_rc = bnx2x_vfop_mbx_qfilters_cmd(bp, vf, &cmd);
- if (vf->op_rc)
- goto response;
- return;
-
+ rc = bnx2x_vf_mbx_qfilters(bp, vf);
response:
- bnx2x_vf_mbx_resp(bp, vf);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
}
static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
int qid = mbx->msg->req.q_op.vf_qid;
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vf_mbx_resp,
- .block = false,
- };
+ int rc;
DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
vf->abs_vfid, qid);
- vf->op_rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qid);
- if (vf->op_rc)
- bnx2x_vf_mbx_resp(bp, vf);
+ rc = bnx2x_vf_queue_teardown(bp, vf, qid);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
}
static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vf_mbx_resp,
- .block = false,
- };
+ int rc;
DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);
- vf->op_rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
- if (vf->op_rc)
- bnx2x_vf_mbx_resp(bp, vf);
+ rc = bnx2x_vf_close(bp, vf);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
}
static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vf_mbx_resp,
- .block = false,
- };
+ int rc;
DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);
- vf->op_rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
- if (vf->op_rc)
- bnx2x_vf_mbx_resp(bp, vf);
+ rc = bnx2x_vf_free(bp, vf);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct bnx2x_config_rss_params rss;
+ struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
+ int rc = 0;
+
+ if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
+ rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
+ BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
+ vf->index);
+ rc = -EINVAL;
+ goto mbx_resp;
+ }
+
+ memset(&rss, 0, sizeof(struct bnx2x_config_rss_params));
+
+ /* set vfop params according to rss tlv */
+ memcpy(rss.ind_table, rss_tlv->ind_table,
+ T_ETH_INDIRECTION_TABLE_SIZE);
+ memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key));
+ rss.rss_obj = &vf->rss_conf_obj;
+ rss.rss_result_mask = rss_tlv->rss_result_mask;
+
+ /* flags handled individually for backward/forward compatibility */
+ rss.rss_flags = 0;
+ rss.ramrod_flags = 0;
+
+ if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
+ __set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
+ __set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
+ __set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
+ __set_bit(BNX2X_RSS_IPV4, &rss.rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
+ __set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
+ __set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
+ __set_bit(BNX2X_RSS_IPV6, &rss.rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
+ __set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
+ __set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags);
+
+ if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
+ rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
+ (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
+ rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
+ BNX2X_ERR("about to hit a FW assert. aborting...\n");
+ rc = -EINVAL;
+ goto mbx_resp;
+ }
+
+ rc = bnx2x_vf_rss_update(bp, vf, &rss);
+mbx_resp:
+ bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
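
The validation near the end of bnx2x_vf_mbx_update_rss() above enforces that a UDP hash may only be enabled together with the TCP hash of the same IP version; per the driver's own comment, firmware asserts otherwise. The rule as a standalone predicate (demo_rss_flags_valid is hypothetical):

	static bool demo_rss_flags_valid(u32 flags)
	{
		if ((flags & VFPF_RSS_IPV4_UDP) && !(flags & VFPF_RSS_IPV4_TCP))
			return false;	/* IPv4 UDP hash requires IPv4 TCP hash */
		if ((flags & VFPF_RSS_IPV6_UDP) && !(flags & VFPF_RSS_IPV6_TCP))
			return false;	/* same rule for IPv6 */
		return true;
	}
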
+static int bnx2x_validate_tpa_params(struct bnx2x *bp,
+ struct vfpf_tpa_tlv *tpa_tlv)
+{
+ int rc = 0;
+
+ if (tpa_tlv->tpa_client_info.max_sges_for_packet >
+ U_ETH_MAX_SGES_FOR_PACKET) {
+ rc = -EINVAL;
+ BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
+ tpa_tlv->tpa_client_info.max_sges_for_packet,
+ U_ETH_MAX_SGES_FOR_PACKET);
+ }
+
+ if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
+ rc = -EINVAL;
+ BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
+ tpa_tlv->tpa_client_info.max_tpa_queues,
+ MAX_AGG_QS(bp));
+ }
+
+ return rc;
+}
+
+static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct bnx2x_queue_update_tpa_params vf_op_params;
+ struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
+ int rc = 0;
+
+ memset(&vf_op_params, 0, sizeof(vf_op_params));
+
+ if (bnx2x_validate_tpa_params(bp, tpa_tlv))
+ goto mbx_resp;
+
+ vf_op_params.complete_on_both_clients =
+ tpa_tlv->tpa_client_info.complete_on_both_clients;
+ vf_op_params.dont_verify_thr =
+ tpa_tlv->tpa_client_info.dont_verify_thr;
+ vf_op_params.max_agg_sz =
+ tpa_tlv->tpa_client_info.max_agg_size;
+ vf_op_params.max_sges_pkt =
+ tpa_tlv->tpa_client_info.max_sges_for_packet;
+ vf_op_params.max_tpa_queues =
+ tpa_tlv->tpa_client_info.max_tpa_queues;
+ vf_op_params.sge_buff_sz =
+ tpa_tlv->tpa_client_info.sge_buff_size;
+ vf_op_params.sge_pause_thr_high =
+ tpa_tlv->tpa_client_info.sge_pause_thr_high;
+ vf_op_params.sge_pause_thr_low =
+ tpa_tlv->tpa_client_info.sge_pause_thr_low;
+ vf_op_params.tpa_mode =
+ tpa_tlv->tpa_client_info.tpa_mode;
+ vf_op_params.update_ipv4 =
+ tpa_tlv->tpa_client_info.update_ipv4;
+ vf_op_params.update_ipv6 =
+ tpa_tlv->tpa_client_info.update_ipv6;
+
+ rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params);
+
+mbx_resp:
+ bnx2x_vf_mbx_resp(bp, vf, rc);
}
/* dispatch request */
@@ -1569,25 +1865,31 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
switch (mbx->first_tlv.tl.type) {
case CHANNEL_TLV_ACQUIRE:
bnx2x_vf_mbx_acquire(bp, vf, mbx);
- break;
+ return;
case CHANNEL_TLV_INIT:
bnx2x_vf_mbx_init_vf(bp, vf, mbx);
- break;
+ return;
case CHANNEL_TLV_SETUP_Q:
bnx2x_vf_mbx_setup_q(bp, vf, mbx);
- break;
+ return;
case CHANNEL_TLV_SET_Q_FILTERS:
bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
- break;
+ return;
case CHANNEL_TLV_TEARDOWN_Q:
bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
- break;
+ return;
case CHANNEL_TLV_CLOSE:
bnx2x_vf_mbx_close_vf(bp, vf, mbx);
- break;
+ return;
case CHANNEL_TLV_RELEASE:
bnx2x_vf_mbx_release_vf(bp, vf, mbx);
- break;
+ return;
+ case CHANNEL_TLV_UPDATE_RSS:
+ bnx2x_vf_mbx_update_rss(bp, vf, mbx);
+ return;
+ case CHANNEL_TLV_UPDATE_TPA:
+ bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
+ return;
}
} else {
@@ -1603,36 +1905,28 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
for (i = 0; i < 20; i++)
DP_CONT(BNX2X_MSG_IOV, "%x ",
mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
+ }
- /* test whether we can respond to the VF (do we have an address
- * for it?)
+ /* can we respond to the VF (do we have an address for it?) */
+ if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
+ /* notify the VF that we do not support this request */
+ bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED);
+ } else {
+ /* can't send a response since this VF is unknown to us
+ * just ack the FW to release the mailbox and unlock
+ * the channel.
*/
- if (vf->state == VF_ACQUIRED) {
- /* mbx_resp uses the op_rc of the VF */
- vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
-
- /* notify the VF that we do not support this request */
- bnx2x_vf_mbx_resp(bp, vf);
- } else {
- /* can't send a response since this VF is unknown to us
- * just ack the FW to release the mailbox and unlock
- * the channel.
- */
- storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
- mmiowb();
- bnx2x_unlock_vf_pf_channel(bp, vf,
- mbx->first_tlv.tl.type);
- }
+ storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
+ /* Firmware ack should be written before unlocking channel */
+ mmiowb();
+ bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
}
}
-/* handle new vf-pf message */
-void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
+void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
+ struct vf_pf_event_data *vfpf_event)
{
- struct bnx2x_virtf *vf;
- struct bnx2x_vf_mbx *mbx;
u8 vf_idx;
- int rc;
DP(BNX2X_MSG_IOV,
"vf pf event received: vfid %d, address_hi %x, address lo %x",
@@ -1644,47 +1938,73 @@ void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
BNX2X_NR_VIRTFN(bp)) {
BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
- goto mbx_done;
+ return;
}
+
vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
- mbx = BP_VF_MBX(bp, vf_idx);
- /* verify an event is not currently being processed -
- * debug failsafe only
- */
- if (mbx->flags & VF_MSG_INPROCESS) {
- BNX2X_ERR("Previous message is still being processed, vf_id %d\n",
- vfpf_event->vf_id);
- goto mbx_done;
- }
- vf = BP_VF(bp, vf_idx);
+ /* Update VFDB with current message and schedule its handling */
+ mutex_lock(&BP_VFDB(bp)->event_mutex);
+ BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi;
+ BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo;
+ BP_VFDB(bp)->event_occur |= (1ULL << vf_idx);
+ mutex_unlock(&BP_VFDB(bp)->event_mutex);
- /* save the VF message address */
- mbx->vf_addr_hi = vfpf_event->msg_addr_hi;
- mbx->vf_addr_lo = vfpf_event->msg_addr_lo;
- DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
- mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
+ bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG);
+}
- /* dmae to get the VF request */
- rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid,
- mbx->vf_addr_hi, mbx->vf_addr_lo,
- sizeof(union vfpf_tlvs)/4);
- if (rc) {
- BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid);
- goto mbx_error;
- }
+/* handle new vf-pf messages */
+void bnx2x_vf_mbx(struct bnx2x *bp)
+{
+ struct bnx2x_vfdb *vfdb = BP_VFDB(bp);
+ u64 events;
+ u8 vf_idx;
+ int rc;
- /* process the VF message header */
- mbx->first_tlv = mbx->msg->req.first_tlv;
+ if (!vfdb)
+ return;
- /* dispatch the request (will prepare the response) */
- bnx2x_vf_mbx_request(bp, vf, mbx);
- goto mbx_done;
+ mutex_lock(&vfdb->event_mutex);
+ events = vfdb->event_occur;
+ vfdb->event_occur = 0;
+ mutex_unlock(&vfdb->event_mutex);
-mbx_error:
- bnx2x_vf_release(bp, vf, false); /* non blocking */
-mbx_done:
- return;
+ for_each_vf(bp, vf_idx) {
+ struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx);
+ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+ /* Handle only VFs that have a pending event */
+ if (!(events & (1ULL << vf_idx)))
+ continue;
+
+ DP(BNX2X_MSG_IOV,
+ "Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n",
+ vf_idx, mbx->vf_addr_hi, mbx->vf_addr_lo,
+ mbx->first_tlv.resp_msg_offset);
+
+ /* dmae to get the VF request */
+ rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping,
+ vf->abs_vfid, mbx->vf_addr_hi,
+ mbx->vf_addr_lo,
+ sizeof(union vfpf_tlvs)/4);
+ if (rc) {
+ BNX2X_ERR("Failed to copy request VF %d\n",
+ vf->abs_vfid);
+ bnx2x_vf_release(bp, vf);
+ return;
+ }
+
+ /* process the VF message header */
+ mbx->first_tlv = mbx->msg->req.first_tlv;
+
+ /* Zero the response buffer so stale TLVs left over from a
+ * previous reply are not mistaken for a chained response.
+ */
+ memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
+
+ /* dispatch the request (will prepare the response) */
+ bnx2x_vf_mbx_request(bp, vf, mbx);
+ }
}
/* propagate local bulletin board to vf */
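Together, bnx2x_vf_mbx_schedule() and the reworked bnx2x_vf_mbx() split mailbox handling into a cheap producer (store the VF's mailbox address, set a bit in a 64-bit pending mask, kick the IOV task) and a sleepable consumer (snapshot and clear the mask under event_mutex, then DMA and dispatch each pending request). A standalone sketch of the same pattern, with illustrative names:

	#include <pthread.h>
	#include <stdint.h>

	struct event_db {
		pthread_mutex_t lock;	/* stands in for vfdb->event_mutex */
		uint64_t pending;	/* stands in for vfdb->event_occur */
	};

	/* producer: cheap enough for the event-queue fast path */
	static void schedule_vf_event(struct event_db *db, unsigned int vf_idx)
	{
		pthread_mutex_lock(&db->lock);
		db->pending |= 1ULL << vf_idx;
		pthread_mutex_unlock(&db->lock);
		/* then kick the worker (bnx2x_schedule_iov_task in the patch) */
	}

	/* consumer: snapshot and clear under the lock, dispatch outside it
	 * so new events can be queued while old ones are being served
	 */
	static void handle_vf_events(struct event_db *db,
				     void (*handle)(unsigned int vf_idx))
	{
		uint64_t events;
		unsigned int i;

		pthread_mutex_lock(&db->lock);
		events = db->pending;
		db->pending = 0;
		pthread_mutex_unlock(&db->lock);

		for (i = 0; i < 64; i++)
			if (events & (1ULL << i))
				handle(i);
	}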
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index f3ad174a3a6..e21e706762c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -12,8 +12,8 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Ariel Elior <ariele@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Ariel Elior <ariel.elior@qlogic.com>
*/
#ifndef VF_PF_IF_H
#define VF_PF_IF_H
@@ -51,6 +51,7 @@ struct hw_sb_info {
#define VFPF_QUEUE_FLG_COS 0x0080
#define VFPF_QUEUE_FLG_HC 0x0100
#define VFPF_QUEUE_FLG_DHC 0x0200
+#define VFPF_QUEUE_FLG_LEADING_RSS 0x0400
#define VFPF_QUEUE_DROP_IP_CS_ERR (1 << 0)
#define VFPF_QUEUE_DROP_TCP_CS_ERR (1 << 1)
@@ -131,6 +132,27 @@ struct vfpf_q_op_tlv {
u8 padding[3];
};
+/* receive side scaling tlv */
+struct vfpf_rss_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u32 rss_flags;
+#define VFPF_RSS_MODE_DISABLED (1 << 0)
+#define VFPF_RSS_MODE_REGULAR (1 << 1)
+#define VFPF_RSS_SET_SRCH (1 << 2)
+#define VFPF_RSS_IPV4 (1 << 3)
+#define VFPF_RSS_IPV4_TCP (1 << 4)
+#define VFPF_RSS_IPV4_UDP (1 << 5)
+#define VFPF_RSS_IPV6 (1 << 6)
+#define VFPF_RSS_IPV6_TCP (1 << 7)
+#define VFPF_RSS_IPV6_UDP (1 << 8)
+ u8 rss_result_mask;
+ u8 ind_table_size;
+ u8 rss_key_size;
+ u8 padding;
+ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ u32 rss_key[T_ETH_RSS_KEY]; /* hash values */
+};
+
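A VF drives this TLV with CHANNEL_TLV_UPDATE_RSS. A hedged sketch of filling it, assuming kernel context (the struct and flag names come from this header; the round-robin table, the mask choice and the prandom_u32() key are illustrative policy, not the driver's exact code):

	static void fill_rss_tlv(struct vfpf_rss_tlv *req, u8 num_rx_queues)
	{
		int i;

		req->rss_flags = VFPF_RSS_MODE_REGULAR |
				 VFPF_RSS_IPV4 | VFPF_RSS_IPV4_TCP |
				 VFPF_RSS_IPV6 | VFPF_RSS_IPV6_TCP;
		req->rss_result_mask = T_ETH_INDIRECTION_TABLE_SIZE - 1;
		req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
		req->rss_key_size = T_ETH_RSS_KEY;

		/* spread flows round-robin across the VF's rx queues */
		for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
			req->ind_table[i] = i % num_rx_queues;

		/* fresh random key for each configuration */
		for (i = 0; i < T_ETH_RSS_KEY; i++)
			req->rss_key[i] = prandom_u32();
	}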
/* acquire response tlv - carries the allocated resources */
struct pfvf_acquire_resp_tlv {
struct pfvf_tlv hdr;
@@ -140,6 +162,7 @@ struct pfvf_acquire_resp_tlv {
#define PFVF_CAP_RSS 0x00000001
#define PFVF_CAP_DHC 0x00000002
#define PFVF_CAP_TPA 0x00000004
+#define PFVF_CAP_TPA_UPDATE 0x00000008
char fw_ver[32];
u16 db_size;
u8 indices_per_sb;
@@ -166,12 +189,26 @@ struct pfvf_acquire_resp_tlv {
} resc;
};
+struct vfpf_port_phys_id_resp_tlv {
+ struct channel_tlv tl;
+ u8 id[ETH_ALEN];
+ u8 padding[2];
+};
+
+#define VFPF_INIT_FLG_STATS_COALESCE (1 << 0) /* when set, the stats of
+ * all the VF's queues are
+ * coalesced on the leading
+ * RSS queue
+ */
+
/* Init VF */
struct vfpf_init_tlv {
struct vfpf_first_tlv first_tlv;
aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; /* vf_sb based */
aligned_u64 spq_addr;
aligned_u64 stats_addr;
+ u16 stats_stride;
+ u32 flags;
+ u32 padding[2];
};
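stats_stride tells the PF the distance between consecutive per-queue statistics buffers inside the VF's stats block, while VFPF_INIT_FLG_STATS_COALESCE asks for all of them to land on the leading RSS queue. The addressing the stride implies, as an illustrative helper:

	static u64 vf_queue_stats_addr(u64 stats_addr, u16 stats_stride,
				       unsigned int queue_idx)
	{
		return stats_addr + (u64)queue_idx * stats_stride;
	}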
/* Setup Queue */
@@ -267,6 +304,25 @@ struct vfpf_set_q_filters_tlv {
u32 rx_mask; /* see mask constants at the top of the file */
};
+struct vfpf_tpa_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ struct vf_pf_tpa_client_info {
+ aligned_u64 sge_addr[PFVF_MAX_QUEUES_PER_VF];
+ u8 update_ipv4;
+ u8 update_ipv6;
+ u8 max_tpa_queues;
+ u8 max_sges_for_packet;
+ u8 complete_on_both_clients;
+ u8 dont_verify_thr;
+ u8 tpa_mode;
+ u16 sge_buff_size;
+ u16 max_agg_size;
+ u16 sge_pause_thr_low;
+ u16 sge_pause_thr_high;
+ } tpa_client_info;
+};
+
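Since older PFs will not recognize CHANNEL_TLV_UPDATE_TPA, a VF is expected to gate the request on PFVF_CAP_TPA_UPDATE from the acquire response. A sketch, assuming the pf_cap capability word sits in the response's pfdev_info as in the full header:

	static int vf_can_update_tpa(struct pfvf_acquire_resp_tlv *resp)
	{
		/* an old PF would just NAK the new TLV; don't send it */
		if (!(resp->pfdev_info.pf_cap & PFVF_CAP_TPA_UPDATE))
			return -EOPNOTSUPP;
		return 0;
	}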
/* close VF (disable VF) */
struct vfpf_close_tlv {
struct vfpf_first_tlv first_tlv;
@@ -293,13 +349,15 @@ union vfpf_tlvs {
struct vfpf_q_op_tlv q_op;
struct vfpf_setup_q_tlv setup_q;
struct vfpf_set_q_filters_tlv set_q_filters;
- struct vfpf_release_tlv release;
- struct channel_list_end_tlv list_end;
+ struct vfpf_release_tlv release;
+ struct vfpf_rss_tlv update_rss;
+ struct vfpf_tpa_tlv update_tpa;
+ struct channel_list_end_tlv list_end;
struct tlv_buffer_size tlv_buf_size;
};
union pfvf_tlvs {
- struct pfvf_general_resp_tlv general_resp;
+ struct pfvf_general_resp_tlv general_resp;
struct pfvf_acquire_resp_tlv acquire_resp;
struct channel_list_end_tlv list_end;
struct tlv_buffer_size tlv_buf_size;
@@ -355,14 +413,20 @@ enum channel_tlvs {
CHANNEL_TLV_INIT,
CHANNEL_TLV_SETUP_Q,
CHANNEL_TLV_SET_Q_FILTERS,
+ CHANNEL_TLV_ACTIVATE_Q,
+ CHANNEL_TLV_DEACTIVATE_Q,
CHANNEL_TLV_TEARDOWN_Q,
CHANNEL_TLV_CLOSE,
CHANNEL_TLV_RELEASE,
+ CHANNEL_TLV_UPDATE_RSS_DEPRECATED,
CHANNEL_TLV_PF_RELEASE_VF,
CHANNEL_TLV_LIST_END,
CHANNEL_TLV_FLR,
CHANNEL_TLV_PF_SET_MAC,
CHANNEL_TLV_PF_SET_VLAN,
+ CHANNEL_TLV_UPDATE_RSS,
+ CHANNEL_TLV_PHYS_PORT_ID,
+ CHANNEL_TLV_UPDATE_TPA,
CHANNEL_TLV_MAX
};
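Note that CHANNEL_TLV_UPDATE_RSS_DEPRECATED keeps its slot and all new types are appended before CHANNEL_TLV_MAX: these values travel over the vf-pf channel between independently updated VF and PF drivers, so entries may be retired but never renumbered or reused. A hedged sketch of a table keyed by the wire values (the driver itself dispatches with a switch; the table is illustrative):

	static void (*vf_mbx_handlers[CHANNEL_TLV_MAX])(struct bnx2x *bp,
							struct bnx2x_virtf *vf,
							struct bnx2x_vf_mbx *mbx) = {
		[CHANNEL_TLV_UPDATE_RSS] = bnx2x_vf_mbx_update_rss,
		[CHANNEL_TLV_UPDATE_TPA] = bnx2x_vf_mbx_update_tpa,
	};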