Diffstat (limited to 'drivers/net/ethernet/broadcom/bnx2x')
26 files changed, 24415 insertions, 9477 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2x/Makefile b/drivers/net/ethernet/broadcom/bnx2x/Makefile index 48fbdd48f88..116762daae0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/Makefile +++ b/drivers/net/ethernet/broadcom/bnx2x/Makefile @@ -4,4 +4,5 @@ obj-$(CONFIG_BNX2X) += bnx2x.o -bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o +bnx2x-y := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o +bnx2x-$(CONFIG_BNX2X_SRIOV) += bnx2x_vfpf.o bnx2x_sriov.o diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 8c73d34b2ff..8206a293e6b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1,21 +1,24 @@ /* bnx2x.h: Broadcom Everest network driver. * - * Copyright (c) 2007-2011 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * - * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com> * Written by: Eliezer Tamir * Based on code from Michael Chan's bnx2 driver */ #ifndef BNX2X_H #define BNX2X_H + +#include <linux/pci.h> #include <linux/netdevice.h> #include <linux/dma-mapping.h> #include <linux/types.h> +#include <linux/pci_regs.h> /* compilation time flags */ @@ -23,69 +26,83 @@ * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION "1.70.35-0" -#define DRV_MODULE_RELDATE "2011/11/10" +#define DRV_MODULE_VERSION "1.78.19-0" +#define DRV_MODULE_RELDATE "2014/02/10" #define BNX2X_BC_VER 0x040200 #if defined(CONFIG_DCB) #define BCM_DCBNL #endif -#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) -#define BCM_CNIC 1 + +#include "bnx2x_hsi.h" + #include "../cnic_if.h" -#endif -#ifdef BCM_CNIC -#define BNX2X_MIN_MSIX_VEC_CNT 3 -#define BNX2X_MSIX_VEC_FP_START 2 -#else -#define BNX2X_MIN_MSIX_VEC_CNT 2 -#define BNX2X_MSIX_VEC_FP_START 1 -#endif +#define BNX2X_MIN_MSIX_VEC_CNT(bp) ((bp)->min_msix_vec_cnt) #include <linux/mdio.h> #include "bnx2x_reg.h" #include "bnx2x_fw_defs.h" -#include "bnx2x_hsi.h" +#include "bnx2x_mfw_req.h" #include "bnx2x_link.h" #include "bnx2x_sp.h" #include "bnx2x_dcb.h" #include "bnx2x_stats.h" +#include "bnx2x_vfpf.h" + +enum bnx2x_int_mode { + BNX2X_INT_MODE_MSIX, + BNX2X_INT_MODE_INTX, + BNX2X_INT_MODE_MSI +}; /* error/debug prints */ #define DRV_MODULE_NAME "bnx2x" /* for messages that are currently off */ -#define BNX2X_MSG_OFF 0 -#define BNX2X_MSG_MCP 0x010000 /* was: NETIF_MSG_HW */ -#define BNX2X_MSG_STATS 0x020000 /* was: NETIF_MSG_TIMER */ -#define BNX2X_MSG_NVM 0x040000 /* was: NETIF_MSG_HW */ -#define BNX2X_MSG_DMAE 0x080000 /* was: NETIF_MSG_HW */ -#define BNX2X_MSG_SP 0x100000 /* was: NETIF_MSG_INTR */ -#define BNX2X_MSG_FP 0x200000 /* was: NETIF_MSG_INTR */ +#define BNX2X_MSG_OFF 0x0 +#define BNX2X_MSG_MCP 0x0010000 /* was: NETIF_MSG_HW */ +#define BNX2X_MSG_STATS 0x0020000 /* was: NETIF_MSG_TIMER */ +#define BNX2X_MSG_NVM 0x0040000 /* was: NETIF_MSG_HW */ +#define BNX2X_MSG_DMAE 0x0080000 /* was: NETIF_MSG_HW */ +#define BNX2X_MSG_SP 0x0100000 /* was: NETIF_MSG_INTR */ +#define BNX2X_MSG_FP 0x0200000 /* was: NETIF_MSG_INTR */ +#define BNX2X_MSG_IOV 0x0800000 +#define BNX2X_MSG_IDLE 0x2000000 /* used for idle check*/ +#define BNX2X_MSG_ETHTOOL 
0x4000000 +#define BNX2X_MSG_DCB 0x8000000 /* regular debug print */ +#define DP_INNER(fmt, ...) \ + pr_notice("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + bp->dev ? (bp->dev->name) : "?", \ + ##__VA_ARGS__); + #define DP(__mask, fmt, ...) \ do { \ - if (bp->msg_enable & (__mask)) \ - pr_notice("[%s:%d(%s)]" fmt, \ - __func__, __LINE__, \ - bp->dev ? (bp->dev->name) : "?", \ - ##__VA_ARGS__); \ + if (unlikely(bp->msg_enable & (__mask))) \ + DP_INNER(fmt, ##__VA_ARGS__); \ +} while (0) + +#define DP_AND(__mask, fmt, ...) \ +do { \ + if (unlikely((bp->msg_enable & (__mask)) == __mask)) \ + DP_INNER(fmt, ##__VA_ARGS__); \ } while (0) #define DP_CONT(__mask, fmt, ...) \ do { \ - if (bp->msg_enable & (__mask)) \ + if (unlikely(bp->msg_enable & (__mask))) \ pr_cont(fmt, ##__VA_ARGS__); \ } while (0) /* errors debug print */ #define BNX2X_DBG_ERR(fmt, ...) \ do { \ - if (netif_msg_probe(bp)) \ + if (unlikely(netif_msg_probe(bp))) \ pr_err("[%s:%d(%s)]" fmt, \ __func__, __LINE__, \ bp->dev ? (bp->dev->name) : "?", \ @@ -104,40 +121,38 @@ do { \ #define BNX2X_ERROR(fmt, ...) \ pr_err("[%s:%d]" fmt, __func__, __LINE__, ##__VA_ARGS__) - /* before we have a dev->name use dev_info() */ #define BNX2X_DEV_INFO(fmt, ...) \ do { \ - if (netif_msg_probe(bp)) \ + if (unlikely(netif_msg_probe(bp))) \ dev_info(&bp->pdev->dev, fmt, ##__VA_ARGS__); \ } while (0) +/* Error handling */ +void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int); #ifdef BNX2X_STOP_ON_ERROR -void bnx2x_int_disable(struct bnx2x *bp); #define bnx2x_panic() \ do { \ bp->panic = 1; \ BNX2X_ERR("driver assert\n"); \ - bnx2x_int_disable(bp); \ - bnx2x_panic_dump(bp); \ + bnx2x_panic_dump(bp, true); \ } while (0) #else #define bnx2x_panic() \ do { \ bp->panic = 1; \ BNX2X_ERR("driver assert\n"); \ - bnx2x_panic_dump(bp); \ + bnx2x_panic_dump(bp, false); \ } while (0) #endif #define bnx2x_mc_addr(ha) ((ha)->addr) #define bnx2x_uc_addr(ha) ((ha)->addr) -#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff) -#define U64_HI(x) (u32)(((u64)(x)) >> 32) +#define U64_LO(x) ((u32)(((u64)(x)) & 0xffffffff)) +#define U64_HI(x) ((u32)(((u64)(x)) >> 32)) #define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) - #define REG_ADDR(bp, offset) ((bp->regview) + (offset)) #define REG_RD(bp, offset) readl(REG_ADDR(bp, offset)) @@ -240,23 +255,46 @@ enum { BNX2X_MAX_CNIC_ETH_CL_ID_IDX, }; -#define BNX2X_CNIC_START_ETH_CID 48 -enum { +/* use a value high enough to be above all the PFs, which has least significant + * nibble as 8, so when cnic needs to come up with a CID for UIO to use to + * calculate doorbell address according to old doorbell configuration scheme + * (db_msg_sz 1 << 7 * cid + 0x40 DPM offset) it can come up with a valid number. + * We must avoid coming up with cid 8 for iscsi since according to this method + * the designated UIO cid will come out 0 and it has a special handling for that + * case which doesn't suit us. Therefore we will round up to the closest cid which + * has least significant nibble 8 and if it is 8 we will move forward to 0x18.
+ */ + +#define BNX2X_1st_NON_L2_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * \ + (bp)->max_cos) +/* amount of cids traversed by UIO's DPM addition to doorbell */ +#define UIO_DPM 8 +/* roundup to DPM offset */ +#define UIO_ROUNDUP(bp) (roundup(BNX2X_1st_NON_L2_ETH_CID(bp), \ + UIO_DPM)) +/* offset to nearest value which has lsb nibble matching DPM */ +#define UIO_CID_OFFSET(bp) ((UIO_ROUNDUP(bp) + UIO_DPM) % \ + (UIO_DPM * 2)) +/* add offset to rounded-up cid to get a value which could be used with UIO */ +#define UIO_DPM_ALIGN(bp) (UIO_ROUNDUP(bp) + UIO_CID_OFFSET(bp)) +/* but wait - avoid UIO special case for cid 0 */ +#define UIO_DPM_CID0_OFFSET(bp) ((UIO_DPM * 2) * \ + (UIO_DPM_ALIGN(bp) == UIO_DPM)) +/* Properly DPM-aligned CID adjusted to the cid 0 special case */ +#define BNX2X_CNIC_START_ETH_CID(bp) (UIO_DPM_ALIGN(bp) + \ + (UIO_DPM_CID0_OFFSET(bp))) +/* how many cids were wasted - need this value for cid allocation */ +#define UIO_CID_PAD(bp) (BNX2X_CNIC_START_ETH_CID(bp) - \ + BNX2X_1st_NON_L2_ETH_CID(bp)) /* iSCSI L2 */ - BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID, +#define BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp)) /* FCoE L2 */ - BNX2X_FCOE_ETH_CID, -}; +#define BNX2X_FCOE_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp) + 1) -/** Additional rings budgeting */ -#ifdef BCM_CNIC -#define CNIC_PRESENT 1 -#define FCOE_PRESENT 1 -#else -#define CNIC_PRESENT 0 -#define FCOE_PRESENT 0 -#endif /* BCM_CNIC */ -#define NON_ETH_CONTEXT_USE (FCOE_PRESENT) +#define CNIC_SUPPORT(bp) ((bp)->cnic_support) +#define CNIC_ENABLED(bp) ((bp)->cnic_enabled) +#define CNIC_LOADED(bp) ((bp)->cnic_loaded) +#define FCOE_INIT(bp) ((bp)->fcoe_init) #define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \ AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR @@ -268,29 +306,28 @@ enum { #define FIRST_TX_ONLY_COS_INDEX 1 #define FIRST_TX_COS_INDEX 0 -/* defines for decodeing the fastpath index and the cos index out of the - * transmission queue index - */ -#define MAX_TXQS_PER_COS FP_SB_MAX_E1x - -#define TXQ_TO_FP(txq_index) ((txq_index) % MAX_TXQS_PER_COS) -#define TXQ_TO_COS(txq_index) ((txq_index) / MAX_TXQS_PER_COS) - /* rules for calculating the cids of tx-only connections */ -#define CID_TO_FP(cid) ((cid) % MAX_TXQS_PER_COS) -#define CID_COS_TO_TX_ONLY_CID(cid, cos) (cid + cos * MAX_TXQS_PER_COS) +#define CID_TO_FP(cid, bp) ((cid) % BNX2X_NUM_NON_CNIC_QUEUES(bp)) +#define CID_COS_TO_TX_ONLY_CID(cid, cos, bp) \ + (cid + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp)) /* fp index inside class of service range */ -#define FP_COS_TO_TXQ(fp, cos) ((fp)->index + cos * MAX_TXQS_PER_COS) - -/* - * 0..15 eth cos0 - * 16..31 eth cos1 if applicable - * 32..47 eth cos2 If applicable - * fcoe queue follows eth queues (16, 32, 48 depending on cos) +#define FP_COS_TO_TXQ(fp, cos, bp) \ + ((fp)->index + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp)) + +/* Indexes for transmission queues array: + * txdata for RSS i CoS j is at location i + (j * num of RSS) + * txdata for FCoE (if exist) is at location max cos * num of RSS + * txdata for FWD (if exist) is one location after FCoE + * txdata for OOO (if exist) is one location after FWD */ -#define MAX_ETH_TXQ_IDX(bp) (MAX_TXQS_PER_COS * (bp)->max_cos) -#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp)) +enum { + FCOE_TXQ_IDX_OFFSET, + FWD_TXQ_IDX_OFFSET, + OOO_TXQ_IDX_OFFSET, +}; +#define MAX_ETH_TXQ_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos) +#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET) /* fast path */ /* @@ -309,6 +346,7 @@ struct sw_tx_bd { u8 flags;
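
/* A worked example may help with the UIO_* macros above: they ceiling the
 * first free cid to the nearest value whose low nibble is 8, then skip cid 8
 * itself. The following is a minimal user-space sketch of that arithmetic;
 * the (queues * max_cos) inputs are hypothetical, not values from this patch:
 */
#include <stdio.h>

/* mirrors UIO_ROUNDUP()/UIO_CID_OFFSET()/UIO_DPM_ALIGN()/UIO_DPM_CID0_OFFSET();
 * 'first' stands in for BNX2X_1st_NON_L2_ETH_CID(bp) */
static unsigned int cnic_start_eth_cid(unsigned int first)
{
	unsigned int up = (first + 7) & ~7u;		/* roundup(first, UIO_DPM) */
	unsigned int aligned = up + (up + 8) % 16;	/* low nibble becomes 8 */

	if (aligned == 8)	/* UIO maps cid 8 to the special cid 0 */
		aligned += 16;	/* so move forward to 0x18 */
	return aligned;
}

int main(void)
{
	unsigned int first[] = { 4, 12, 48 };
	int i;

	for (i = 0; i < 3; i++)
		printf("first free cid %2u -> cnic start cid 0x%02x (pad %u)\n",
		       first[i], cnic_start_eth_cid(first[i]),
		       cnic_start_eth_cid(first[i]) - first[i]);
	return 0;
}
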
/* Set on the first BD descriptor when there is a split BD */ #define BNX2X_TSO_SPLIT_BD (1<<0) +#define BNX2X_HAS_SECOND_PBD (1<<1) }; struct sw_rx_page { @@ -341,6 +379,9 @@ union db_prod { #define SGE_PAGE_SIZE PAGE_SIZE #define SGE_PAGE_SHIFT PAGE_SHIFT #define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr)) +#define SGE_PAGES (SGE_PAGE_SIZE * PAGES_PER_SGE) +#define TPA_AGG_SIZE min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * \ + SGE_PAGES), 0xffff) /* SGE ring related macros */ #define NUM_RX_SGE_PAGES 2 @@ -360,7 +401,7 @@ union db_prod { /* * Number of required SGEs is the sum of two: * 1. Number of possible opened aggregations (next packet for - * these aggregations will probably consume SGE immidiatelly) + * these aggregations will probably consume SGE immediately) * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only * after placement on BD for new TPA aggregation) * @@ -381,7 +422,6 @@ union db_prod { #define BIT_VEC64_ELEM_SHIFT 6 #define BIT_VEC64_ELEM_MASK ((u64)BIT_VEC64_ELEM_SZ - 1) - #define __BIT_VEC64_SET_BIT(el, bit) \ do { \ el = ((el) | ((u64)0x1 << (bit))); \ @@ -392,7 +432,6 @@ union db_prod { el = ((el) & (~((u64)0x1 << (bit)))); \ } while (0) - #define BIT_VEC64_SET_BIT(vec64, idx) \ __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ (idx) & BIT_VEC64_ELEM_MASK) @@ -413,8 +452,6 @@ union db_prod { /*******************************************************/ - - /* Number of u64 elements in SGE mask array */ #define RX_SGE_MASK_LEN (NUM_RX_SGE / BIT_VEC64_ELEM_SZ) #define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1) @@ -445,6 +482,9 @@ struct bnx2x_agg_info { u16 vlan_tag; u16 len_on_bd; u32 rxhash; + enum pkt_hash_types rxhash_type; + u16 gro_size; + u16 full_page; }; #define Q_STATS_OFFSET32(stat_name) \ @@ -471,26 +511,51 @@ struct bnx2x_fp_txdata { __le16 *tx_cons_sb; int txq_index; + struct bnx2x_fastpath *parent_fp; + int tx_ring_size; +}; + +enum bnx2x_tpa_mode_t { + TPA_MODE_LRO, + TPA_MODE_GRO }; struct bnx2x_fastpath { struct bnx2x *bp; /* parent */ -#define BNX2X_NAPI_WEIGHT 128 struct napi_struct napi; + +#ifdef CONFIG_NET_RX_BUSY_POLL + unsigned int state; +#define BNX2X_FP_STATE_IDLE 0 +#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */ +#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */ +#define BNX2X_FP_STATE_DISABLED (1 << 2) +#define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */ +#define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */ +#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL) +#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD) +#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED) +#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD) + /* protect state */ + spinlock_t lock; +#endif /* CONFIG_NET_RX_BUSY_POLL */ + union host_hc_status_block status_blk; - /* chip independed shortcuts into sb structure */ + /* chip independent shortcuts into sb structure */ __le16 *sb_index_values; __le16 *sb_running_index; - /* chip independed shortcut into rx_prods_offset memory */ + /* chip independent shortcut into rx_prods_offset memory */ u32 ustorm_rx_prods_offset; u32 rx_buf_size; - + u32 rx_frag_size; /* 0 if kmalloced(), or rx_buf_size + NET_SKB_PAD */ dma_addr_t status_blk_mapping; + enum bnx2x_tpa_mode_t mode; + u8 max_cos; /* actual number of active tx coses */ - struct bnx2x_fp_txdata txdata[BNX2X_MULTI_TX_COS]; + struct bnx2x_fp_txdata *txdata_ptr[BNX2X_MULTI_TX_COS]; struct sw_rx_bd 
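
/* The BNX2X_FP_STATE_* bits above form a small hand-rolled lock that
 * arbitrates each fastpath ring between the NAPI poller and sockets doing
 * busy-poll. Below is a user-space model of the NAPI-side acquire, with a
 * pthread mutex standing in for the bh spinlock; an illustrative sketch
 * only, not driver code:
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum {
	FP_IDLE		= 0,
	FP_NAPI		= 1 << 0,
	FP_POLL		= 1 << 1,
	FP_DISABLED	= 1 << 2,
	FP_NAPI_YIELD	= 1 << 3,
	FP_POLL_YIELD	= 1 << 4,
};
#define FP_OWNED	(FP_NAPI | FP_POLL)
#define FP_LOCKED	(FP_OWNED | FP_DISABLED)

struct fp_model {
	pthread_mutex_t lock;
	unsigned int state;
};

/* mirrors bnx2x_fp_lock_napi(): take the ring unless busy-poll or disable
 * already owns it, in which case leave a yield mark and back off */
static bool model_lock_napi(struct fp_model *fp)
{
	bool rc = true;

	pthread_mutex_lock(&fp->lock);
	if (fp->state & FP_LOCKED) {
		fp->state |= FP_NAPI_YIELD;
		rc = false;
	} else {
		fp->state = FP_NAPI;
	}
	pthread_mutex_unlock(&fp->lock);
	return rc;
}

int main(void)
{
	struct fp_model fp = { PTHREAD_MUTEX_INITIALIZER, FP_IDLE };

	printf("napi acquire on idle fp: %d\n", model_lock_napi(&fp));
	fp.state = FP_POLL;	/* pretend a busy-polling socket owns it */
	printf("napi acquire on polled fp: %d\n", model_lock_napi(&fp));
	printf("state now 0x%x (poll owner + napi yield mark)\n", fp.state);
	return 0;
}
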
*rx_buf_ring; /* BDs mappings ring */ struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */ @@ -530,55 +595,174 @@ struct bnx2x_fastpath { rx_calls; /* TPA related */ - struct bnx2x_agg_info tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2]; + struct bnx2x_agg_info *tpa_info; u8 disable_tpa; #ifdef BNX2X_STOP_ON_ERROR u64 tpa_queue_used; #endif - - struct tstorm_per_queue_stats old_tclient; - struct ustorm_per_queue_stats old_uclient; - struct xstorm_per_queue_stats old_xclient; - struct bnx2x_eth_q_stats eth_q_stats; - /* The size is calculated using the following: sizeof name field from netdev structure + 4 ('-Xx-' string) + 4 (for the digits and to make it DWORD aligned) */ #define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8) char name[FP_NAME_SIZE]; +}; - /* MACs object */ - struct bnx2x_vlan_mac_obj mac_obj; +#define bnx2x_fp(bp, nr, var) ((bp)->fp[(nr)].var) +#define bnx2x_sp_obj(bp, fp) ((bp)->sp_objs[(fp)->index]) +#define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index])) +#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats)) - /* Queue State object */ - struct bnx2x_queue_sp_obj q_obj; +#ifdef CONFIG_NET_RX_BUSY_POLL +static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) +{ + spin_lock_init(&fp->lock); + fp->state = BNX2X_FP_STATE_IDLE; +} -}; +/* called from the device poll routine to get ownership of a FP */ +static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) +{ + bool rc = true; + + spin_lock_bh(&fp->lock); + if (fp->state & BNX2X_FP_LOCKED) { + WARN_ON(fp->state & BNX2X_FP_STATE_NAPI); + fp->state |= BNX2X_FP_STATE_NAPI_YIELD; + rc = false; + } else { + /* we don't care if someone yielded */ + fp->state = BNX2X_FP_STATE_NAPI; + } + spin_unlock_bh(&fp->lock); + return rc; +} -#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) +/* returns true if someone tried to get the FP while napi had it */ +static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) +{ + bool rc = false; -/* Use 2500 as a mini-jumbo MTU for FCoE */ -#define BNX2X_FCOE_MINI_JUMBO_MTU 2500 + spin_lock_bh(&fp->lock); + WARN_ON(fp->state & + (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD)); -/* FCoE L2 `fastpath' entry is right after the eth entries */ -#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp) -#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX]) -#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var) -#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \ - txdata[FIRST_TX_COS_INDEX].var) + if (fp->state & BNX2X_FP_STATE_POLL_YIELD) + rc = true; + /* state ==> idle, unless currently disabled */ + fp->state &= BNX2X_FP_STATE_DISABLED; + spin_unlock_bh(&fp->lock); + return rc; +} + +/* called from bnx2x_low_latency_poll() */ +static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp) +{ + bool rc = true; + + spin_lock_bh(&fp->lock); + if ((fp->state & BNX2X_FP_LOCKED)) { + fp->state |= BNX2X_FP_STATE_POLL_YIELD; + rc = false; + } else { + /* preserve yield marks */ + fp->state |= BNX2X_FP_STATE_POLL; + } + spin_unlock_bh(&fp->lock); + return rc; +} -#define IS_ETH_FP(fp) (fp->index < \ - BNX2X_NUM_ETH_QUEUES(fp->bp)) -#ifdef BCM_CNIC -#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX) -#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX) +/* returns true if someone tried to get the FP while it was locked */ +static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) +{ + bool rc = false; + + spin_lock_bh(&fp->lock); + WARN_ON(fp->state & BNX2X_FP_STATE_NAPI); + + if (fp->state & BNX2X_FP_STATE_POLL_YIELD) + rc = true; + + /* state ==> idle,
unless currently disabled */ + fp->state &= BNX2X_FP_STATE_DISABLED; + spin_unlock_bh(&fp->lock); + return rc; +} + +/* true if a socket is polling, even if it did not get the lock */ +static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) +{ + WARN_ON(!(fp->state & BNX2X_FP_OWNED)); + return fp->state & BNX2X_FP_USER_PEND; +} + +/* false if fp is currently owned */ +static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp) +{ + int rc = true; + + spin_lock_bh(&fp->lock); + if (fp->state & BNX2X_FP_OWNED) + rc = false; + fp->state |= BNX2X_FP_STATE_DISABLED; + spin_unlock_bh(&fp->lock); + + return rc; +} #else -#define IS_FCOE_FP(fp) false -#define IS_FCOE_IDX(idx) false -#endif +static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) +{ +} +static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) +{ + return true; +} + +static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) +{ + return false; +} + +static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp) +{ + return false; +} + +static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) +{ + return false; +} + +static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) +{ + return false; +} +static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp) +{ + return true; +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +/* Use 2500 as a mini-jumbo MTU for FCoE */ +#define BNX2X_FCOE_MINI_JUMBO_MTU 2500 + +#define FCOE_IDX_OFFSET 0 + +#define FCOE_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) + \ + FCOE_IDX_OFFSET) +#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX(bp)]) +#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var) +#define bnx2x_fcoe_inner_sp_obj(bp) (&bp->sp_objs[FCOE_IDX(bp)]) +#define bnx2x_fcoe_sp_obj(bp, var) (bnx2x_fcoe_inner_sp_obj(bp)->var) +#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \ + txdata_ptr[FIRST_TX_COS_INDEX] \ + ->var) + +#define IS_ETH_FP(fp) ((fp)->index < BNX2X_NUM_ETH_QUEUES((fp)->bp)) +#define IS_FCOE_FP(fp) ((fp)->index == FCOE_IDX((fp)->bp)) +#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp)) /* MC hsi */ #define MAX_FETCH_BD 13 /* HW max BDs per packet */ @@ -598,6 +782,23 @@ struct bnx2x_fastpath { #define TX_BD(x) ((x) & MAX_TX_BD) #define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) +/* number of NEXT_PAGE descriptors may be required during placement */ +#define NEXT_CNT_PER_TX_PKT(bds) \ + (((bds) + MAX_TX_DESC_CNT - 1) / \ + MAX_TX_DESC_CNT * NEXT_PAGE_TX_DESC_CNT) +/* max BDs per tx packet w/o next_pages: + * START_BD - describes the packet + * START_BD (split) - includes unpaged data segment for GSO + * PARSING_BD - for TSO and CSUM data + * PARSING_BD2 - for encapsulation data + * Frag BDs - describes pages for frags + */ +#define BDS_PER_TX_PKT 4 +#define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT) +/* max BDs per tx packet including next pages */ +#define MAX_DESC_PER_TX_PKT (MAX_BDS_PER_TX_PKT + \ + NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT)) + /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ #define NUM_RX_RINGS 8 #define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) @@ -670,12 +871,10 @@ struct bnx2x_fastpath { FW_DROP_LEVEL(bp)) #define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM) - /* This is needed for determining of last_max */ #define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) #define SUB_S32(a, b) (s32)((s32)(a) - (s32)(b)) - #define BNX2X_SWCID_SHIFT 17 #define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1) @@ -689,36 +888,39 @@ struct bnx2x_fastpath { #define BD_UNMAP_LEN(bd)
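
/* The worst-case descriptor budget above can be made concrete. A sketch
 * under assumed values (4K ring pages with 16-byte tx BDs so MAX_TX_DESC_CNT
 * is 255, NEXT_PAGE_TX_DESC_CNT of 1, MAX_SKB_FRAGS of 17); none of these
 * numbers come from this patch:
 */
#include <stdio.h>

#define MAX_TX_DESC_CNT		255	/* useful BDs per ring page */
#define NEXT_PAGE_TX_DESC_CNT	1	/* link BD at the end of each page */
#define MAX_SKB_FRAGS_ASSUMED	17
#define BDS_PER_TX_PKT		4	/* start, split start, parse, parse2 */

/* same rounding as NEXT_CNT_PER_TX_PKT(): every MAX_TX_DESC_CNT useful BDs
 * may cross one page boundary and consume the link BD there */
static int next_cnt_per_tx_pkt(int bds)
{
	return (bds + MAX_TX_DESC_CNT - 1) / MAX_TX_DESC_CNT *
		NEXT_PAGE_TX_DESC_CNT;
}

int main(void)
{
	int max_bds = MAX_SKB_FRAGS_ASSUMED + BDS_PER_TX_PKT;	/* 21 */

	printf("max descriptors per tx packet: %d\n",
	       max_bds + next_cnt_per_tx_pkt(max_bds));		/* 22 */
	return 0;
}
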
(le16_to_cpu((bd)->nbytes)) #define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */ -#define BNX2X_DB_SHIFT 7 /* 128 bytes*/ +#define BNX2X_DB_SHIFT 3 /* 8 bytes*/ #if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT) #error "Min DB doorbell stride is 8" #endif -#define DPM_TRIGER_TYPE 0x40 #define DOORBELL(bp, cid, val) \ do { \ - writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \ - DPM_TRIGER_TYPE); \ + writel((u32)(val), bp->doorbells + (bp->db_size * (cid))); \ } while (0) - /* TX CSUM helpers */ #define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \ skb->csum_offset) #define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \ skb->csum_offset)) -#define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff) +#define pbd_tcp_flags(tcp_hdr) (ntohl(tcp_flag_word(tcp_hdr))>>16 & 0xff) -#define XMIT_PLAIN 0 -#define XMIT_CSUM_V4 0x1 -#define XMIT_CSUM_V6 0x2 -#define XMIT_CSUM_TCP 0x4 -#define XMIT_GSO_V4 0x8 -#define XMIT_GSO_V6 0x10 +#define XMIT_PLAIN 0 +#define XMIT_CSUM_V4 (1 << 0) +#define XMIT_CSUM_V6 (1 << 1) +#define XMIT_CSUM_TCP (1 << 2) +#define XMIT_GSO_V4 (1 << 3) +#define XMIT_GSO_V6 (1 << 4) +#define XMIT_CSUM_ENC_V4 (1 << 5) +#define XMIT_CSUM_ENC_V6 (1 << 6) +#define XMIT_GSO_ENC_V4 (1 << 7) +#define XMIT_GSO_ENC_V6 (1 << 8) -#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6) -#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6) +#define XMIT_CSUM_ENC (XMIT_CSUM_ENC_V4 | XMIT_CSUM_ENC_V6) +#define XMIT_GSO_ENC (XMIT_GSO_ENC_V4 | XMIT_GSO_ENC_V6) +#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6 | XMIT_CSUM_ENC) +#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6 | XMIT_GSO_ENC) /* stuff added to make the code fit 80Col */ #define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE) @@ -729,21 +931,6 @@ struct bnx2x_fastpath { #define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG -#define BNX2X_IP_CSUM_ERR(cqe) \ - (!((cqe)->fast_path_cqe.status_flags & \ - ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \ - ((cqe)->fast_path_cqe.type_error_flags & \ - ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) - -#define BNX2X_L4_CSUM_ERR(cqe) \ - (!((cqe)->fast_path_cqe.status_flags & \ - ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \ - ((cqe)->fast_path_cqe.type_error_flags & \ - ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) - -#define BNX2X_RX_CSUM_OK(cqe) \ - (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe))) - #define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \ (((le16_to_cpu(flags) & \ PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \ @@ -752,7 +939,6 @@ struct bnx2x_fastpath { #define BNX2X_RX_SUM_FIX(cqe) \ BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags) - #define FP_USB_FUNC_OFF \ offsetof(struct cstorm_status_block_u, func) #define FP_CSB_FUNC_OFF \ @@ -794,35 +980,64 @@ struct bnx2x_common { #define CHIP_NUM_57711E 0x1650 #define CHIP_NUM_57712 0x1662 #define CHIP_NUM_57712_MF 0x1663 +#define CHIP_NUM_57712_VF 0x166f #define CHIP_NUM_57713 0x1651 #define CHIP_NUM_57713E 0x1652 #define CHIP_NUM_57800 0x168a #define CHIP_NUM_57800_MF 0x16a5 +#define CHIP_NUM_57800_VF 0x16a9 #define CHIP_NUM_57810 0x168e #define CHIP_NUM_57810_MF 0x16ae -#define CHIP_NUM_57840 0x168d -#define CHIP_NUM_57840_MF 0x16ab +#define CHIP_NUM_57810_VF 0x16af +#define CHIP_NUM_57811 0x163d +#define CHIP_NUM_57811_MF 0x163e +#define CHIP_NUM_57811_VF 0x163f +#define CHIP_NUM_57840_OBSOLETE 0x168d +#define CHIP_NUM_57840_MF_OBSOLETE 0x16ab +#define CHIP_NUM_57840_4_10 0x16a1 +#define CHIP_NUM_57840_2_20 0x16a2 +#define CHIP_NUM_57840_MF 0x16a4 +#define CHIP_NUM_57840_VF 0x16ad #define 
CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710) #define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711) #define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E) #define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712) +#define CHIP_IS_57712_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_VF) #define CHIP_IS_57712_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_MF) #define CHIP_IS_57800(bp) (CHIP_NUM(bp) == CHIP_NUM_57800) #define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF) +#define CHIP_IS_57800_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_VF) #define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810) #define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF) -#define CHIP_IS_57840(bp) (CHIP_NUM(bp) == CHIP_NUM_57840) -#define CHIP_IS_57840_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_MF) +#define CHIP_IS_57810_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_VF) +#define CHIP_IS_57811(bp) (CHIP_NUM(bp) == CHIP_NUM_57811) +#define CHIP_IS_57811_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_MF) +#define CHIP_IS_57811_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_VF) +#define CHIP_IS_57840(bp) \ + ((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) || \ + (CHIP_NUM(bp) == CHIP_NUM_57840_2_20) || \ + (CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE)) +#define CHIP_IS_57840_MF(bp) ((CHIP_NUM(bp) == CHIP_NUM_57840_MF) || \ + (CHIP_NUM(bp) == CHIP_NUM_57840_MF_OBSOLETE)) +#define CHIP_IS_57840_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_VF) #define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \ CHIP_IS_57711E(bp)) +#define CHIP_IS_57811xx(bp) (CHIP_IS_57811(bp) || \ + CHIP_IS_57811_MF(bp) || \ + CHIP_IS_57811_VF(bp)) #define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \ - CHIP_IS_57712_MF(bp)) + CHIP_IS_57712_MF(bp) || \ + CHIP_IS_57712_VF(bp)) #define CHIP_IS_E3(bp) (CHIP_IS_57800(bp) || \ CHIP_IS_57800_MF(bp) || \ + CHIP_IS_57800_VF(bp) || \ CHIP_IS_57810(bp) || \ CHIP_IS_57810_MF(bp) || \ + CHIP_IS_57810_VF(bp) || \ + CHIP_IS_57811xx(bp) || \ CHIP_IS_57840(bp) || \ - CHIP_IS_57840_MF(bp)) + CHIP_IS_57840_MF(bp) || \ + CHIP_IS_57840_VF(bp)) #define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp))) #define USES_WARPCORE(bp) (CHIP_IS_E3(bp)) #define IS_E1H_OFFSET (!CHIP_IS_E1(bp)) @@ -856,6 +1071,18 @@ struct bnx2x_common { (CHIP_REV(bp) == CHIP_REV_Bx)) #define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \ (CHIP_REV(bp) == CHIP_REV_Ax)) +/* This define is used in two main places: + * 1. In the early stages of nic_load, to know whether to configure the Parser / Searcher + * to nic-only mode or to offload mode. Offload mode is configured if either the + * chip is E1x (where MIC_MODE register is not applicable), or if cnic has already + * registered for this port (which means that the user wants storage services). + * 2. During cnic-related load, to know if offload mode is already configured in + * the HW or needs to be configured. + * Since the transition from nic-mode to offload-mode in HW causes traffic + * corruption, nic-mode is configured only in ports on which storage services + * were never requested.
+ */ +#define CONFIGURE_NIC_MODE(bp) (!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp)) int flash_size; #define BNX2X_NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */ @@ -895,6 +1122,7 @@ struct bnx2x_common { #define BNX2X_IGU_STAS_MSG_VF_CNT 64 #define BNX2X_IGU_STAS_MSG_PF_CNT 4 +#define MAX_IGU_ATTN_ACK_TO 100 /* end of common */ /* port */ @@ -916,7 +1144,6 @@ struct bnx2x_port { /* used to synchronize phy accesses */ struct mutex phy_mutex; - int need_hw_lock; u32 port_stx; @@ -929,13 +1156,28 @@ struct bnx2x_port { (offsetof(struct bnx2x_eth_stats, stat_name) / 4) /* slow path */ +#define BNX2X_MAX_NUM_OF_VFS 64 +#define BNX2X_VF_CID_WND 4 /* log num of queues per VF. HW config. */ +#define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND) -/* slow path work-queue */ -extern struct workqueue_struct *bnx2x_wq; +/* We need to reserve doorbell addresses for all VF and queue combinations */ +#define BNX2X_VF_CIDS (BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF) -#define BNX2X_MAX_NUM_OF_VFS 64 +/* The doorbell is configured to have the same number of CIDs for PFs and for + * VFs. For this reason the PF CID zone is as large as the VF zone. + */ +#define BNX2X_FIRST_VF_CID BNX2X_VF_CIDS +#define BNX2X_MAX_NUM_VF_QUEUES 64 #define BNX2X_VF_ID_INVALID 0xFF +/* the number of VF CIDS multiplied by the amount of bytes reserved for each + * cid must not exceed the size of the VF doorbell + */ +#define BNX2X_VF_BAR_SIZE 512 +#if (BNX2X_VF_BAR_SIZE < BNX2X_CIDS_PER_VF * (1 << BNX2X_DB_SHIFT)) +#error "VF doorbell bar size is 512" +#endif + /* * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is * control by the number of fast-path status blocks supported by the @@ -948,14 +1190,14 @@ extern struct workqueue_struct *bnx2x_wq; * If the maximum number of FP-SB available is X then: * a. If CNIC is supported it consumes 1 FP-SB thus the max number of * regular L2 queues is Y=X-1 - * b. in MF mode the actual number of L2 queues is Y= (X-1/MF_factor) + * b. In MF mode the actual number of L2 queues is Y= (X-1/MF_factor) * c. If the FCoE L2 queue is supported the actual number of L2 queues * is Y+1 * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for * slow-path interrupts) or Y+2 if CNIC is supported (one additional * FP interrupt context for the CNIC). * e. The number of HW context (CID count) is always X or X+1 if FCoE - * L2 queue is supported. the cid for the FCoE L2 queue is always X. + * L2 queue is supported. The cid for the FCoE L2 queue is always X. 
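
/* The VF CID zoning above is easy to visualize: one PF zone as large as the
 * whole VF range, then 64 windows of 16 CIDs each. A standalone sketch of
 * the resulting map; the 8-byte doorbell stride is taken from the new
 * BNX2X_DB_SHIFT above, everything else mirrors the defines just shown:
 */
#include <stdio.h>

#define MAX_VFS		64
#define VF_CID_WND	4			/* log2 of CIDs per VF */
#define CIDS_PER_VF	(1 << VF_CID_WND)	/* 16 */
#define VF_CIDS		(MAX_VFS * CIDS_PER_VF)	/* 1024 */
#define FIRST_VF_CID	VF_CIDS			/* PF zone is equally large */

int main(void)
{
	int vf;

	for (vf = 0; vf < MAX_VFS; vf += 21)	/* sample a few VFs */
		printf("vf %2d owns cids [%d, %d)\n", vf,
		       FIRST_VF_CID + vf * CIDS_PER_VF,
		       FIRST_VF_CID + (vf + 1) * CIDS_PER_VF);

	/* why the #if above holds: 16 cids * 8 bytes = 128 <= 512 bar bytes */
	printf("doorbell bytes per vf: %d\n", CIDS_PER_VF * (1 << 3));
	return 0;
}
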
*/ /* fast-path interrupt contexts E1x */ @@ -969,27 +1211,25 @@ union cdu_context { }; /* CDU host DB constants */ -#define CDU_ILT_PAGE_SZ_HW 3 -#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */ +#define CDU_ILT_PAGE_SZ_HW 2 +#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */ #define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context)) -#ifdef BCM_CNIC #define CNIC_ISCSI_CID_MAX 256 #define CNIC_FCOE_CID_MAX 2048 #define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX) #define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS) -#endif #define QM_ILT_PAGE_SZ_HW 0 #define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */ #define QM_CID_ROUND 1024 -#ifdef BCM_CNIC /* TM (timers) host DB constants */ #define TM_ILT_PAGE_SZ_HW 0 #define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */ -/* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */ -#define TM_CONN_NUM 1024 +#define TM_CONN_NUM (BNX2X_FIRST_VF_CID + \ + BNX2X_VF_CIDS + \ + CNIC_ISCSI_CID_MAX) #define TM_ILT_SZ (8 * TM_CONN_NUM) #define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ) @@ -1002,8 +1242,6 @@ union cdu_context { #define SRC_T2_SZ SRC_ILT_SZ #define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ) -#endif - #define MAX_DMAE_C 8 /* DMA memory not used in fastpath */ @@ -1013,7 +1251,6 @@ struct bnx2x_slowpath { struct eth_classify_rules_ramrod_data e2; } mac_rdata; - union { struct tstorm_eth_mac_filter_config e1x; struct eth_filter_rules_ramrod_data e2; @@ -1030,6 +1267,7 @@ struct bnx2x_slowpath { union { struct client_init_ramrod_data init_data; struct client_update_ramrod_data update_data; + struct tpa_update_ramrod_data tpa_data; } q_rdata; union { @@ -1038,6 +1276,16 @@ struct bnx2x_slowpath { struct flow_control_configuration pfc_config; } func_rdata; + /* afex ramrod can not be a part of func_rdata union because these + * events might arrive in parallel to other events from func_rdata. + * Therefore, if they would have been defined in the same union, + * data can get corrupted. 
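
/* Working the new ILT numbers through with the values visible in this diff
 * (CDU page of 8192 << 2 = 32K, the 1KB cdu_context noted further down in
 * this header, 1024 PF cids + 1024 VF cids + 256 iSCSI cids, 4K TM page,
 * 8 timer bytes per connection); a back-of-the-envelope sketch:
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int cdu_page = 8192 << 2;		/* CDU_ILT_PAGE_SZ: 32K */
	int ilt_page_cids = cdu_page / 1024;	/* contexts per CDU page: 32 */
	int tm_conn = 1024 + 1024 + 256;	/* TM_CONN_NUM: 2304 */
	int tm_ilt_sz = 8 * tm_conn;		/* TM_ILT_SZ: 18432 bytes */

	printf("ILT_PAGE_CIDS = %d\n", ilt_page_cids);
	printf("TM_ILT_LINES = %d\n", DIV_ROUND_UP(tm_ilt_sz, 4096)); /* 5 */
	return 0;
}
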
+ */ + union { + struct afex_vif_list_ramrod_data viflist_data; + struct function_update_data func_update; + } func_afex_rdata; + /* used by dmae command executer */ struct dmae_command dmae[MAX_DMAE_C]; @@ -1046,7 +1294,6 @@ struct bnx2x_slowpath { struct nig_stats nig_stats; struct host_port_stats port_stats; struct host_func_stats func_stats; - struct host_func_stats func_stats_base; u32 wb_comp; u32 wb_data[4]; @@ -1058,7 +1305,6 @@ struct bnx2x_slowpath { #define bnx2x_sp_mapping(bp, var) \ (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var)) - /* attn group wiring */ #define MAX_DYNAMIC_ATTN_GRPS 8 @@ -1083,12 +1329,14 @@ struct hw_context { /* forward */ struct bnx2x_ilt; +struct bnx2x_vfdb; enum bnx2x_recovery_state { BNX2X_RECOVERY_DONE, BNX2X_RECOVERY_INIT, BNX2X_RECOVERY_WAIT, - BNX2X_RECOVERY_FAILED + BNX2X_RECOVERY_FAILED, + BNX2X_RECOVERY_NIC_LOADING }; /* @@ -1143,26 +1391,66 @@ struct bnx2x_fw_stats_req { }; struct bnx2x_fw_stats_data { - struct stats_counter storm_counters; - struct per_port_stats port; - struct per_pf_stats pf; + struct stats_counter storm_counters; + struct per_port_stats port; + struct per_pf_stats pf; struct fcoe_statistics_params fcoe; - struct per_queue_stats queue_stats[1]; + struct per_queue_stats queue_stats[1]; }; /* Public slow path states */ -enum { +enum sp_rtnl_flag { BNX2X_SP_RTNL_SETUP_TC, BNX2X_SP_RTNL_TX_TIMEOUT, BNX2X_SP_RTNL_FAN_FAILURE, + BNX2X_SP_RTNL_AFEX_F_UPDATE, + BNX2X_SP_RTNL_ENABLE_SRIOV, + BNX2X_SP_RTNL_VFPF_MCAST, + BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, + BNX2X_SP_RTNL_RX_MODE, + BNX2X_SP_RTNL_HYPERVISOR_VLAN, + BNX2X_SP_RTNL_TX_STOP, + BNX2X_SP_RTNL_GET_DRV_VERSION, +}; + +enum bnx2x_iov_flag { + BNX2X_IOV_HANDLE_VF_MSG, + BNX2X_IOV_HANDLE_FLR, }; +struct bnx2x_prev_path_list { + struct list_head list; + u8 bus; + u8 slot; + u8 path; + u8 aer; + u8 undi; +}; + +struct bnx2x_sp_objs { + /* MACs object */ + struct bnx2x_vlan_mac_obj mac_obj; + + /* Queue State object */ + struct bnx2x_queue_sp_obj q_obj; +}; + +struct bnx2x_fp_stats { + struct tstorm_per_queue_stats old_tclient; + struct ustorm_per_queue_stats old_uclient; + struct xstorm_per_queue_stats old_xclient; + struct bnx2x_eth_q_stats eth_q_stats; + struct bnx2x_eth_q_stats_old eth_q_stats_old; +}; struct bnx2x { /* Fields used in the tx and intr/napi performance paths * are grouped together in the beginning of the structure */ struct bnx2x_fastpath *fp; + struct bnx2x_sp_objs *sp_objs; + struct bnx2x_fp_stats *fp_stats; + struct bnx2x_fp_txdata *bnx2x_txq; void __iomem *regview; void __iomem *doorbells; u16 db_size; @@ -1181,6 +1469,25 @@ struct bnx2x { (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 
2 : 1)) #define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp)) +#ifdef CONFIG_BNX2X_SRIOV + /* protects vf2pf mailbox from simultaneous access */ + struct mutex vf2pf_mutex; + /* vf pf channel mailbox contains request and response buffers */ + struct bnx2x_vf_mbx_msg *vf2pf_mbox; + dma_addr_t vf2pf_mbox_mapping; + + /* we set aside a copy of the acquire response */ + struct pfvf_acquire_resp_tlv acquire_resp; + + /* bulletin board for messages from pf to vf */ + union pf_vf_bulletin *pf2vf_bulletin; + dma_addr_t pf2vf_bulletin_mapping; + + struct pf_vf_bulletin_content old_bulletin; + + u16 requested_nr_virtfn; +#endif /* CONFIG_BNX2X_SRIOV */ + struct net_device *dev; struct pci_dev *pdev; @@ -1198,6 +1505,8 @@ struct bnx2x { #define ETH_MIN_PACKET_SIZE 60 #define ETH_MAX_PACKET_SIZE 1500 #define ETH_MAX_JUMBO_PACKET_SIZE 9600 +/* TCP with Timestamp Option (32) + IPv6 (40) */ +#define ETH_MAX_TPA_HEADER_SIZE 72 /* Max supported alignment is 256 (8 shift) */ #define BNX2X_RX_ALIGN_SHIFT min(8, L1_CACHE_SHIFT) @@ -1211,7 +1520,7 @@ struct bnx2x { #define BNX2X_FW_RX_ALIGN_START (1UL << BNX2X_RX_ALIGN_SHIFT) #define BNX2X_FW_RX_ALIGN_END \ - max(1UL << BNX2X_RX_ALIGN_SHIFT, \ + max_t(u64, 1UL << BNX2X_RX_ALIGN_SHIFT, \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) #define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5) @@ -1243,8 +1552,6 @@ struct bnx2x { __le16 *eq_cons_sb; atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */ - - /* Counter for marking that there is a STAT_QUERY ramrod pending */ u16 stats_pending; /* Counter for completed statistics ramrods */ @@ -1260,29 +1567,58 @@ struct bnx2x { #define PCI_32BIT_FLAG (1 << 1) #define ONE_PORT_FLAG (1 << 2) #define NO_WOL_FLAG (1 << 3) -#define USING_DAC_FLAG (1 << 4) #define USING_MSIX_FLAG (1 << 5) #define USING_MSI_FLAG (1 << 6) #define DISABLE_MSI_FLAG (1 << 7) #define TPA_ENABLE_FLAG (1 << 8) #define NO_MCP_FLAG (1 << 9) - -#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG) +#define GRO_ENABLE_FLAG (1 << 10) #define MF_FUNC_DIS (1 << 11) #define OWN_CNIC_IRQ (1 << 12) #define NO_ISCSI_OOO_FLAG (1 << 13) #define NO_ISCSI_FLAG (1 << 14) #define NO_FCOE_FLAG (1 << 15) #define BC_SUPPORTS_PFC_STATS (1 << 17) +#define TX_SWITCHING (1 << 18) +#define BC_SUPPORTS_FCOE_FEATURES (1 << 19) +#define USING_SINGLE_MSIX_FLAG (1 << 20) +#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) +#define IS_VF_FLAG (1 << 22) +#define INTERRUPTS_ENABLED_FLAG (1 << 23) +#define BC_SUPPORTS_RMMOD_CMD (1 << 24) +#define HAS_PHYS_PORT_ID (1 << 25) +#define AER_ENABLED (1 << 26) + +#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) + +#ifdef CONFIG_BNX2X_SRIOV +#define IS_VF(bp) ((bp)->flags & IS_VF_FLAG) +#define IS_PF(bp) (!((bp)->flags & IS_VF_FLAG)) +#else +#define IS_VF(bp) false +#define IS_PF(bp) true +#endif #define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) #define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) #define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG) - int pm_cap; + u8 cnic_support; + bool cnic_enabled; + bool cnic_loaded; + struct cnic_eth_dev *(*cnic_probe)(struct net_device *); + + /* Flag that indicates that we can start looking for FCoE L2 queue + * completions in the default status block. 
+ */ + bool fcoe_init; + int mrrs; struct delayed_work sp_task; + struct delayed_work iov_task; + + atomic_t interrupt_occurred; struct delayed_work sp_rtnl_task; struct delayed_work period_task; @@ -1303,16 +1639,17 @@ struct bnx2x { struct bnx2x_common common; struct bnx2x_port port; - struct cmng_struct_per_port cmng; - u32 vn_weight_sum; + struct cmng_init cmng; + u32 mf_config[E1HVN_MAX]; - u32 mf2_config[E2_FUNC_MAX]; + u32 mf_ext_config; u32 path_has_ovlan; /* E3 */ u16 mf_ov; u8 mf_mode; #define IS_MF(bp) (bp->mf_mode != 0) #define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI) #define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD) +#define IS_MF_AFEX(bp) (bp->mf_mode == MULTI_FUNCTION_AFEX) u8 wol; @@ -1328,7 +1665,7 @@ struct bnx2x { u16 rx_ticks_int; u16 rx_ticks; /* Maximal coalescing timeout in us */ -#define BNX2X_MAX_COALESCE_TOUT (0xf0*12) +#define BNX2X_MAX_COALESCE_TOUT (0xff*BNX2X_BTR) u32 lin_cnt; @@ -1343,12 +1680,14 @@ struct bnx2x { #define BNX2X_STATE_DIAG 0xe000 #define BNX2X_STATE_ERROR 0xf000 - int multi_mode; #define BNX2X_MAX_PRIORITY 8 #define BNX2X_MAX_ENTRIES_PER_PRI 16 #define BNX2X_MAX_COS 3 #define BNX2X_MAX_TX_COS 2 int num_queues; + uint num_ethernet_queues; + uint num_cnic_queues; + int num_napi_queues; int disable_tpa; u32 rx_mode; @@ -1361,11 +1700,18 @@ struct bnx2x { u8 igu_dsb_id; u8 igu_base_sb; u8 igu_sb_cnt; + u8 min_msix_vec_cnt; + + u32 igu_base_addr; dma_addr_t def_status_blk_mapping; struct bnx2x_slowpath *slowpath; dma_addr_t slowpath_mapping; + /* Mechanism protecting the drv_info_to_mcp */ + struct mutex drv_info_mutex; + bool drv_info_mng_owner; + /* Total number of FW statistics requests */ u8 fw_stats_num; @@ -1385,14 +1731,18 @@ struct bnx2x { int fw_stats_req_sz; /* - * FW statistics data shortcut (points at the begining of + * FW statistics data shortcut (points at the beginning of * fw_stats buffer + fw_stats_req_sz). */ struct bnx2x_fw_stats_data *fw_stats_data; dma_addr_t fw_stats_data_mapping; int fw_stats_data_sz; - struct hw_context context; + /* For max 1024 cids (VF RSS), 32KB ILT page size and 1KB + * context size we need 8 ILT entries. + */ +#define ILT_MAX_L2_LINES 32 + struct hw_context context[ILT_MAX_L2_LINES]; struct bnx2x_ilt *ilt; #define BP_ILT(bp) ((bp)->ilt) @@ -1401,25 +1751,24 @@ struct bnx2x { * Maximum supported number of RSS queues: number of IGU SBs minus one that goes * to CNIC. 
*/ -#define BNX2X_MAX_RSS_COUNT(bp) ((bp)->igu_sb_cnt - CNIC_PRESENT) +#define BNX2X_MAX_RSS_COUNT(bp) ((bp)->igu_sb_cnt - CNIC_SUPPORT(bp)) /* * Maximum CID count that might be required by the bnx2x: - * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related) + * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI */ -#define BNX2X_L2_CID_COUNT(bp) (MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\ - NON_ETH_CONTEXT_USE + CNIC_PRESENT) + +#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \ + + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp))) +#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \ + + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp))) #define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\ ILT_PAGE_CIDS)) -#define BNX2X_DB_SIZE(bp) (BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT)) int qm_cid_count; - int dropless_fc; + bool dropless_fc; -#ifdef BCM_CNIC - u32 cnic_flags; -#define BNX2X_CNIC_FLAG_MAC_SET 1 void *t2; dma_addr_t t2_mapping; struct cnic_ops __rcu *cnic_ops; @@ -1438,9 +1787,8 @@ struct bnx2x { struct mutex cnic_mutex; struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj; - /* Start index of the "special" (CNIC related) L2 cleints */ + /* Start index of the "special" (CNIC related) L2 clients */ u8 cnic_base_cl_id; -#endif int dmae_ready; /* used to synchronize dmae accesses */ @@ -1461,6 +1809,11 @@ struct bnx2x { u16 stats_counter; struct bnx2x_eth_stats eth_stats; + struct host_func_stats func_stats; + struct bnx2x_eth_stats_old eth_stats_old; + struct bnx2x_net_stats_old net_stats_old; + struct bnx2x_fw_port_stats_old fw_stats_old; + bool stats_init; struct z_stream_s *strm; void *gunzip_buf; @@ -1503,6 +1856,9 @@ struct bnx2x { char fw_ver[32]; const struct firmware *firmware; + struct bnx2x_vfdb *vfdb; +#define IS_SRIOV(bp) ((bp)->vfdb) + /* DCB support on/off */ u16 dcb_state; #define BNX2X_DCB_STATE_OFF 0 @@ -1522,6 +1878,10 @@ struct bnx2x { int dcb_version; /* CAM credit pools */ + + /* used only in sriov */ + struct bnx2x_credit_pool_obj vlans_pool; + struct bnx2x_credit_pool_obj macs_pool; /* RX_MODE object */ @@ -1541,7 +1901,10 @@ struct bnx2x { /* operation indication for the sp_rtnl task */ unsigned long sp_rtnl_state; - /* DCBX Negotation results */ + /* Indication of the IOV tasks */ + unsigned long iov_task_state; + + /* DCBX Negotiation results */ struct dcbx_features dcbx_local_feat; u32 dcbx_error; @@ -1549,6 +1912,9 @@ struct bnx2x { struct dcbx_features dcbx_remote_feat; u32 dcbx_remote_flags; #endif + /* AFEX: store default vlan used */ + int afex_def_vlan_tag; + enum mf_cfg_afex_vlan_mode afex_vlan_mode; u32 pending_max; /* multiple tx classes of service */ @@ -1556,12 +1922,21 @@ struct bnx2x { /* priority to cos mapping */ u8 prio_to_cos[8]; + + int fp_array_size; + u32 dump_preset_idx; + bool stats_started; + struct semaphore stats_sema; + + u8 phys_port_id[ETH_ALEN]; }; /* Tx queues may be less or equal to Rx queues */ extern int num_queues; #define BNX2X_NUM_QUEUES(bp) (bp->num_queues) -#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE) +#define BNX2X_NUM_ETH_QUEUES(bp) ((bp)->num_ethernet_queues) +#define BNX2X_NUM_NON_CNIC_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - \ + (bp)->num_cnic_queues) #define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp) #define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) @@ -1588,8 +1963,7 @@ extern int num_queues; #define FUNC_FLG_TPA 0x0008 #define FUNC_FLG_SPQ 0x0010 #define FUNC_FLG_LEADING 0x0020 /* PF only */ - - +#define FUNC_FLG_LEADING_STATS 0x0040 struct 
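
/* Putting the CID accounting together: an example evaluation of the new
 * BNX2X_L2_CID_COUNT()/L2_ILT_LINES() for a hypothetical 16-queue, 3-CoS,
 * CNIC-capable function (the queue and CoS counts are assumptions for the
 * demo, not values from the patch):
 */
#include <stdio.h>

/* UIO_CID_PAD(): cids wasted by the DPM rounding shown earlier */
static unsigned int uio_cid_pad(unsigned int first)
{
	unsigned int up = (first + 7) & ~7u;
	unsigned int aligned = up + (up + 8) % 16;

	if (aligned == 8)
		aligned += 16;
	return aligned - first;
}

int main(void)
{
	unsigned int eth_queues = 16, max_cos = 3, cnic_support = 1;
	unsigned int first = eth_queues * max_cos;	/* 48 */
	unsigned int cids = first +
		cnic_support * (2 + uio_cid_pad(first));/* + iSCSI, FCoE, pad */

	printf("L2 cid count: %u\n", cids);		/* 58 */
	printf("L2 ILT lines: %u\n", (cids + 31) / 32);	/* 2, at 32 cids/page */
	return 0;
}
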
bnx2x_func_init_params { /* dma */ dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */ @@ -1601,6 +1975,13 @@ struct bnx2x_func_init_params { u16 spq_prod; /* valid iff FUNC_FLG_SPQ */ }; +#define for_each_cnic_queue(bp, var) \ + for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \ + (var)++) \ + if (skip_queue(bp, var)) \ + continue; \ + else + #define for_each_eth_queue(bp, var) \ for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++) @@ -1614,6 +1995,22 @@ struct bnx2x_func_init_params { else /* Skip forwarding FP */ +#define for_each_valid_rx_queue(bp, var) \ + for ((var) = 0; \ + (var) < (CNIC_LOADED(bp) ? BNX2X_NUM_QUEUES(bp) : \ + BNX2X_NUM_ETH_QUEUES(bp)); \ + (var)++) \ + if (skip_rx_queue(bp, var)) \ + continue; \ + else + +#define for_each_rx_queue_cnic(bp, var) \ + for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \ + (var)++) \ + if (skip_rx_queue(bp, var)) \ + continue; \ + else + #define for_each_rx_queue(bp, var) \ for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ if (skip_rx_queue(bp, var)) \ @@ -1621,6 +2018,22 @@ struct bnx2x_func_init_params { else /* Skip OOO FP */ +#define for_each_valid_tx_queue(bp, var) \ + for ((var) = 0; \ + (var) < (CNIC_LOADED(bp) ? BNX2X_NUM_QUEUES(bp) : \ + BNX2X_NUM_ETH_QUEUES(bp)); \ + (var)++) \ + if (skip_tx_queue(bp, var)) \ + continue; \ + else + +#define for_each_tx_queue_cnic(bp, var) \ + for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \ + (var)++) \ + if (skip_tx_queue(bp, var)) \ + continue; \ + else + #define for_each_tx_queue(bp, var) \ for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ if (skip_tx_queue(bp, var)) \ @@ -1648,9 +2061,6 @@ struct bnx2x_func_init_params { #define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) - - - /** * bnx2x_set_mac_one - configure a single MAC address * @@ -1673,15 +2083,6 @@ int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, struct bnx2x_vlan_mac_obj *obj, bool set, int mac_type, unsigned long *ramrod_flags); /** - * Deletes all MACs configured for the specific MAC object. 
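
/* The queue iterators above end in an "if (skip) continue; else" tail so
 * that each macro expands to a single statement and the caller's loop body
 * binds to the else branch. A self-contained illustration with a stub
 * predicate; the _demo names are invented for this sketch:
 */
#include <stdio.h>

#define NUM_Q	6
#define ETH_Q	4

static int skip_queue_demo(int i)
{
	return i == 5;		/* pretend queue 5 must be skipped */
}

#define for_each_cnic_queue_demo(var)			\
	for ((var) = ETH_Q; (var) < NUM_Q; (var)++)	\
		if (skip_queue_demo(var))		\
			continue;			\
		else

int main(void)
{
	int i;

	for_each_cnic_queue_demo(i)
		printf("servicing cnic queue %d\n", i);	/* prints 4 only */
	return 0;
}
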
- * - * @param bp Function driver instance - * @param mac_obj MAC object to cleanup - * - * @return zero if all MACs were cleaned - */ - -/** * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object * * @bp: driver handle @@ -1701,12 +2102,15 @@ int bnx2x_del_all_macs(struct bnx2x *bp, /* Init Function API */ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p); +void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, + u8 vf_valid, int fw_sb_id, int igu_sb_id); int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port); int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode); int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); void bnx2x_read_mf_cfg(struct bnx2x *bp); +int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val); /* dmae */ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); @@ -1718,6 +2122,18 @@ u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode); u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, bool with_comp, u8 comp_type); +void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, + u8 src_type, u8 dst_type); +int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, + u32 *comp); + +/* FLR related routines */ +u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp); +void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count); +int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt); +u8 bnx2x_is_pcie_pending(struct pci_dev *dev); +int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, + char *msg, u32 poll_cnt); void bnx2x_calc_fc_adv(struct bnx2x *bp); int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, @@ -1725,6 +2141,8 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, void bnx2x_update_coalesce(struct bnx2x *bp); int bnx2x_get_cur_phy_idx(struct bnx2x *bp); +bool bnx2x_port_after_undi(struct bnx2x *bp); + static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, int wait) { @@ -1742,12 +2160,11 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, return val; } -#define BNX2X_ILT_ZALLOC(x, y, size) \ - do { \ - x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ - if (x) \ - memset(x, 0, size); \ - } while (0) +void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, + bool is_pf); + +#define BNX2X_ILT_ZALLOC(x, y, size) \ + x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL) #define BNX2X_ILT_FREE(x, y, size) \ do { \ @@ -1781,11 +2198,11 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, #define LOAD_NORMAL 0 #define LOAD_OPEN 1 #define LOAD_DIAG 2 +#define LOAD_LOOPBACK_EXT 3 #define UNLOAD_NORMAL 0 #define UNLOAD_CLOSE 1 #define UNLOAD_RECOVERY 2 - /* DMAE command defines */ #define DMAE_TIMEOUT -1 #define DMAE_PCI_ERROR -2 /* E2 and onward */ @@ -1849,7 +2266,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, #define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 
0x400 : 0x2000) #define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit - indicates eror */ + * indicates error + */ #define MAX_DMAE_C_PER_PORT 8 #define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ @@ -1863,20 +2281,22 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, #define PCICFG_LINK_SPEED 0xf0000 #define PCICFG_LINK_SPEED_SHIFT 16 - -#define BNX2X_NUM_TESTS 7 +#define BNX2X_NUM_TESTS_SF 7 +#define BNX2X_NUM_TESTS_MF 3 +#define BNX2X_NUM_TESTS(bp) (IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \ + IS_VF(bp) ? 0 : BNX2X_NUM_TESTS_SF) #define BNX2X_PHY_LOOPBACK 0 #define BNX2X_MAC_LOOPBACK 1 +#define BNX2X_EXT_LOOPBACK 2 #define BNX2X_PHY_LOOPBACK_FAILED 1 #define BNX2X_MAC_LOOPBACK_FAILED 2 +#define BNX2X_EXT_LOOPBACK_FAILED 3 #define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \ BNX2X_PHY_LOOPBACK_FAILED) - #define STROM_ASSERT_ARRAY_SIZE 50 - /* must be used on a CID before placing it on a HW ring */ #define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \ @@ -1885,7 +2305,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, #define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) #define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) - #define BNX2X_BTR 4 #define MAX_SPQ_PENDING 8 @@ -1907,7 +2326,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, /* Memory of fairness algorithm . 2 cycles */ #define FAIR_MEM 2 - #define ATTN_NIG_FOR_FUNC (1L << 8) #define ATTN_SW_TIMER_4_FUNC (1L << 9) #define GPIO_2_FUNC (1L << 10) @@ -1923,6 +2341,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, #define ATTN_HARD_WIRED_MASK 0xff00 #define ATTENTION_ID 4 +#define IS_MF_STORAGE_ONLY(bp) (IS_MF_STORAGE_SD(bp) || \ + IS_MF_FCOE_AFEX(bp)) /* stuff added to make the code fit 80Col */ @@ -1950,6 +2370,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \ AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \ AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT | \ AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT) #define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \ AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \ @@ -2011,7 +2432,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, #define MULTI_MASK 0x7f - #define DEF_USB_FUNC_OFF offsetof(struct cstorm_def_status_block_u, func) #define DEF_CSB_FUNC_OFF offsetof(struct cstorm_def_status_block_c, func) #define DEF_XSB_FUNC_OFF offsetof(struct xstorm_def_status_block, func) @@ -2039,18 +2459,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, (&bp->def_status_blk->sp_sb.\ index_values[HC_SP_INDEX_ETH_DEF_CONS]) -#define SET_FLAG(value, mask, flag) \ - do {\ - (value) &= ~(mask);\ - (value) |= ((flag) << (mask##_SHIFT));\ - } while (0) - -#define GET_FLAG(value, mask) \ - (((value) & (mask)) >> (mask##_SHIFT)) - -#define GET_FIELD(value, fname) \ - (((value) & (fname##_MASK)) >> (fname##_SHIFT)) - #define CAM_IS_INVALID(x) \ (GET_FLAG(x.flags, \ MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \ @@ -2061,7 +2469,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, #define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \ TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4) - #ifndef PXP2_REG_PXP2_INT_STS #define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0 #endif @@ -2073,11 +2480,17 @@ static inline u32 reg_poll(struct bnx2x 
*bp, u32 reg, u32 expected, int ms, #define BNX2X_VPD_LEN 128 #define VENDOR_ID_LEN 4 -int bnx2x_close(struct net_device *dev); +#define VF_ACQUIRE_THRESH 3 +#define VF_ACQUIRE_MAC_FILTERS 1 +#define VF_ACQUIRE_MC_FILTERS 10 + +#define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \ + (!((me_reg) & ME_REG_VF_ERR))) +int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err); /* Congestion management fairness mode */ -#define CMNG_FNS_NONE 0 -#define CMNG_FNS_MINMAX 1 +#define CMNG_FNS_NONE 0 +#define CMNG_FNS_MINMAX 1 #define HC_SEG_ACCESS_DEF 0 /*Driver decision 0-3*/ #define HC_SEG_ACCESS_ATTN 4 @@ -2090,18 +2503,55 @@ static const u32 dmae_reg_go_c[] = { DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15 }; -void bnx2x_set_ethtool_ops(struct net_device *netdev); +void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev); void bnx2x_notify_link_changed(struct bnx2x *bp); - -#define BNX2X_MF_PROTOCOL(bp) \ +#define BNX2X_MF_SD_PROTOCOL(bp) \ ((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK) -#ifdef BCM_CNIC -#define BNX2X_IS_MF_PROTOCOL_ISCSI(bp) \ - (BNX2X_MF_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI) +#define BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) \ + (BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI) -#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_PROTOCOL_ISCSI(bp)) -#endif +#define BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) \ + (BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_FCOE) + +#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) +#define IS_MF_FCOE_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) + +#define BNX2X_MF_EXT_PROTOCOL_FCOE(bp) ((bp)->mf_ext_config & \ + MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) + +#define IS_MF_FCOE_AFEX(bp) (IS_MF_AFEX(bp) && BNX2X_MF_EXT_PROTOCOL_FCOE(bp)) +#define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \ + (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \ + BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) + +#define SET_FLAG(value, mask, flag) \ + do {\ + (value) &= ~(mask);\ + (value) |= ((flag) << (mask##_SHIFT));\ + } while (0) + +#define GET_FLAG(value, mask) \ + (((value) & (mask)) >> (mask##_SHIFT)) + +#define GET_FIELD(value, fname) \ + (((value) & (fname##_MASK)) >> (fname##_SHIFT)) + +enum { + SWITCH_UPDATE, + AFEX_UPDATE, +}; + +#define NUM_MACS 8 + +void bnx2x_set_local_cmng(struct bnx2x *bp); + +void bnx2x_update_mng_version(struct bnx2x *bp); + +#define MCPR_SCRATCH_BASE(bp) \ + (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) + +#define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX)) #endif /* bnx2x.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 7aee46983be..c43e7238de2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1,12 +1,12 @@ /* bnx2x_cmn.c: Broadcom Everest network driver. * - * Copyright (c) 2007-2011 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. 
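
/* SET_FLAG()/GET_FLAG() above rely on the convention that every FOO mask has
 * a FOO_SHIFT companion, which the macros reach via token pasting
 * (mask##_SHIFT). A tiny standalone example with made-up mask names:
 */
#include <stdio.h>

#define DEMO_PROTO_MASK		0x6	/* hypothetical 2-bit field, bits 2:1 */
#define DEMO_PROTO_MASK_SHIFT	1

#define SET_FLAG(value, mask, flag)			\
	do {						\
		(value) &= ~(mask);			\
		(value) |= ((flag) << (mask##_SHIFT));	\
	} while (0)

#define GET_FLAG(value, mask)	(((value) & (mask)) >> (mask##_SHIFT))

int main(void)
{
	unsigned int v = 0;

	SET_FLAG(v, DEMO_PROTO_MASK, 0x3);	/* write 3 into the field */
	printf("v = 0x%x, field = %u\n", v, GET_FLAG(v, DEMO_PROTO_MASK));
	return 0;
}
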
* - * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com> * Written by: Eliezer Tamir * Based on code from Michael Chan's bnx2 driver * UDP CSUM errata workaround by Arik Gendelman @@ -21,54 +21,54 @@ #include <linux/if_vlan.h> #include <linux/interrupt.h> #include <linux/ip.h> +#include <net/tcp.h> #include <net/ipv6.h> #include <net/ip6_checksum.h> -#include <linux/firmware.h> +#include <net/busy_poll.h> #include <linux/prefetch.h> #include "bnx2x_cmn.h" #include "bnx2x_init.h" #include "bnx2x_sp.h" +static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp); +static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp); +static int bnx2x_alloc_fp_mem(struct bnx2x *bp); +static int bnx2x_poll(struct napi_struct *napi, int budget); +static void bnx2x_add_all_napi_cnic(struct bnx2x *bp) +{ + int i; -/** - * bnx2x_bz_fp - zero content of the fastpath structure. - * - * @bp: driver handle - * @index: fastpath index to be zeroed - * - * Makes sure the contents of the bp->fp[index].napi is kept - * intact. - */ -static inline void bnx2x_bz_fp(struct bnx2x *bp, int index) + /* Add NAPI objects */ + for_each_rx_queue_cnic(bp, i) { + netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), + bnx2x_poll, NAPI_POLL_WEIGHT); + napi_hash_add(&bnx2x_fp(bp, i, napi)); + } +} + +static void bnx2x_add_all_napi(struct bnx2x *bp) { - struct bnx2x_fastpath *fp = &bp->fp[index]; - struct napi_struct orig_napi = fp->napi; - /* bzero bnx2x_fastpath contents */ - memset(fp, 0, sizeof(*fp)); + int i; - /* Restore the NAPI object as it has been already initialized */ - fp->napi = orig_napi; + /* Add NAPI objects */ + for_each_eth_queue(bp, i) { + netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), + bnx2x_poll, NAPI_POLL_WEIGHT); + napi_hash_add(&bnx2x_fp(bp, i, napi)); + } +} - fp->bp = bp; - fp->index = index; - if (IS_ETH_FP(fp)) - fp->max_cos = bp->max_cos; - else - /* Special queues support only one CoS */ - fp->max_cos = 1; +static int bnx2x_calc_num_queues(struct bnx2x *bp) +{ + int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues(); - /* - * set the tpa flag for each queue. The tpa flag determines the queue - * minimal size so it must be set prior to queue memory allocation - */ - fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0); + /* Reduce memory usage in kdump environment by using only one queue */ + if (reset_devices) + nq = 1; -#ifdef BCM_CNIC - /* We don't want TPA on an FCoE L2 ring */ - if (IS_FCOE_FP(fp)) - fp->disable_tpa = 1; -#endif + nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp)); + return nq; } /** @@ -81,12 +81,20 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index) * Makes sure the contents of the bp->fp[to].napi is kept * intact. This is done by first copying the napi struct from * the target to the source, and then mem copying the entire - * source onto the target + * source onto the target. Update txdata pointers and related + * content. 
*/ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) { struct bnx2x_fastpath *from_fp = &bp->fp[from]; struct bnx2x_fastpath *to_fp = &bp->fp[to]; + struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from]; + struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to]; + struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from]; + struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to]; + int old_max_eth_txqs, new_max_eth_txqs; + int old_txdata_index = 0, new_txdata_index = 0; + struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info; /* Copy the NAPI object as it has been already initialized */ from_fp->napi = to_fp->napi; @@ -94,9 +102,91 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) /* Move bnx2x_fastpath contents */ memcpy(to_fp, from_fp, sizeof(*to_fp)); to_fp->index = to; + + /* Retain the tpa_info of the original `to' version as we don't want + * 2 FPs to contain the same tpa_info pointer. + */ + to_fp->tpa_info = old_tpa_info; + + /* move sp_objs contents as well, as their indices match fp ones */ + memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs)); + + /* move fp_stats contents as well, as their indices match fp ones */ + memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats)); + + /* Update txdata pointers in fp and move txdata content accordingly: + * Each fp consumes 'max_cos' txdata structures, so the index should be + * decremented by max_cos x delta. + */ + + old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos; + new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) * + (bp)->max_cos; + if (from == FCOE_IDX(bp)) { + old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET; + new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET; + } + + memcpy(&bp->bnx2x_txq[new_txdata_index], + &bp->bnx2x_txq[old_txdata_index], + sizeof(struct bnx2x_fp_txdata)); + to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index]; } -int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ +/** + * bnx2x_fill_fw_str - Fill buffer with FW version string. + * + * @bp: driver handle + * @buf: character buffer to fill with the fw name + * @buf_len: length of the above buffer + * + */ +void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len) +{ + if (IS_PF(bp)) { + u8 phy_fw_ver[PHY_FW_VER_LEN]; + + phy_fw_ver[0] = '\0'; + bnx2x_get_ext_phy_fw_version(&bp->link_params, + phy_fw_ver, PHY_FW_VER_LEN); + strlcpy(buf, bp->fw_ver, buf_len); + snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver), + "bc %d.%d.%d%s%s", + (bp->common.bc_ver & 0xff0000) >> 16, + (bp->common.bc_ver & 0xff00) >> 8, + (bp->common.bc_ver & 0xff), + ((phy_fw_ver[0] != '\0') ? 
" phy " : ""), phy_fw_ver); + } else { + bnx2x_vf_fill_fw_str(bp, buf, buf_len); + } +} + +/** + * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact + * + * @bp: driver handle + * @delta: number of eth queues which were not allocated + */ +static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta) +{ + int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp); + + /* Queue pointer cannot be re-set on an fp-basis, as moving pointer + * backward along the array could cause memory to be overridden + */ + for (cos = 1; cos < bp->max_cos; cos++) { + for (i = 0; i < old_eth_num - delta; i++) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + int new_idx = cos * (old_eth_num - delta) + i; + + memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos], + sizeof(struct bnx2x_fp_txdata)); + fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx]; + } + } +} + +int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ /* free skb in the packet ring at pos idx * return idx of last bd freed @@ -111,19 +201,15 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, struct sk_buff *skb = tx_buf->skb; u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; int nbd; + u16 split_bd_len = 0; /* prefetch skb end pointer to speedup dev_kfree_skb() */ prefetch(&skb->end); - DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", + DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", txdata->txq_index, idx, tx_buf, skb); - /* unmap first bd */ - DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx); tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd; - dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), - BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE); - nbd = le16_to_cpu(tx_start_bd->nbd) - 1; #ifdef BNX2X_STOP_ON_ERROR @@ -141,16 +227,28 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, --nbd; bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); - /* ...and the TSO split header bd since they have no mapping */ + if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) { + /* Skip second parse bd... */ + --nbd; + bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); + } + + /* TSO headers+data bds share a common mapping. 
See bnx2x_tx_split() */ if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { + tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; + split_bd_len = BD_UNMAP_LEN(tx_data_bd); --nbd; bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); } + /* unmap first bd */ + dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), + BD_UNMAP_LEN(tx_start_bd) + split_bd_len, + DMA_TO_DEVICE); + /* now free frags */ while (nbd > 0) { - DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx); tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); @@ -160,10 +258,11 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, /* release skb */ WARN_ON(!skb); - if (skb) { + if (likely(skb)) { (*pkts_compl)++; (*bytes_compl) += skb->len; } + dev_kfree_skb_any(skb); tx_buf->first_bd = 0; tx_buf->skb = NULL; @@ -191,12 +290,12 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) pkt_cons = TX_BD(sw_cons); - DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u " - " pkt_cons %u\n", + DP(NETIF_MSG_TX_DONE, + "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n", txdata->txq_index, hw_cons, sw_cons, pkt_cons); bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons, - &pkts_compl, &bytes_compl); + &pkts_compl, &bytes_compl); sw_cons++; } @@ -218,7 +317,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) smp_mb(); if (unlikely(netif_tx_queue_stopped(txq))) { - /* Taking tx_lock() is needed to prevent reenabling the queue + /* Taking tx_lock() is needed to prevent re-enabling the queue * while it's empty. This could have happen if rx_action() gets * suspended in bnx2x_tx_int() after the condition before * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()): @@ -232,7 +331,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) if ((netif_tx_queue_stopped(txq)) && (bp->state == BNX2X_STATE_OPEN) && - (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)) + (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)) netif_tx_wake_queue(txq); __netif_tx_unlock(txq); @@ -249,13 +348,11 @@ static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp, fp->last_max_sge = idx; } -static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, - struct eth_fast_path_rx_cqe *fp_cqe) +static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, + u16 sge_len, + struct eth_end_agg_rx_cqe *cqe) { struct bnx2x *bp = fp->bp; - u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) - - le16_to_cpu(fp_cqe->len_on_bd)) >> - SGE_PAGE_SHIFT; u16 last_max, last_elem, first_elem; u16 delta = 0; u16 i; @@ -266,15 +363,15 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, /* First mark all used pages */ for (i = 0; i < sge_len; i++) BIT_VEC64_CLEAR_BIT(fp->sge_mask, - RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i]))); + RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i]))); DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n", - sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1])); + sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1])); /* Here we assume that the last SGE index is the biggest */ prefetch((void *)(fp->sge_mask)); bnx2x_update_last_max_sge(fp, - le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1])); + le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1])); last_max = RX_SGE(fp->last_max_sge); last_elem = last_max >> BIT_VEC64_ELEM_SHIFT; @@ -304,16 +401,26 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, fp->last_max_sge, 
fp->rx_sge_prod); } -/* Set Toeplitz hash value in the skb using the value from the +/* Get Toeplitz hash value in the skb using the value from the * CQE (calculated by HW). */ static u32 bnx2x_get_rxhash(const struct bnx2x *bp, - const struct eth_fast_path_rx_cqe *cqe) + const struct eth_fast_path_rx_cqe *cqe, + enum pkt_hash_types *rxhash_type) { - /* Set Toeplitz hash from CQE */ + /* Get Toeplitz hash from CQE */ if ((bp->dev->features & NETIF_F_RXHASH) && - (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) + (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) { + enum eth_rss_hash_type htype; + + htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE; + *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) || + (htype == TCP_IPV6_HASH_TYPE)) ? + PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3; + return le32_to_cpu(cqe->rss_hash_result); + } + *rxhash_type = PKT_HASH_TYPE_NONE; return 0; } @@ -367,7 +474,12 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, tpa_info->tpa_state = BNX2X_TPA_START; tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd); tpa_info->placement_offset = cqe->placement_offset; - tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe); + tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type); + if (fp->mode == TPA_MODE_GRO) { + u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len); + tpa_info->full_page = SGE_PAGES / gro_size * gro_size; + tpa_info->gro_size = gro_size; + } #ifdef BNX2X_STOP_ON_ERROR fp->tpa_queue_used |= (1 << queue); @@ -386,31 +498,35 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, */ #define TPA_TSTAMP_OPT_LEN 12 /** - * bnx2x_set_lro_mss - calculate the approximate value of the MSS + * bnx2x_set_gro_params - compute GRO values * - * @bp: driver handle + * @skb: packet skb * @parsing_flags: parsing flags from the START CQE * @len_on_bd: total length of the first packet for the * aggregation. + * @pkt_len: length of all segments * * Approximate value of the MSS for this aggregation calculated using * the first packet of it. + * Compute number of aggregated segments, and gso_type. */ -static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, - u16 len_on_bd) +static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags, + u16 len_on_bd, unsigned int pkt_len, + u16 num_of_coalesced_segs) { - /* - * TPA arrgregation won't have either IP options or TCP options + /* TPA aggregation won't have either IP options or TCP options * other than timestamp or IPv6 extension headers. */ u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr); if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == - PRS_FLAG_OVERETH_IPV6) + PRS_FLAG_OVERETH_IPV6) { hdrs_len += sizeof(struct ipv6hdr); - else /* IPv4 */ + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; + } else { hdrs_len += sizeof(struct iphdr); - + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + } /* Check if there was a TCP timestamp, if there is it's will * always be 12 bytes length: nop nop kind length echo val. 
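Plugging the header sizes into the computation above (completed just below with the timestamp adjustment and the gso_size assignment) gives the familiar LRO MSS. A hedged arithmetic sketch with hard-coded sizes (ETH_HLEN 14, struct iphdr 20, struct tcphdr 20, timestamp option 12):

/* Illustration only: IPv4 aggregation whose first packet carried TCP
 * timestamps, first-BD length 1514 (a full 1500-byte MTU frame).
 */
static unsigned short tpa_mss_ipv4_ts(unsigned short len_on_bd)
{
	unsigned short hdrs_len = 14 + 20 + 20 + 12;	/* = 66 */

	return len_on_bd - hdrs_len;			/* 1514 - 66 = 1448 */
}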
@@ -420,31 +536,72 @@ static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG) hdrs_len += TPA_TSTAMP_OPT_LEN; - return len_on_bd - hdrs_len; + skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len; + + /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count + * to skb_shinfo(skb)->gso_segs + */ + NAPI_GRO_CB(skb)->count = num_of_coalesced_segs; +} + +static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, + u16 index, gfp_t gfp_mask) +{ + struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT); + struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; + struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; + dma_addr_t mapping; + + if (unlikely(page == NULL)) { + BNX2X_ERR("Can't alloc sge\n"); + return -ENOMEM; + } + + mapping = dma_map_page(&bp->pdev->dev, page, 0, + SGE_PAGES, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + __free_pages(page, PAGES_PER_SGE_SHIFT); + BNX2X_ERR("Can't map sge\n"); + return -ENOMEM; + } + + sw_buf->page = page; + dma_unmap_addr_set(sw_buf, mapping, mapping); + + sge->addr_hi = cpu_to_le32(U64_HI(mapping)); + sge->addr_lo = cpu_to_le32(U64_LO(mapping)); + + return 0; } static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, - u16 queue, struct sk_buff *skb, + struct bnx2x_agg_info *tpa_info, + u16 pages, + struct sk_buff *skb, struct eth_end_agg_rx_cqe *cqe, u16 cqe_idx) { struct sw_rx_page *rx_pg, old_rx_pg; - u32 i, frag_len, frag_size, pages; - int err; - int j; - struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; + u32 i, frag_len, frag_size; + int err, j, frag_id = 0; u16 len_on_bd = tpa_info->len_on_bd; + u16 full_page = 0, gro_size = 0; frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd; - pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; + + if (fp->mode == TPA_MODE_GRO) { + gro_size = tpa_info->gro_size; + full_page = tpa_info->full_page; + } /* This is needed in order to enable forwarding support */ if (frag_size) - skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, - tpa_info->parsing_flags, len_on_bd); + bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd, + le16_to_cpu(cqe->pkt_len), + le16_to_cpu(cqe->num_of_coalesced_segs)); #ifdef BNX2X_STOP_ON_ERROR - if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { + if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) { BNX2X_ERR("SGL length is too long: %d. 
CQE index is %d\n", pages, cqe_idx); BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len); @@ -459,28 +616,44 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, /* FW gives the indices of the SGE as if the ring is an array (meaning that "next" element will consume 2 indices) */ - frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE)); + if (fp->mode == TPA_MODE_GRO) + frag_len = min_t(u32, frag_size, (u32)full_page); + else /* LRO */ + frag_len = min_t(u32, frag_size, (u32)SGE_PAGES); + rx_pg = &fp->rx_page_ring[sge_idx]; old_rx_pg = *rx_pg; /* If we fail to allocate a substitute page, we simply stop where we are and drop the whole packet */ - err = bnx2x_alloc_rx_sge(bp, fp, sge_idx); + err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC); if (unlikely(err)) { - fp->eth_q_stats.rx_skb_alloc_failed++; + bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; return err; } - /* Unmap the page as we r going to pass it to the stack */ + /* Unmap the page as we're going to pass it to the stack */ dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(&old_rx_pg, mapping), - SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); - + SGE_PAGES, DMA_FROM_DEVICE); /* Add one frag and update the appropriate fields in the skb */ - skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len); + if (fp->mode == TPA_MODE_LRO) + skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len); + else { /* GRO */ + int rem; + int offset = 0; + for (rem = frag_len; rem > 0; rem -= gro_size) { + int len = rem > gro_size ? gro_size : rem; + skb_fill_page_desc(skb, frag_id++, + old_rx_pg.page, offset, len); + if (offset) + get_page(old_rx_pg.page); + offset += len; + } + } skb->data_len += frag_len; - skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE; + skb->truesize += SGE_PAGES; skb->len += frag_len; frag_size -= frag_len; @@ -489,18 +662,94 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, return 0; } +static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data) +{ + if (fp->rx_frag_size) + put_page(virt_to_head_page(data)); + else + kfree(data); +} + +static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask) +{ + if (fp->rx_frag_size) { + /* GFP_KERNEL allocations are used only during initialization */ + if (unlikely(gfp_mask & __GFP_WAIT)) + return (void *)__get_free_page(gfp_mask); + + return netdev_alloc_frag(fp->rx_frag_size); + } + + return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask); +} + +#ifdef CONFIG_INET +static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb) +{ + const struct iphdr *iph = ip_hdr(skb); + struct tcphdr *th; + + skb_set_transport_header(skb, sizeof(struct iphdr)); + th = tcp_hdr(skb); + + th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), + iph->saddr, iph->daddr, 0); +} + +static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb) +{ + struct ipv6hdr *iph = ipv6_hdr(skb); + struct tcphdr *th; + + skb_set_transport_header(skb, sizeof(struct ipv6hdr)); + th = tcp_hdr(skb); + + th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), + &iph->saddr, &iph->daddr, 0); +} + +static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb, + void (*gro_func)(struct bnx2x*, struct sk_buff*)) +{ + skb_set_network_header(skb, 0); + gro_func(bp, skb); + tcp_gro_complete(skb); +} +#endif + +static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp, + struct sk_buff *skb) +{ +#ifdef CONFIG_INET + if (skb_shinfo(skb)->gso_size) { + switch (be16_to_cpu(skb->protocol)) { + case 
ETH_P_IP: + bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum); + break; + case ETH_P_IPV6: + bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum); + break; + default: + BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n", + be16_to_cpu(skb->protocol)); + } + } +#endif + skb_record_rx_queue(skb, fp->rx_queue); + napi_gro_receive(&fp->napi, skb); +} + static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, - u16 queue, struct eth_end_agg_rx_cqe *cqe, + struct bnx2x_agg_info *tpa_info, + u16 pages, + struct eth_end_agg_rx_cqe *cqe, u16 cqe_idx) { - struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; struct sw_rx_bd *rx_buf = &tpa_info->first_buf; - u32 pad = tpa_info->placement_offset; + u8 pad = tpa_info->placement_offset; u16 len = tpa_info->len_on_bd; struct sk_buff *skb = NULL; - u8 *data = rx_buf->data; - /* alloc new skb */ - u8 *new_data; + u8 *new_data, *data = rx_buf->data; u8 old_tpa_state = tpa_info->tpa_state; tpa_info->tpa_state = BNX2X_TPA_STOP; @@ -512,21 +761,19 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, goto drop; /* Try to allocate the new data */ - new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC); - + new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC); /* Unmap skb in the pool anyway, as we are going to change pool entry status to BNX2X_TPA_STOP even if new skb allocation fails. */ dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), fp->rx_buf_size, DMA_FROM_DEVICE); if (likely(new_data)) - skb = build_skb(data); + skb = build_skb(data, fp->rx_frag_size); if (likely(skb)) { #ifdef BNX2X_STOP_ON_ERROR if (pad + len > fp->rx_buf_size) { - BNX2X_ERR("skb_put is about to fail... " - "pad %d len %d rx_buf_size %d\n", + BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n", pad, len, fp->rx_buf_size); bnx2x_panic(); return; @@ -535,53 +782,105 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, skb_reserve(skb, pad + NET_SKB_PAD); skb_put(skb, len); - skb->rxhash = tpa_info->rxhash; + skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type); skb->protocol = eth_type_trans(skb, bp->dev); skb->ip_summed = CHECKSUM_UNNECESSARY; - if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) { + if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages, + skb, cqe, cqe_idx)) { if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) - __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag); - napi_gro_receive(&fp->napi, skb); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag); + bnx2x_gro_receive(bp, fp, skb); } else { - DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages" - " - dropping packet!\n"); + DP(NETIF_MSG_RX_STATUS, + "Failed to allocate new pages - dropping packet!\n"); dev_kfree_skb_any(skb); } - /* put new data in bin */ rx_buf->data = new_data; return; } - kfree(new_data); + if (new_data) + bnx2x_frag_free(fp, new_data); drop: /* drop the packet and keep the buffer in the bin */ DP(NETIF_MSG_RX_STATUS, "Failed to allocate or map a new skb - dropping packet!\n"); - fp->eth_q_stats.rx_skb_alloc_failed++; + bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++; } +static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp, + u16 index, gfp_t gfp_mask) +{ + u8 *data; + struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index]; + struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; + dma_addr_t mapping; + + data = bnx2x_frag_alloc(fp, gfp_mask); + if (unlikely(data == NULL)) + return -ENOMEM; -int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) + mapping = 
dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
+				 fp->rx_buf_size,
+				 DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		bnx2x_frag_free(fp, data);
+		BNX2X_ERR("Can't map rx data\n");
+		return -ENOMEM;
+	}
+
+	rx_buf->data = data;
+	dma_unmap_addr_set(rx_buf, mapping, mapping);
+
+	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+	return 0;
+}
+
+static
+void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
+			 struct bnx2x_fastpath *fp,
+			 struct bnx2x_eth_q_stats *qstats)
+{
+	/* Do nothing if no L4 csum validation was done.
+	 * We do not check whether IP csum was validated. For IPv4 we assume
+	 * that if the card got as far as validating the L4 csum, it also
+	 * validated the IP csum. IPv6 has no IP csum.
+	 */
+	if (cqe->fast_path_cqe.status_flags &
+	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
+		return;
+
+	/* If L4 validation was done, check if an error was found. */
+
+	if (cqe->fast_path_cqe.type_error_flags &
+	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
+	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
+		qstats->hw_csum_err++;
+	else
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 {
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
-	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
+	u16 sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
+	union eth_rx_cqe *cqe;
+	struct eth_fast_path_rx_cqe *cqe_fp;

 #ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
 #endif
-
-	/* CQ "next element" is of the size of the regular element,
-	   that's why it's ok here */
-	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
-	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
-		hw_comp_cons++;
+	if (budget <= 0)
+		return rx_pkt;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
@@ -589,61 +888,72 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

-	/* Memory barrier necessary as speculative reads of the rx
-	 * buffer can be ahead of the index in the status block
-	 */
-	rmb();
+	comp_ring_cons = RCQ_BD(sw_comp_cons);
+	cqe = &fp->rx_comp_ring[comp_ring_cons];
+	cqe_fp = &cqe->fast_path_cqe;

	DP(NETIF_MSG_RX_STATUS,
-	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
-	   fp->index, hw_comp_cons, sw_comp_cons);
+	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);

-	while (sw_comp_cons != hw_comp_cons) {
+	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
-		union eth_rx_cqe *cqe;
-		struct eth_fast_path_rx_cqe *cqe_fp;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
-		u16 len, pad;
+		u16 len, pad, queue;
		u8 *data;
+		u32 rxhash;
+		enum pkt_hash_types rxhash_type;

 #ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
 #endif

-		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

-		cqe = &fp->rx_comp_ring[comp_ring_cons];
-		cqe_fp = &cqe->fast_path_cqe;
+		/* A rmb() is required to ensure that the CQE is not read
+		 * before it is written by the adapter DMA. PCI ordering
+		 * rules will make sure the other fields are written before
+		 * the marker at the end of struct eth_fast_path_rx_cqe,
+		 * but without rmb() a weakly ordered processor can process
+		 * stale data. Without the barrier the TPA state machine
+		 * might enter an inconsistent state and the kernel stack
+		 * might be provided with an incorrect packet description -
+		 * these lead to various kernel crashes.
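+		 * The ordering below therefore mirrors the producer: the
+		 * loop condition reads only the completion marker
+		 * (BNX2X_IS_CQE_COMPLETED()), rmb() is issued, and only
+		 * then are the remaining CQE fields read.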
+ */ + rmb(); + cqe_fp_flags = cqe_fp->type_error_flags; cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; - DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x" - " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags), + DP(NETIF_MSG_RX_STATUS, + "CQE type %x err %x status %x queue %x vlan %x len %u\n", + CQE_TYPE(cqe_fp_flags), cqe_fp_flags, cqe_fp->status_flags, le32_to_cpu(cqe_fp->rss_hash_result), - le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len)); + le16_to_cpu(cqe_fp->vlan_tag), + le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len)); /* is this a slowpath msg? */ if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) { bnx2x_sp_event(fp, cqe); goto next_cqe; } + rx_buf = &fp->rx_buf_ring[bd_cons]; data = rx_buf->data; if (!CQE_TYPE_FAST(cqe_fp_type)) { + struct bnx2x_agg_info *tpa_info; + u16 frag_size, pages; #ifdef BNX2X_STOP_ON_ERROR /* sanity check */ if (fp->disable_tpa && (CQE_TYPE_START(cqe_fp_type) || CQE_TYPE_STOP(cqe_fp_type))) - BNX2X_ERR("START/STOP packet while " - "disable_tpa type %x\n", + BNX2X_ERR("START/STOP packet while disable_tpa type %x\n", CQE_TYPE(cqe_fp_type)); #endif @@ -656,28 +966,37 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) bnx2x_tpa_start(fp, queue, bd_cons, bd_prod, cqe_fp); + goto next_rx; - } else { - u16 queue = - cqe->end_agg_cqe.queue_index; - DP(NETIF_MSG_RX_STATUS, - "calling tpa_stop on queue %d\n", - queue); + } + queue = cqe->end_agg_cqe.queue_index; + tpa_info = &fp->tpa_info[queue]; + DP(NETIF_MSG_RX_STATUS, + "calling tpa_stop on queue %d\n", + queue); + + frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) - + tpa_info->len_on_bd; + + if (fp->mode == TPA_MODE_GRO) + pages = (frag_size + tpa_info->full_page - 1) / + tpa_info->full_page; + else + pages = SGE_PAGE_ALIGN(frag_size) >> + SGE_PAGE_SHIFT; - bnx2x_tpa_stop(bp, fp, queue, - &cqe->end_agg_cqe, - comp_ring_cons); + bnx2x_tpa_stop(bp, fp, tpa_info, pages, + &cqe->end_agg_cqe, comp_ring_cons); #ifdef BNX2X_STOP_ON_ERROR - if (bp->panic) - return 0; + if (bp->panic) + return 0; #endif - bnx2x_update_sge_prod(fp, cqe_fp); - goto next_cqe; - } + bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe); + goto next_cqe; } /* non TPA */ - len = le16_to_cpu(cqe_fp->pkt_len); + len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len); pad = cqe_fp->placement_offset; dma_sync_single_for_cpu(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), @@ -687,10 +1006,10 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) prefetch(data + pad); /* speedup eth_type_trans() */ /* is this an error packet? 
*/ if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { - DP(NETIF_MSG_RX_ERR, + DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS, "ERROR flags %x rx packet %u\n", cqe_fp_flags, sw_comp_cons); - fp->eth_q_stats.rx_err_discard_pkt++; + bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++; goto reuse_rx; } @@ -701,31 +1020,32 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) (len <= RX_COPY_THRESH)) { skb = netdev_alloc_skb_ip_align(bp->dev, len); if (skb == NULL) { - DP(NETIF_MSG_RX_ERR, + DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS, "ERROR packet dropped because of alloc failure\n"); - fp->eth_q_stats.rx_skb_alloc_failed++; + bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; goto reuse_rx; } memcpy(skb->data, data + pad, len); bnx2x_reuse_rx_data(fp, bd_cons, bd_prod); } else { - if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) { + if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod, + GFP_ATOMIC) == 0)) { dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), fp->rx_buf_size, DMA_FROM_DEVICE); - skb = build_skb(data); + skb = build_skb(data, fp->rx_frag_size); if (unlikely(!skb)) { - kfree(data); - fp->eth_q_stats.rx_skb_alloc_failed++; + bnx2x_frag_free(fp, data); + bnx2x_fp_qstats(bp, fp)-> + rx_skb_alloc_failed++; goto next_rx; } skb_reserve(skb, pad); } else { - DP(NETIF_MSG_RX_ERR, - "ERROR packet dropped because " - "of alloc failure\n"); - fp->eth_q_stats.rx_skb_alloc_failed++; + DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS, + "ERROR packet dropped because of alloc failure\n"); + bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; reuse_rx: bnx2x_reuse_rx_data(fp, bd_cons, bd_prod); goto next_rx; @@ -736,27 +1056,28 @@ reuse_rx: skb->protocol = eth_type_trans(skb, bp->dev); /* Set Toeplitz hash for a none-LRO skb */ - skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp); + rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type); + skb_set_hash(skb, rxhash, rxhash_type); skb_checksum_none_assert(skb); - if (bp->dev->features & NETIF_F_RXCSUM) { - - if (likely(BNX2X_RX_CSUM_OK(cqe))) - skb->ip_summed = CHECKSUM_UNNECESSARY; - else - fp->eth_q_stats.hw_csum_err++; - } + if (bp->dev->features & NETIF_F_RXCSUM) + bnx2x_csum_validate(skb, cqe, fp, + bnx2x_fp_qstats(bp, fp)); skb_record_rx_queue(skb, fp->rx_queue); if (le16_to_cpu(cqe_fp->pars_flags.flags) & PARSING_FLAGS_VLAN) - __vlan_hwaccel_put_tag(skb, + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), le16_to_cpu(cqe_fp->vlan_tag)); - napi_gro_receive(&fp->napi, skb); + skb_mark_napi_id(skb, &fp->napi); + if (bnx2x_fp_ll_polling(fp)) + netif_receive_skb(skb); + else + napi_gro_receive(&fp->napi, skb); next_rx: rx_buf->data = NULL; @@ -768,8 +1089,15 @@ next_cqe: sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod); sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons); + /* mark CQE as free */ + BNX2X_SEED_CQE(cqe_fp); + if (rx_pkt == budget) break; + + comp_ring_cons = RCQ_BD(sw_comp_cons); + cqe = &fp->rx_comp_ring[comp_ring_cons]; + cqe_fp = &cqe->fast_path_cqe; } /* while */ fp->rx_bd_cons = bd_cons; @@ -793,9 +1121,10 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) struct bnx2x *bp = fp->bp; u8 cos; - DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB " - "[fp %d fw_sd %d igusb %d]\n", + DP(NETIF_MSG_INTR, + "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n", fp->index, fp->fw_sb_id, fp->igu_sb_id); + bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); #ifdef BNX2X_STOP_ON_ERROR @@ -804,10 +1133,8 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) #endif /* Handle Rx and Tx according to MSI-X vector */ - 
prefetch(fp->rx_cons_sb); - for_each_cos_in_tx_queue(fp, cos) - prefetch(fp->txdata[cos].tx_cons_sb); + prefetch(fp->txdata_ptr[cos]->tx_cons_sb); prefetch(&fp->sb_running_index[SM_RX_ID]); napi_schedule(&bnx2x_fp(bp, fp->index, napi)); @@ -820,14 +1147,12 @@ void bnx2x_acquire_phy_lock(struct bnx2x *bp) { mutex_lock(&bp->port.phy_mutex); - if (bp->port.need_hw_lock) - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); } void bnx2x_release_phy_lock(struct bnx2x *bp) { - if (bp->port.need_hw_lock) - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); mutex_unlock(&bp->port.phy_mutex); } @@ -864,14 +1189,14 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp) * * It uses a none-atomic bit operations because is called under the mutex. */ -static inline void bnx2x_fill_report_data(struct bnx2x *bp, - struct bnx2x_link_report_data *data) +static void bnx2x_fill_report_data(struct bnx2x *bp, + struct bnx2x_link_report_data *data) { u16 line_speed = bnx2x_get_mf_speed(bp); memset(data, 0, sizeof(*data)); - /* Fill the report data: efective line speed */ + /* Fill the report data: effective line speed */ data->line_speed = line_speed; /* Link is down */ @@ -914,7 +1239,7 @@ void bnx2x_link_report(struct bnx2x *bp) * * @bp: driver handle * - * None atomic inmlementation. + * None atomic implementation. * Should be called under the phy_lock. */ void __bnx2x_link_report(struct bnx2x *bp) @@ -922,7 +1247,7 @@ void __bnx2x_link_report(struct bnx2x *bp) struct bnx2x_link_report_data cur_data; /* reread mf_cfg */ - if (!CHIP_IS_E1(bp)) + if (IS_PF(bp) && !CHIP_IS_E1(bp)) bnx2x_read_mf_cfg(bp); /* Read the current link report info */ @@ -983,6 +1308,66 @@ void __bnx2x_link_report(struct bnx2x *bp) } } +static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp) +{ + int i; + + for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { + struct eth_rx_sge *sge; + + sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2]; + sge->addr_hi = + cpu_to_le32(U64_HI(fp->rx_sge_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); + + sge->addr_lo = + cpu_to_le32(U64_LO(fp->rx_sge_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); + } +} + +static void bnx2x_free_tpa_pool(struct bnx2x *bp, + struct bnx2x_fastpath *fp, int last) +{ + int i; + + for (i = 0; i < last; i++) { + struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i]; + struct sw_rx_bd *first_buf = &tpa_info->first_buf; + u8 *data = first_buf->data; + + if (data == NULL) { + DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i); + continue; + } + if (tpa_info->tpa_state == BNX2X_TPA_START) + dma_unmap_single(&bp->pdev->dev, + dma_unmap_addr(first_buf, mapping), + fp->rx_buf_size, DMA_FROM_DEVICE); + bnx2x_frag_free(fp, data); + first_buf->data = NULL; + } +} + +void bnx2x_init_rx_rings_cnic(struct bnx2x *bp) +{ + int j; + + for_each_rx_queue_cnic(bp, j) { + struct bnx2x_fastpath *fp = &bp->fp[j]; + + fp->rx_bd_cons = 0; + + /* Activate BD ring */ + /* Warning! 
+ * this will generate an interrupt (to the TSTORM) + * must only be done after chip is initialized + */ + bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, + fp->rx_sge_prod); + } +} + void bnx2x_init_rx_rings(struct bnx2x *bp) { int func = BP_FUNC(bp); @@ -990,27 +1375,25 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) int i, j; /* Allocate TPA resources */ - for_each_rx_queue(bp, j) { + for_each_eth_queue(bp, j) { struct bnx2x_fastpath *fp = &bp->fp[j]; DP(NETIF_MSG_IFUP, "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); if (!fp->disable_tpa) { - /* Fill the per-aggregtion pool */ + /* Fill the per-aggregation pool */ for (i = 0; i < MAX_AGG_QS(bp); i++) { struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i]; struct sw_rx_bd *first_buf = &tpa_info->first_buf; - first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, - GFP_ATOMIC); + first_buf->data = + bnx2x_frag_alloc(fp, GFP_KERNEL); if (!first_buf->data) { - BNX2X_ERR("Failed to allocate TPA " - "skb pool for queue[%d] - " - "disabling TPA on this " - "queue!\n", j); + BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n", + j); bnx2x_free_tpa_pool(bp, fp, i); fp->disable_tpa = 1; break; @@ -1029,11 +1412,12 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) for (i = 0, ring_prod = 0; i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) { - if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) { - BNX2X_ERR("was only able to allocate " - "%d rx sges\n", i); - BNX2X_ERR("disabling TPA for " - "queue[%d]\n", j); + if (bnx2x_alloc_rx_sge(bp, fp, ring_prod, + GFP_KERNEL) < 0) { + BNX2X_ERR("was only able to allocate %d rx sges\n", + i); + BNX2X_ERR("disabling TPA for queue[%d]\n", + j); /* Cleanup already allocated elements */ bnx2x_free_rx_sge_range(bp, fp, ring_prod); @@ -1050,7 +1434,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) } } - for_each_rx_queue(bp, j) { + for_each_eth_queue(bp, j) { struct bnx2x_fastpath *fp = &bp->fp[j]; fp->rx_bd_cons = 0; @@ -1077,28 +1461,45 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) } } -static void bnx2x_free_tx_skbs(struct bnx2x *bp) +static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp) { - int i; u8 cos; + struct bnx2x *bp = fp->bp; - for_each_tx_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - for_each_cos_in_tx_queue(fp, cos) { - struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; - unsigned pkts_compl = 0, bytes_compl = 0; + for_each_cos_in_tx_queue(fp, cos) { + struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; + unsigned pkts_compl = 0, bytes_compl = 0; - u16 sw_prod = txdata->tx_pkt_prod; - u16 sw_cons = txdata->tx_pkt_cons; + u16 sw_prod = txdata->tx_pkt_prod; + u16 sw_cons = txdata->tx_pkt_cons; - while (sw_cons != sw_prod) { - bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons), - &pkts_compl, &bytes_compl); - sw_cons++; - } - netdev_tx_reset_queue( - netdev_get_tx_queue(bp->dev, txdata->txq_index)); + while (sw_cons != sw_prod) { + bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons), + &pkts_compl, &bytes_compl); + sw_cons++; } + + netdev_tx_reset_queue( + netdev_get_tx_queue(bp->dev, + txdata->txq_index)); + } +} + +static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp) +{ + int i; + + for_each_tx_queue_cnic(bp, i) { + bnx2x_free_tx_skbs_queue(&bp->fp[i]); + } +} + +static void bnx2x_free_tx_skbs(struct bnx2x *bp) +{ + int i; + + for_each_eth_queue(bp, i) { + bnx2x_free_tx_skbs_queue(&bp->fp[i]); } } @@ -1122,7 +1523,16 @@ static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp) fp->rx_buf_size, DMA_FROM_DEVICE); rx_buf->data = NULL; - 
kfree(data); + bnx2x_frag_free(fp, data); + } +} + +static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp) +{ + int j; + + for_each_rx_queue_cnic(bp, j) { + bnx2x_free_rx_bds(&bp->fp[j]); } } @@ -1130,7 +1540,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp) { int j; - for_each_rx_queue(bp, j) { + for_each_eth_queue(bp, j) { struct bnx2x_fastpath *fp = &bp->fp[j]; bnx2x_free_rx_bds(fp); @@ -1140,6 +1550,12 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp) } } +static void bnx2x_free_skbs_cnic(struct bnx2x *bp) +{ + bnx2x_free_tx_skbs_cnic(bp); + bnx2x_free_rx_skbs_cnic(bp); +} + void bnx2x_free_skbs(struct bnx2x *bp) { bnx2x_free_tx_skbs(bp); @@ -1175,21 +1591,26 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs) if (nvecs == offset) return; - free_irq(bp->msix_table[offset].vector, bp->dev); - DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", - bp->msix_table[offset].vector); - offset++; -#ifdef BCM_CNIC - if (nvecs == offset) - return; - offset++; -#endif + + /* VFs don't have a default SB */ + if (IS_PF(bp)) { + free_irq(bp->msix_table[offset].vector, bp->dev); + DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", + bp->msix_table[offset].vector); + offset++; + } + + if (CNIC_SUPPORT(bp)) { + if (nvecs == offset) + return; + offset++; + } for_each_eth_queue(bp, i) { if (nvecs == offset) return; - DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d " - "irq\n", i, bp->msix_table[offset].vector); + DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n", + i, bp->msix_table[offset].vector); free_irq(bp->msix_table[offset++].vector, &bp->fp[i]); } @@ -1197,95 +1618,121 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs) void bnx2x_free_irq(struct bnx2x *bp) { - if (bp->flags & USING_MSIX_FLAG) - bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) + - CNIC_PRESENT + 1); - else if (bp->flags & USING_MSI_FLAG) - free_irq(bp->pdev->irq, bp->dev); - else - free_irq(bp->pdev->irq, bp->dev); + if (bp->flags & USING_MSIX_FLAG && + !(bp->flags & USING_SINGLE_MSIX_FLAG)) { + int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp); + + /* vfs don't have a default status block */ + if (IS_PF(bp)) + nvecs++; + + bnx2x_free_msix_irqs(bp, nvecs); + } else { + free_irq(bp->dev->irq, bp->dev); + } } int bnx2x_enable_msix(struct bnx2x *bp) { - int msix_vec = 0, i, rc, req_cnt; + int msix_vec = 0, i, rc; - bp->msix_table[msix_vec].entry = msix_vec; - DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", - bp->msix_table[0].entry); - msix_vec++; + /* VFs don't have a default status block */ + if (IS_PF(bp)) { + bp->msix_table[msix_vec].entry = msix_vec; + BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n", + bp->msix_table[0].entry); + msix_vec++; + } + + /* Cnic requires an msix vector for itself */ + if (CNIC_SUPPORT(bp)) { + bp->msix_table[msix_vec].entry = msix_vec; + BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n", + msix_vec, bp->msix_table[msix_vec].entry); + msix_vec++; + } -#ifdef BCM_CNIC - bp->msix_table[msix_vec].entry = msix_vec; - DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n", - bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry); - msix_vec++; -#endif /* We need separate vectors for ETH queues only (not FCoE) */ for_each_eth_queue(bp, i) { bp->msix_table[msix_vec].entry = msix_vec; - DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d " - "(fastpath #%u)\n", msix_vec, msix_vec, i); + BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n", + msix_vec, msix_vec, i); msix_vec++; } - req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1; - - 
rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
+	DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
+	   msix_vec);
+	rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
+				   BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);

	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
-	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
-		/* how less vectors we will have? */
-		int diff = req_cnt - rc;
+	if (rc == -ENOSPC) {
+		/* Get by with single vector */
+		rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
+		if (rc < 0) {
+			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
+				       rc);
+			goto no_msix;
+		}

-		DP(NETIF_MSG_IFUP,
-		   "Trying to use less MSI-X vectors: %d\n", rc);
+		BNX2X_DEV_INFO("Using single MSI-X vector\n");
+		bp->flags |= USING_SINGLE_MSIX_FLAG;

-		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
+		BNX2X_DEV_INFO("set number of queues to 1\n");
+		bp->num_ethernet_queues = 1;
+		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
+	} else if (rc < 0) {
+		BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
+		goto no_msix;
+	} else if (rc < msix_vec) {
+		/* how many fewer vectors will we have? */
+		int diff = msix_vec - rc;
+
+		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);

-		if (rc) {
-			DP(NETIF_MSG_IFUP,
-			   "MSI-X is not attainable rc %d\n", rc);
-			return rc;
-		}
		/*
		 * decrease number of queues by number of unallocated entries
		 */
-		bp->num_queues -= diff;
-
-		DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
-		   bp->num_queues);
-	} else if (rc) {
-		/* fall to INTx if not enough memory */
-		if (rc == -ENOMEM)
-			bp->flags |= DISABLE_MSI_FLAG;
-		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
-		return rc;
+		bp->num_ethernet_queues -= diff;
+		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
+
+		BNX2X_DEV_INFO("New queue configuration set: %d\n",
+			       bp->num_queues);
	}
	bp->flags |= USING_MSIX_FLAG;

	return 0;
+
+no_msix:
+	/* fall back to INTx if not enough memory */
+	if (rc == -ENOMEM)
+		bp->flags |= DISABLE_MSI_FLAG;
+
+	return rc;
 }

 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
 {
	int i, rc, offset = 0;

-	rc = request_irq(bp->msix_table[offset++].vector,
-			 bnx2x_msix_sp_int, 0,
-			 bp->dev->name, bp->dev);
-	if (rc) {
-		BNX2X_ERR("request sp irq failed\n");
-		return -EBUSY;
+	/* no default status block for vf */
+	if (IS_PF(bp)) {
+		rc = request_irq(bp->msix_table[offset++].vector,
+				 bnx2x_msix_sp_int, 0,
+				 bp->dev->name, bp->dev);
+		if (rc) {
+			BNX2X_ERR("request sp irq failed\n");
+			return -EBUSY;
+		}
	}

-#ifdef BCM_CNIC
-	offset++;
-#endif
+	if (CNIC_SUPPORT(bp))
+		offset++;
+
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
@@ -1304,13 +1751,20 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
-	offset = 1 + CNIC_PRESENT;
-	netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
-		    " ... fp[%d] %d\n",
-		    bp->msix_table[0].vector,
-		    0, bp->msix_table[offset].vector,
-		    i - 1, bp->msix_table[offset + i - 1].vector);
-
+	if (IS_PF(bp)) {
+		offset = 1 + CNIC_SUPPORT(bp);
+		netdev_info(bp->dev,
+			    "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
+			    bp->msix_table[0].vector,
+			    0, bp->msix_table[offset].vector,
+			    i - 1, bp->msix_table[offset + i - 1].vector);
+	} else {
+		offset = CNIC_SUPPORT(bp);
+		netdev_info(bp->dev,
+			    "using MSI-X IRQs: fp[%d] %d ...
fp[%d] %d\n", + 0, bp->msix_table[offset].vector, + i - 1, bp->msix_table[offset + i - 1].vector); + } return 0; } @@ -1320,7 +1774,7 @@ int bnx2x_enable_msi(struct bnx2x *bp) rc = pci_enable_msi(bp->pdev); if (rc) { - DP(NETIF_MSG_IFUP, "MSI is not attainable\n"); + BNX2X_DEV_INFO("MSI is not attainable\n"); return -1; } bp->flags |= USING_MSI_FLAG; @@ -1331,27 +1785,30 @@ int bnx2x_enable_msi(struct bnx2x *bp) static int bnx2x_req_irq(struct bnx2x *bp) { unsigned long flags; - int rc; + unsigned int irq; - if (bp->flags & USING_MSI_FLAG) + if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG)) flags = 0; else flags = IRQF_SHARED; - rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags, - bp->dev->name, bp->dev); - return rc; + if (bp->flags & USING_MSIX_FLAG) + irq = bp->msix_table[0].vector; + else + irq = bp->pdev->irq; + + return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev); } -static inline int bnx2x_setup_irqs(struct bnx2x *bp) +static int bnx2x_setup_irqs(struct bnx2x *bp) { int rc = 0; - if (bp->flags & USING_MSIX_FLAG) { + if (bp->flags & USING_MSIX_FLAG && + !(bp->flags & USING_SINGLE_MSIX_FLAG)) { rc = bnx2x_req_msix_irqs(bp); if (rc) return rc; } else { - bnx2x_ack_int(bp); rc = bnx2x_req_irq(bp); if (rc) { BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc); @@ -1359,34 +1816,67 @@ static inline int bnx2x_setup_irqs(struct bnx2x *bp) } if (bp->flags & USING_MSI_FLAG) { bp->dev->irq = bp->pdev->irq; - netdev_info(bp->dev, "using MSI IRQ %d\n", - bp->pdev->irq); + netdev_info(bp->dev, "using MSI IRQ %d\n", + bp->dev->irq); + } + if (bp->flags & USING_MSIX_FLAG) { + bp->dev->irq = bp->msix_table[0].vector; + netdev_info(bp->dev, "using MSIX IRQ %d\n", + bp->dev->irq); } } return 0; } -static inline void bnx2x_napi_enable(struct bnx2x *bp) +static void bnx2x_napi_enable_cnic(struct bnx2x *bp) { int i; - for_each_rx_queue(bp, i) + for_each_rx_queue_cnic(bp, i) { + bnx2x_fp_init_lock(&bp->fp[i]); napi_enable(&bnx2x_fp(bp, i, napi)); + } } -static inline void bnx2x_napi_disable(struct bnx2x *bp) +static void bnx2x_napi_enable(struct bnx2x *bp) { int i; - for_each_rx_queue(bp, i) + for_each_eth_queue(bp, i) { + bnx2x_fp_init_lock(&bp->fp[i]); + napi_enable(&bnx2x_fp(bp, i, napi)); + } +} + +static void bnx2x_napi_disable_cnic(struct bnx2x *bp) +{ + int i; + + for_each_rx_queue_cnic(bp, i) { + napi_disable(&bnx2x_fp(bp, i, napi)); + while (!bnx2x_fp_ll_disable(&bp->fp[i])) + usleep_range(1000, 2000); + } +} + +static void bnx2x_napi_disable(struct bnx2x *bp) +{ + int i; + + for_each_eth_queue(bp, i) { napi_disable(&bnx2x_fp(bp, i, napi)); + while (!bnx2x_fp_ll_disable(&bp->fp[i])) + usleep_range(1000, 2000); + } } void bnx2x_netif_start(struct bnx2x *bp) { if (netif_running(bp->dev)) { bnx2x_napi_enable(bp); + if (CNIC_LOADED(bp)) + bnx2x_napi_enable_cnic(bp); bnx2x_int_enable(bp); if (bp->state == BNX2X_STATE_OPEN) netif_tx_wake_all_queues(bp->dev); @@ -1397,14 +1887,16 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) { bnx2x_int_disable_sync(bp, disable_hw); bnx2x_napi_disable(bp); + if (CNIC_LOADED(bp)) + bnx2x_napi_disable_cnic(bp); } -u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) +u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) { struct bnx2x *bp = netdev_priv(dev); -#ifdef BCM_CNIC - if (!NO_FCOE(bp)) { + if (CNIC_LOADED(bp) && !NO_FCOE(bp)) { struct ethhdr *hdr = (struct ethhdr *)skb->data; u16 ether_type = ntohs(hdr->h_proto); @@ -1420,33 +1912,25 @@ 
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP)) return bnx2x_fcoe_tx(bp, txq_index); } -#endif + /* select a non-FCoE queue */ - return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); + return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); } void bnx2x_set_num_queues(struct bnx2x *bp) { - switch (bp->multi_mode) { - case ETH_RSS_MODE_DISABLED: - bp->num_queues = 1; - break; - case ETH_RSS_MODE_REGULAR: - bp->num_queues = bnx2x_calc_num_queues(bp); - break; + /* RSS queues */ + bp->num_ethernet_queues = bnx2x_calc_num_queues(bp); - default: - bp->num_queues = 1; - break; - } + /* override in STORAGE SD modes */ + if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) + bp->num_ethernet_queues = 1; -#ifdef BCM_CNIC - /* override in ISCSI SD mod */ - if (IS_MF_ISCSI_SD(bp)) - bp->num_queues = 1; -#endif /* Add special queues */ - bp->num_queues += NON_ETH_CONTEXT_USE; + bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */ + bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; + + BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); } /** @@ -1463,7 +1947,7 @@ void bnx2x_set_num_queues(struct bnx2x *bp) * * If the actual number of Tx queues (for each CoS) is less than 16 then there * will be the holes at the end of each group of 16 ETh L2 indices (0..15, - * 16..31,...) with indicies that are not coupled with any real Tx queue. + * 16..31,...) with indices that are not coupled with any real Tx queue. * * The proper configuration of skb->queue_mapping is handled by * bnx2x_select_queue() and __skb_tx_hash(). @@ -1471,20 +1955,18 @@ void bnx2x_set_num_queues(struct bnx2x *bp) * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash() * will return a proper Tx index if TC is enabled (netdev->num_tc > 0). 
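 *
 * For example (illustrative numbers): with 8 ETH queues and max_cos = 3,
 * the indices used are 0..7, 16..23 and 32..39, while 8..15, 24..31 and
 * 40..47 remain holes.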
*/ -static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) +static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic) { int rc, tx, rx; - tx = MAX_TXQS_PER_COS * bp->max_cos; + tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos; rx = BNX2X_NUM_ETH_QUEUES(bp); /* account for fcoe queue */ -#ifdef BCM_CNIC - if (!NO_FCOE(bp)) { - rx += FCOE_PRESENT; - tx += FCOE_PRESENT; + if (include_cnic && !NO_FCOE(bp)) { + rx++; + tx++; } -#endif rc = netif_set_real_num_tx_queues(bp->dev, tx); if (rc) { @@ -1497,13 +1979,13 @@ static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) return rc; } - DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n", + DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n", tx, rx); return rc; } -static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp) +static void bnx2x_set_rx_buf_size(struct bnx2x *bp) { int i; @@ -1527,26 +2009,26 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp) ETH_OVREHEAD + mtu + BNX2X_FW_RX_ALIGN_END; - /* Note : rx_buf_size doesnt take into account NET_SKB_PAD */ + /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */ + if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE) + fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD; + else + fp->rx_frag_size = 0; } } -static inline int bnx2x_init_rss_pf(struct bnx2x *bp) +static int bnx2x_init_rss(struct bnx2x *bp) { int i; - u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); - /* - * Prepare the inital contents fo the indirection table if RSS is + /* Prepare the initial contents for the indirection table if RSS is * enabled */ - if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { - for (i = 0; i < sizeof(ind_table); i++) - ind_table[i] = - bp->fp->cl_id + - ethtool_rxfh_indir_default(i, num_eth_queues); - } + for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++) + bp->rss_conf_obj.ind_table[i] = + bp->fp->cl_id + + ethtool_rxfh_indir_default(i, num_eth_queues); /* * For 57710 and 57711 SEARCHER configuration (rss_keys) is @@ -1556,14 +2038,13 @@ static inline int bnx2x_init_rss_pf(struct bnx2x *bp) * For 57712 and newer on the other hand it's a per-function * configuration. */ - return bnx2x_config_rss_pf(bp, ind_table, - bp->port.pmf || !CHIP_IS_E1x(bp)); + return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp)); } -int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash) +int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, + bool config_hash, bool enable) { - struct bnx2x_config_rss_params params = {0}; - int i; + struct bnx2x_config_rss_params params = {NULL}; /* Although RSS is meaningless when there is a single HW queue we * still need it enabled in order to have HW Rx hash generated. 
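ethtool_rxfh_indir_default(i, n) is simply i % n, so the loop above fills the indirection table round-robin over the ethernet queues, offset by the base client id. A runnable illustration (cl_id = 16 and 4 queues are made-up values):

#include <stdio.h>

int main(void)
{
	unsigned char ind_table[8];
	int i, cl_id = 16, num_eth_queues = 4;

	for (i = 0; i < 8; i++)
		ind_table[i] = cl_id + i % num_eth_queues;

	for (i = 0; i < 8; i++)
		printf("%d ", ind_table[i]);	/* 16 17 18 19 16 17 18 19 */
	printf("\n");
	return 0;
}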
@@ -1572,60 +2053,46 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash) * bp->multi_mode = ETH_RSS_MODE_DISABLED; */ - params.rss_obj = &bp->rss_conf_obj; + params.rss_obj = rss_obj; __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags); - /* RSS mode */ - switch (bp->multi_mode) { - case ETH_RSS_MODE_DISABLED: - __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags); - break; - case ETH_RSS_MODE_REGULAR: + if (enable) { __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags); - break; - case ETH_RSS_MODE_VLAN_PRI: - __set_bit(BNX2X_RSS_MODE_VLAN_PRI, ¶ms.rss_flags); - break; - case ETH_RSS_MODE_E1HOV_PRI: - __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, ¶ms.rss_flags); - break; - case ETH_RSS_MODE_IP_DSCP: - __set_bit(BNX2X_RSS_MODE_IP_DSCP, ¶ms.rss_flags); - break; - default: - BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode); - return -EINVAL; - } - /* If RSS is enabled */ - if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { /* RSS configuration */ __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags); __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags); __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags); __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags); + if (rss_obj->udp_rss_v4) + __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags); + if (rss_obj->udp_rss_v6) + __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags); + } else { + __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags); + } - /* Hash bits */ - params.rss_result_mask = MULTI_MASK; - - memcpy(params.ind_table, ind_table, sizeof(params.ind_table)); + /* Hash bits */ + params.rss_result_mask = MULTI_MASK; - if (config_hash) { - /* RSS keys */ - for (i = 0; i < sizeof(params.rss_key) / 4; i++) - params.rss_key[i] = random32(); + memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table)); - __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags); - } + if (config_hash) { + /* RSS keys */ + prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4); + __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags); } - return bnx2x_config_rss(bp, ¶ms); + if (IS_PF(bp)) + return bnx2x_config_rss(bp, ¶ms); + else + return bnx2x_vfpf_config_rss(bp, ¶ms); } -static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) +static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) { - struct bnx2x_func_state_params func_params = {0}; + struct bnx2x_func_state_params func_params = {NULL}; /* Prepare parameters for function state transitions */ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); @@ -1640,14 +2107,14 @@ static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) /* * Cleans the object that have internal lists without sending - * ramrods. Should be run when interrutps are disabled. + * ramrods. Should be run when interrupts are disabled. 
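+ * The multicast part of the cleanup below sends one DEL command and
+ * then drains with CONT commands, all under netif_addr_lock_bh(), so
+ * that a true completion cannot interleave with the drain.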
*/ -static void bnx2x_squeeze_objects(struct bnx2x *bp) +void bnx2x_squeeze_objects(struct bnx2x *bp) { int rc; unsigned long ramrod_flags = 0, vlan_mac_flags = 0; - struct bnx2x_mcast_ramrod_params rparam = {0}; - struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj; + struct bnx2x_mcast_ramrod_params rparam = {NULL}; + struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; /***************** Cleanup MACs' object first *************************/ @@ -1658,7 +2125,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp) /* Clean ETH primary MAC */ __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags); - rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags, + rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags, &ramrod_flags); if (rc != 0) BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc); @@ -1675,11 +2142,15 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp) rparam.mcast_obj = &bp->mcast_obj; __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); - /* Add a DEL command... */ + /* Add a DEL command... - Since we're doing a driver cleanup only, + * we take a lock surrounding both the initial send and the CONTs, + * as we don't want a true completion to disrupt us in the middle. + */ + netif_addr_lock_bh(bp->dev); rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); if (rc < 0) - BNX2X_ERR("Failed to add a new DEL command to a multi-cast " - "object: %d\n", rc); + BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n", + rc); /* ...and wait until all pending commands are cleared */ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); @@ -1687,11 +2158,13 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp) if (rc < 0) { BNX2X_ERR("Failed to clean multi-cast object: %d\n", rc); + netif_addr_unlock_bh(bp->dev); return; } rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); } + netif_addr_unlock_bh(bp->dev); } #ifndef BNX2X_STOP_ON_ERROR @@ -1700,232 +2173,638 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp) (bp)->state = BNX2X_STATE_ERROR; \ goto label; \ } while (0) -#else + +#define LOAD_ERROR_EXIT_CNIC(bp, label) \ + do { \ + bp->cnic_loaded = false; \ + goto label; \ + } while (0) +#else /*BNX2X_STOP_ON_ERROR*/ #define LOAD_ERROR_EXIT(bp, label) \ do { \ (bp)->state = BNX2X_STATE_ERROR; \ (bp)->panic = 1; \ return -EBUSY; \ } while (0) -#endif +#define LOAD_ERROR_EXIT_CNIC(bp, label) \ + do { \ + bp->cnic_loaded = false; \ + (bp)->panic = 1; \ + return -EBUSY; \ + } while (0) +#endif /*BNX2X_STOP_ON_ERROR*/ + +static void bnx2x_free_fw_stats_mem(struct bnx2x *bp) +{ + BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, + bp->fw_stats_data_sz + bp->fw_stats_req_sz); + return; +} + +static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) +{ + int num_groups, vf_headroom = 0; + int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1; + + /* number of queues for statistics is number of eth queues + FCoE */ + u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats; + + /* Total number of FW statistics requests = + * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper + * and fcoe l2 queue) stats + num of queues (which includes another 1 + * for fcoe l2 queue if applicable) + */ + bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats; + + /* vf stats appear in the request list, but their data is allocated by + * the VFs themselves. 
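+	 * (vf_headroom, applied below, only reserves slots in the request
+	 * array for those VF queries; the stats data itself lives in VF
+	 * memory.)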
We don't include them in the bp->fw_stats_num as + * it is used to determine where to place the vf stats queries in the + * request struct + */ + if (IS_SRIOV(bp)) + vf_headroom = bnx2x_vf_headroom(bp); + + /* Request is built from stats_query_header and an array of + * stats_query_cmd_group each of which contains + * STATS_QUERY_CMD_COUNT rules. The real number or requests is + * configured in the stats_query_header. + */ + num_groups = + (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) + + (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ? + 1 : 0)); + + DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n", + bp->fw_stats_num, vf_headroom, num_groups); + bp->fw_stats_req_sz = sizeof(struct stats_query_header) + + num_groups * sizeof(struct stats_query_cmd_group); + + /* Data for statistics requests + stats_counter + * stats_counter holds per-STORM counters that are incremented + * when STORM has finished with the current request. + * memory for FCoE offloaded statistics are counted anyway, + * even if they will not be sent. + * VF stats are not accounted for here as the data of VF stats is stored + * in memory allocated by the VF, not here. + */ + bp->fw_stats_data_sz = sizeof(struct per_port_stats) + + sizeof(struct per_pf_stats) + + sizeof(struct fcoe_statistics_params) + + sizeof(struct per_queue_stats) * num_queue_stats + + sizeof(struct stats_counter); + + bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping, + bp->fw_stats_data_sz + bp->fw_stats_req_sz); + if (!bp->fw_stats) + goto alloc_mem_err; + + /* Set shortcuts */ + bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; + bp->fw_stats_req_mapping = bp->fw_stats_mapping; + bp->fw_stats_data = (struct bnx2x_fw_stats_data *) + ((u8 *)bp->fw_stats + bp->fw_stats_req_sz); + bp->fw_stats_data_mapping = bp->fw_stats_mapping + + bp->fw_stats_req_sz; + + DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n", + U64_HI(bp->fw_stats_req_mapping), + U64_LO(bp->fw_stats_req_mapping)); + DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n", + U64_HI(bp->fw_stats_data_mapping), + U64_LO(bp->fw_stats_data_mapping)); + return 0; + +alloc_mem_err: + bnx2x_free_fw_stats_mem(bp); + BNX2X_ERR("Can't allocate FW stats memory\n"); + return -ENOMEM; +} + +/* send load request to mcp and analyze response */ +static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code) +{ + u32 param; + + /* init fw_seq */ + bp->fw_seq = + (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK); + BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); + + /* Get current FW pulse sequence */ + bp->fw_drv_pulse_wr_seq = + (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) & + DRV_PULSE_SEQ_MASK); + BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); + + param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA; + + if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp)) + param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA; + + /* load request */ + (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param); + + /* if mcp fails to respond we must abort */ + if (!(*load_code)) { + BNX2X_ERR("MCP response failure, aborting\n"); + return -EBUSY; + } + + /* If mcp refused (e.g. other port is in diagnostic mode) we + * must abort + */ + if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) { + BNX2X_ERR("MCP refused load request, aborting\n"); + return -EBUSY; + } + return 0; +} + +/* check whether another PF has already loaded FW to chip. 
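bnx2x_compare_fw_ver() below builds the comparison dwords by packing the four version bytes lowest byte first. The same packing in a standalone sketch (the version numbers are examples, not the driver's BCM_5710_FW_* constants):

#include <stdint.h>
#include <stdio.h>

/* major in bits 0-7, minor in 8-15, revision in 16-23, engineering in 24-31 */
static uint32_t fw_ver_dword(uint8_t maj, uint8_t min, uint8_t rev, uint8_t eng)
{
	return maj + (min << 8) + ((uint32_t)rev << 16) + ((uint32_t)eng << 24);
}

int main(void)
{
	uint32_t my_fw = fw_ver_dword(7, 13, 1, 0);	/* hypothetical 7.13.1 */
	uint32_t loaded_fw = fw_ver_dword(7, 13, 1, 0);	/* would come from XSEM_REG_PRAM */

	printf("%s\n", my_fw == loaded_fw ? "match" : "mismatch, abort load");
	return 0;
}
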
In + * virtualized environments a pf from another VM may have already + * initialized the device including loading FW + */ +int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err) +{ + /* is another pf loaded on this engine? */ + if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP && + load_code != FW_MSG_CODE_DRV_LOAD_COMMON) { + /* build my FW version dword */ + u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) + + (BCM_5710_FW_MINOR_VERSION << 8) + + (BCM_5710_FW_REVISION_VERSION << 16) + + (BCM_5710_FW_ENGINEERING_VERSION << 24); + + /* read loaded FW from chip */ + u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM); + + DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n", + loaded_fw, my_fw); + + /* abort nic load if version mismatch */ + if (my_fw != loaded_fw) { + if (print_err) + BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n", + loaded_fw, my_fw); + else + BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n", + loaded_fw, my_fw); + return -EBUSY; + } + } + return 0; +} + +/* returns the "mcp load_code" according to global load_count array */ +static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port) +{ + int path = BP_PATH(bp); + + DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n", + path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], + bnx2x_load_count[path][2]); + bnx2x_load_count[path][0]++; + bnx2x_load_count[path][1 + port]++; + DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n", + path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], + bnx2x_load_count[path][2]); + if (bnx2x_load_count[path][0] == 1) + return FW_MSG_CODE_DRV_LOAD_COMMON; + else if (bnx2x_load_count[path][1 + port] == 1) + return FW_MSG_CODE_DRV_LOAD_PORT; + else + return FW_MSG_CODE_DRV_LOAD_FUNCTION; +} + +/* mark PMF if applicable */ +static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code) +{ + if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || + (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || + (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { + bp->port.pmf = 1; + /* We need the barrier to ensure the ordering between the + * writing to bp->port.pmf here and reading it from the + * bnx2x_periodic_task(). + */ + smp_mb(); + } else { + bp->port.pmf = 0; + } + + DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); +} + +static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code) +{ + if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || + (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) && + (bp->common.shmem2_base)) { + if (SHMEM2_HAS(bp, dcc_support)) + SHMEM2_WR(bp, dcc_support, + (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | + SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); + if (SHMEM2_HAS(bp, afex_driver_support)) + SHMEM2_WR(bp, afex_driver_support, + SHMEM_AFEX_SUPPORTED_VERSION_ONE); + } + + /* Set AFEX default VLAN tag to an invalid value */ + bp->afex_def_vlan_tag = -1; +} + +/** + * bnx2x_bz_fp - zero content of the fastpath structure. + * + * @bp: driver handle + * @index: fastpath index to be zeroed + * + * Makes sure the contents of the bp->fp[index].napi is kept + * intact. 
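The function documented here re-zeroes a fastpath while keeping members that were initialized once at allocation time. The save/memset/restore idiom in isolation, on an illustrative struct rather than the real bnx2x_fastpath:

#include <string.h>

struct fastpath {
	void *napi;	/* set up once, must survive re-zeroing */
	int index;
	int rx_pkts;	/* per-load state that should be wiped */
};

static void bz_fastpath(struct fastpath *fp, int index)
{
	void *orig_napi = fp->napi;	/* save the invariant */

	memset(fp, 0, sizeof(*fp));	/* wipe per-load state */

	fp->napi = orig_napi;		/* restore the invariant */
	fp->index = index;
}
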
+ */ +static void bnx2x_bz_fp(struct bnx2x *bp, int index) +{ + struct bnx2x_fastpath *fp = &bp->fp[index]; + int cos; + struct napi_struct orig_napi = fp->napi; + struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info; + + /* bzero bnx2x_fastpath contents */ + if (fp->tpa_info) + memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 * + sizeof(struct bnx2x_agg_info)); + memset(fp, 0, sizeof(*fp)); + + /* Restore the NAPI object as it has been already initialized */ + fp->napi = orig_napi; + fp->tpa_info = orig_tpa_info; + fp->bp = bp; + fp->index = index; + if (IS_ETH_FP(fp)) + fp->max_cos = bp->max_cos; + else + /* Special queues support only one CoS */ + fp->max_cos = 1; + + /* Init txdata pointers */ + if (IS_FCOE_FP(fp)) + fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)]; + if (IS_ETH_FP(fp)) + for_each_cos_in_tx_queue(fp, cos) + fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * + BNX2X_NUM_ETH_QUEUES(bp) + index]; + + /* set the tpa flag for each queue. The tpa flag determines the queue + * minimal size so it must be set prior to queue memory allocation + */ + fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG || + (bp->flags & GRO_ENABLE_FLAG && + bnx2x_mtu_allows_gro(bp->dev->mtu))); + if (bp->flags & TPA_ENABLE_FLAG) + fp->mode = TPA_MODE_LRO; + else if (bp->flags & GRO_ENABLE_FLAG) + fp->mode = TPA_MODE_GRO; + + /* We don't want TPA on an FCoE L2 ring */ + if (IS_FCOE_FP(fp)) + fp->disable_tpa = 1; +} + +int bnx2x_load_cnic(struct bnx2x *bp) +{ + int i, rc, port = BP_PORT(bp); + + DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n"); + + mutex_init(&bp->cnic_mutex); + + if (IS_PF(bp)) { + rc = bnx2x_alloc_mem_cnic(bp); + if (rc) { + BNX2X_ERR("Unable to allocate bp memory for cnic\n"); + LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); + } + } + + rc = bnx2x_alloc_fp_mem_cnic(bp); + if (rc) { + BNX2X_ERR("Unable to allocate memory for cnic fps\n"); + LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); + } + + /* Update the number of queues with the cnic queues */ + rc = bnx2x_set_real_num_queues(bp, 1); + if (rc) { + BNX2X_ERR("Unable to set real_num_queues including cnic\n"); + LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); + } + + /* Add all CNIC NAPI objects */ + bnx2x_add_all_napi_cnic(bp); + DP(NETIF_MSG_IFUP, "cnic napi added\n"); + bnx2x_napi_enable_cnic(bp); + + rc = bnx2x_init_hw_func_cnic(bp); + if (rc) + LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1); + + bnx2x_nic_init_cnic(bp); + + if (IS_PF(bp)) { + /* Enable Timer scan */ + REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1); + + /* setup cnic queues */ + for_each_cnic_queue(bp, i) { + rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); + if (rc) { + BNX2X_ERR("Queue setup failed\n"); + LOAD_ERROR_EXIT(bp, load_error_cnic2); + } + } + } + + /* Initialize Rx filter. 
*/ + bnx2x_set_rx_mode_inner(bp); + + /* re-read iscsi info */ + bnx2x_get_iscsi_info(bp); + bnx2x_setup_cnic_irq_info(bp); + bnx2x_setup_cnic_info(bp); + bp->cnic_loaded = true; + if (bp->state == BNX2X_STATE_OPEN) + bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); + + DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n"); + + return 0; + +#ifndef BNX2X_STOP_ON_ERROR +load_error_cnic2: + /* Disable Timer scan */ + REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); + +load_error_cnic1: + bnx2x_napi_disable_cnic(bp); + /* Update the number of queues without the cnic queues */ + if (bnx2x_set_real_num_queues(bp, 0)) + BNX2X_ERR("Unable to set real_num_queues not including cnic\n"); +load_error_cnic0: + BNX2X_ERR("CNIC-related load failed\n"); + bnx2x_free_fp_mem_cnic(bp); + bnx2x_free_mem_cnic(bp); + return rc; +#endif /* ! BNX2X_STOP_ON_ERROR */ +} /* must be called with rtnl_lock */ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) { int port = BP_PORT(bp); - u32 load_code; - int i, rc; + int i, rc = 0, load_code = 0; + + DP(NETIF_MSG_IFUP, "Starting NIC load\n"); + DP(NETIF_MSG_IFUP, + "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled"); #ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) + if (unlikely(bp->panic)) { + BNX2X_ERR("Can't load NIC when there is panic\n"); return -EPERM; + } #endif bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; - /* Set the initial link reported state to link down */ - bnx2x_acquire_phy_lock(bp); + /* zero the structure w/o any lock, before SP handler is initialized */ memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link)); __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, &bp->last_reported_link.link_report_flags); - bnx2x_release_phy_lock(bp); - /* must be called before memory allocation and HW init */ - bnx2x_ilt_set_info(bp); + if (IS_PF(bp)) + /* must be called before memory allocation and HW init */ + bnx2x_ilt_set_info(bp); /* * Zero fastpath structures preserving invariants like napi, which are * allocated only once, fp index, max_cos, bp pointer. - * Also set fp->disable_tpa. + * Also set fp->disable_tpa and txdata_ptr. */ + DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues); for_each_queue(bp, i) bnx2x_bz_fp(bp, i); + memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + + bp->num_cnic_queues) * + sizeof(struct bnx2x_fp_txdata)); + bp->fcoe_init = false; /* Set the receive queues buffer size */ bnx2x_set_rx_buf_size(bp); - if (bnx2x_alloc_mem(bp)) - return -ENOMEM; + if (IS_PF(bp)) { + rc = bnx2x_alloc_mem(bp); + if (rc) { + BNX2X_ERR("Unable to allocate bp memory\n"); + return rc; + } + } + + /* need to be done after alloc mem, since it's self adjusting to amount + * of memory available for RSS queues + */ + rc = bnx2x_alloc_fp_mem(bp); + if (rc) { + BNX2X_ERR("Unable to allocate memory for fps\n"); + LOAD_ERROR_EXIT(bp, load_error0); + } + + /* Allocated memory for FW statistics */ + if (bnx2x_alloc_fw_stats_mem(bp)) + LOAD_ERROR_EXIT(bp, load_error0); + + /* request pf to initialize status blocks */ + if (IS_VF(bp)) { + rc = bnx2x_vfpf_init(bp); + if (rc) + LOAD_ERROR_EXIT(bp, load_error0); + } /* As long as bnx2x_alloc_mem() may possibly update * bp->num_queues, bnx2x_set_real_num_queues() should always - * come after it. + * come after it. At this stage cnic queues are not counted. */ - rc = bnx2x_set_real_num_queues(bp); + rc = bnx2x_set_real_num_queues(bp, 0); if (rc) { BNX2X_ERR("Unable to set real_num_queues\n"); LOAD_ERROR_EXIT(bp, load_error0); } /* configure multi cos mappings in kernel. 
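The txdata_ptr assignments above place the per-CoS tx rings in one flat array, indexed CoS-major: slot = cos * num_eth_queues + queue. A standalone sketch of that layout with made-up sizes:

#include <stdio.h>

#define NUM_ETH_QUEUES 4	/* illustrative */
#define MAX_COS        3	/* illustrative */

/* mirrors fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * BNX2X_NUM_ETH_QUEUES(bp) + index] */
static int txq_slot(int cos, int queue)
{
	return cos * NUM_ETH_QUEUES + queue;
}

int main(void)
{
	for (int cos = 0; cos < MAX_COS; cos++)
		for (int q = 0; q < NUM_ETH_QUEUES; q++)
			printf("cos %d queue %d -> txq[%d]\n", cos, q, txq_slot(cos, q));
	return 0;
}
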
- * this configuration may be overriden by a multi class queue discipline - * or by a dcbx negotiation result. + * this configuration may be overridden by a multi class queue + * discipline or by a dcbx negotiation result. */ bnx2x_setup_tc(bp->dev, bp->max_cos); + /* Add all NAPI objects */ + bnx2x_add_all_napi(bp); + DP(NETIF_MSG_IFUP, "napi added\n"); bnx2x_napi_enable(bp); - /* Send LOAD_REQUEST command to MCP - * Returns the type of LOAD command: - * if it is the first port to be initialized - * common blocks should be initialized, otherwise - not - */ - if (!BP_NOMCP(bp)) { - load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0); - if (!load_code) { - BNX2X_ERR("MCP response failure, aborting\n"); - rc = -EBUSY; - LOAD_ERROR_EXIT(bp, load_error1); - } - if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) { - rc = -EBUSY; /* other port in diagnostic mode */ - LOAD_ERROR_EXIT(bp, load_error1); + if (IS_PF(bp)) { + /* set pf load just before approaching the MCP */ + bnx2x_set_pf_load(bp); + + /* if mcp exists send load request and analyze response */ + if (!BP_NOMCP(bp)) { + /* attempt to load pf */ + rc = bnx2x_nic_load_request(bp, &load_code); + if (rc) + LOAD_ERROR_EXIT(bp, load_error1); + + /* what did mcp say? */ + rc = bnx2x_compare_fw_ver(bp, load_code, true); + if (rc) { + bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); + LOAD_ERROR_EXIT(bp, load_error2); + } + } else { + load_code = bnx2x_nic_load_no_mcp(bp, port); } - } else { - int path = BP_PATH(bp); - - DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n", - path, load_count[path][0], load_count[path][1], - load_count[path][2]); - load_count[path][0]++; - load_count[path][1 + port]++; - DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n", - path, load_count[path][0], load_count[path][1], - load_count[path][2]); - if (load_count[path][0] == 1) - load_code = FW_MSG_CODE_DRV_LOAD_COMMON; - else if (load_count[path][1 + port] == 1) - load_code = FW_MSG_CODE_DRV_LOAD_PORT; - else - load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; - } - - if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || - (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || - (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { - bp->port.pmf = 1; - /* - * We need the barrier to ensure the ordering between the - * writing to bp->port.pmf here and reading it from the - * bnx2x_periodic_task(). 
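The comment above pairs a full barrier on the pmf write with the read in bnx2x_periodic_task(). A reduced sketch of that publish/observe pattern; __sync_synchronize() stands in for the kernel's smp_mb() purely for illustration and is not a drop-in replacement:

/* writer side (load path) */
static void publish_pmf(volatile int *pmf)
{
	*pmf = 1;
	__sync_synchronize();	/* order the store before later work */
}

/* reader side (periodic task), with the paired barrier */
static int observe_pmf(volatile int *pmf)
{
	__sync_synchronize();
	return *pmf;
}
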
- */ - smp_mb(); - queue_delayed_work(bnx2x_wq, &bp->period_task, 0); - } else - bp->port.pmf = 0; - - DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); + /* mark pmf if applicable */ + bnx2x_nic_load_pmf(bp, load_code); - /* Init Function state controlling object */ - bnx2x__init_func_obj(bp); + /* Init Function state controlling object */ + bnx2x__init_func_obj(bp); - /* Initialize HW */ - rc = bnx2x_init_hw(bp, load_code); - if (rc) { - BNX2X_ERR("HW init failed, aborting\n"); - bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); - LOAD_ERROR_EXIT(bp, load_error2); + /* Initialize HW */ + rc = bnx2x_init_hw(bp, load_code); + if (rc) { + BNX2X_ERR("HW init failed, aborting\n"); + bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); + LOAD_ERROR_EXIT(bp, load_error2); + } } + bnx2x_pre_irq_nic_init(bp); + /* Connect to IRQs */ rc = bnx2x_setup_irqs(bp); if (rc) { - bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); + BNX2X_ERR("setup irqs failed\n"); + if (IS_PF(bp)) + bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); LOAD_ERROR_EXIT(bp, load_error2); } - /* Setup NIC internals and enable interrupts */ - bnx2x_nic_init(bp, load_code); - /* Init per-function objects */ - bnx2x_init_bp_objs(bp); + if (IS_PF(bp)) { + /* Setup NIC internals and enable interrupts */ + bnx2x_post_irq_nic_init(bp, load_code); + + bnx2x_init_bp_objs(bp); + bnx2x_iov_nic_init(bp); + + /* Set AFEX default VLAN tag to an invalid value */ + bp->afex_def_vlan_tag = -1; + bnx2x_nic_load_afex_dcc(bp, load_code); + bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; + rc = bnx2x_func_start(bp); + if (rc) { + BNX2X_ERR("Function start failed!\n"); + bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); - if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || - (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) && - (bp->common.shmem2_base)) { - if (SHMEM2_HAS(bp, dcc_support)) - SHMEM2_WR(bp, dcc_support, - (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | - SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); + LOAD_ERROR_EXIT(bp, load_error3); + } + + /* Send LOAD_DONE command to MCP */ + if (!BP_NOMCP(bp)) { + load_code = bnx2x_fw_command(bp, + DRV_MSG_CODE_LOAD_DONE, 0); + if (!load_code) { + BNX2X_ERR("MCP response failure, aborting\n"); + rc = -EBUSY; + LOAD_ERROR_EXIT(bp, load_error3); + } + } + + /* initialize FW coalescing state machines in RAM */ + bnx2x_update_coalesce(bp); } - bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; - rc = bnx2x_func_start(bp); + /* setup the leading queue */ + rc = bnx2x_setup_leading(bp); if (rc) { - BNX2X_ERR("Function start failed!\n"); - bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); + BNX2X_ERR("Setup leading failed!\n"); LOAD_ERROR_EXIT(bp, load_error3); } - /* Send LOAD_DONE command to MCP */ - if (!BP_NOMCP(bp)) { - load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); - if (!load_code) { - BNX2X_ERR("MCP response failure, aborting\n"); - rc = -EBUSY; + /* set up the rest of the queues */ + for_each_nondefault_eth_queue(bp, i) { + if (IS_PF(bp)) + rc = bnx2x_setup_queue(bp, &bp->fp[i], false); + else /* VF */ + rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false); + if (rc) { + BNX2X_ERR("Queue %d setup failed\n", i); LOAD_ERROR_EXIT(bp, load_error3); } } - rc = bnx2x_setup_leading(bp); + /* setup rss */ + rc = bnx2x_init_rss(bp); if (rc) { - BNX2X_ERR("Setup leading failed!\n"); + BNX2X_ERR("PF RSS init failed\n"); LOAD_ERROR_EXIT(bp, load_error3); } -#ifdef BCM_CNIC - /* Enable Timer scan */ - REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1); -#endif - - for_each_nondefault_queue(bp, i) { - rc = bnx2x_setup_queue(bp, 
&bp->fp[i], 0); - if (rc) - LOAD_ERROR_EXIT(bp, load_error4); - } - - rc = bnx2x_init_rss_pf(bp); - if (rc) - LOAD_ERROR_EXIT(bp, load_error4); - /* Now when Clients are configured we are ready to work */ bp->state = BNX2X_STATE_OPEN; /* Configure a ucast MAC */ - rc = bnx2x_set_eth_mac(bp, true); - if (rc) - LOAD_ERROR_EXIT(bp, load_error4); + if (IS_PF(bp)) + rc = bnx2x_set_eth_mac(bp, true); + else /* vf */ + rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, + true); + if (rc) { + BNX2X_ERR("Setting Ethernet MAC failed\n"); + LOAD_ERROR_EXIT(bp, load_error3); + } - if (bp->pending_max) { + if (IS_PF(bp) && bp->pending_max) { bnx2x_update_max_mf_config(bp, bp->pending_max); bp->pending_max = 0; } - if (bp->port.pmf) - bnx2x_initial_phy_init(bp, load_mode); + if (bp->port.pmf) { + rc = bnx2x_initial_phy_init(bp, load_mode); + if (rc) + LOAD_ERROR_EXIT(bp, load_error3); + } + bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN; /* Start fast path */ /* Initialize Rx filter. */ - netif_addr_lock_bh(bp->dev); - bnx2x_set_rx_mode(bp->dev); - netif_addr_unlock_bh(bp->dev); + bnx2x_set_rx_mode_inner(bp); /* Start the Tx */ switch (load_mode) { case LOAD_NORMAL: - /* Tx queue should be only reenabled */ + /* Tx queue should be only re-enabled */ netif_tx_wake_all_queues(bp->dev); break; case LOAD_OPEN: netif_tx_start_all_queues(bp->dev); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); break; case LOAD_DIAG: + case LOAD_LOOPBACK_EXT: bp->state = BNX2X_STATE_DIAG; break; @@ -1934,43 +2813,51 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) } if (bp->port.pmf) - bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0); + bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0); else bnx2x__link_status_update(bp); /* start the timer */ mod_timer(&bp->timer, jiffies + bp->current_interval); -#ifdef BCM_CNIC - /* re-read iscsi info */ - bnx2x_get_iscsi_info(bp); - bnx2x_setup_cnic_irq_info(bp); - if (bp->state == BNX2X_STATE_OPEN) - bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); -#endif - bnx2x_inc_load_cnt(bp); + if (CNIC_ENABLED(bp)) + bnx2x_load_cnic(bp); + + if (IS_PF(bp)) + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); + + if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { + /* mark driver is loaded in shmem2 */ + u32 val; + val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); + SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], + val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | + DRV_FLAGS_CAPABILITIES_LOADED_L2); + } /* Wait for all pending SP commands to complete */ - if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) { + if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) { BNX2X_ERR("Timeout waiting for SP elements to complete\n"); - bnx2x_nic_unload(bp, UNLOAD_CLOSE); + bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); return -EBUSY; } - bnx2x_dcbx_init(bp); + /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */ + if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG)) + bnx2x_dcbx_init(bp, false); + + DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n"); + return 0; #ifndef BNX2X_STOP_ON_ERROR -load_error4: -#ifdef BCM_CNIC - /* Disable Timer scan */ - REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); -#endif load_error3: - bnx2x_int_disable_sync(bp, 1); + if (IS_PF(bp)) { + bnx2x_int_disable_sync(bp, 1); - /* Clean queueable objects */ - bnx2x_squeeze_objects(bp); + /* Clean queueable objects */ + bnx2x_squeeze_objects(bp); + } /* Free SKBs, SGEs, TPA pool and driver internals */ bnx2x_free_skbs(bp); @@ -1980,7 +2867,7 @@ 
load_error3: /* Release IRQs */ bnx2x_free_irq(bp); load_error2: - if (!BP_NOMCP(bp)) { + if (IS_PF(bp) && !BP_NOMCP(bp)) { bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); } @@ -1988,21 +2875,55 @@ load_error2: bp->port.pmf = 0; load_error1: bnx2x_napi_disable(bp); + bnx2x_del_all_napi(bp); + + /* clear pf_load status, as it was already set */ + if (IS_PF(bp)) + bnx2x_clear_pf_load(bp); load_error0: + bnx2x_free_fw_stats_mem(bp); + bnx2x_free_fp_mem(bp); bnx2x_free_mem(bp); return rc; #endif /* ! BNX2X_STOP_ON_ERROR */ } +int bnx2x_drain_tx_queues(struct bnx2x *bp) +{ + u8 rc = 0, cos, i; + + /* Wait until tx fastpath tasks complete */ + for_each_tx_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + for_each_cos_in_tx_queue(fp, cos) + rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); + if (rc) + return rc; + } + return 0; +} + /* must be called with rtnl_lock */ -int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) +int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) { int i; bool global = false; - if ((bp->state == BNX2X_STATE_CLOSED) || - (bp->state == BNX2X_STATE_ERROR)) { + DP(NETIF_MSG_IFUP, "Starting NIC unload\n"); + + /* mark driver is unloaded in shmem2 */ + if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { + u32 val; + val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); + SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], + val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2); + } + + if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE && + (bp->state == BNX2X_STATE_CLOSED || + bp->state == BNX2X_STATE_ERROR)) { /* We can get here if the driver has been unloaded * during parity error recovery and is either waiting for a * leader to complete or for other functions to unload and @@ -2015,48 +2936,67 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) bnx2x_release_leader_lock(bp); smp_mb(); - DP(NETIF_MSG_HW, "Releasing a leadership...\n"); - + DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n"); + BNX2X_ERR("Can't unload in closed or error state\n"); return -EINVAL; } - /* - * It's important to set the bp->state to the value different from + /* Nothing to do during unload if previous bnx2x_nic_load() + * have not completed successfully - all resources are released. + * + * we can get here only after unsuccessful ndo_* callback, during which + * dev->IFF_UP flag is still on. + */ + if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR) + return 0; + + /* It's important to set the bp->state to the value different from * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int() * may restart the Tx from the NAPI context (see bnx2x_tx_int()). 
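Marking and clearing the load state in shmem2, as done above and at the top of bnx2x_nic_unload(), is a read-modify-write on a word shared with the management firmware. The same shape on plain memory, with an illustrative bit value:

#include <stdint.h>

#define CAP_LOADED_L2 (1u << 1)	/* stand-in, not the real DRV_FLAGS_* value */

/* equivalent of the SHMEM2_RD + SHMEM2_WR pairs */
static void mark_l2_loaded(uint32_t *cap)
{
	*cap |= CAP_LOADED_L2;
}

static void mark_l2_unloaded(uint32_t *cap)
{
	*cap &= ~CAP_LOADED_L2;
}
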
*/ bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; smp_mb(); + /* indicate to VFs that the PF is going down */ + bnx2x_iov_channel_down(bp); + + if (CNIC_LOADED(bp)) + bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); + /* Stop Tx */ bnx2x_tx_disable(bp); - -#ifdef BCM_CNIC - bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); -#endif + netdev_reset_tc(bp->dev); bp->rx_mode = BNX2X_RX_MODE_NONE; del_timer_sync(&bp->timer); - /* Set ALWAYS_ALIVE bit in shmem */ - bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; - - bnx2x_drv_pulse(bp); + if (IS_PF(bp)) { + /* Set ALWAYS_ALIVE bit in shmem */ + bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; + bnx2x_drv_pulse(bp); + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + bnx2x_save_statistics(bp); + } - bnx2x_stats_handle(bp, STATS_EVENT_STOP); + /* wait till consumers catch up with producers in all queues */ + bnx2x_drain_tx_queues(bp); - /* Cleanup the chip if needed */ - if (unload_mode != UNLOAD_RECOVERY) - bnx2x_chip_cleanup(bp, unload_mode); + /* if VF indicate to PF this function is going down (PF will delete sp + * elements and clear initializations + */ + if (IS_VF(bp)) + bnx2x_vfpf_close_vf(bp); + else if (unload_mode != UNLOAD_RECOVERY) + /* if this is a normal/close unload need to clean up chip*/ + bnx2x_chip_cleanup(bp, unload_mode, keep_link); else { /* Send the UNLOAD_REQUEST to the MCP */ bnx2x_send_unload_req(bp, unload_mode); - /* - * Prevent transactions to host from the functions on the + /* Prevent transactions to host from the functions on the * engine that doesn't reset global blocks in case of global - * attention once gloabl blocks are reset and gates are opened + * attention once global blocks are reset and gates are opened * (the engine which leader will perform the recovery * last). */ @@ -2065,38 +3005,61 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) /* Disable HW interrupts, NAPI */ bnx2x_netif_stop(bp, 1); - + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); + if (CNIC_LOADED(bp)) + bnx2x_del_all_napi_cnic(bp); /* Release IRQs */ bnx2x_free_irq(bp); /* Report UNLOAD_DONE to MCP */ - bnx2x_send_unload_done(bp); + bnx2x_send_unload_done(bp, false); } /* - * At this stage no more interrupts will arrive so we may safly clean + * At this stage no more interrupts will arrive so we may safely clean * the queueable objects here in case they failed to get cleaned so far. */ - bnx2x_squeeze_objects(bp); + if (IS_PF(bp)) + bnx2x_squeeze_objects(bp); /* There should be no more pending SP commands at this stage */ bp->sp_state = 0; bp->port.pmf = 0; + /* clear pending work in rtnl task */ + bp->sp_rtnl_state = 0; + smp_mb(); + /* Free SKBs, SGEs, TPA pool and driver internals */ bnx2x_free_skbs(bp); + if (CNIC_LOADED(bp)) + bnx2x_free_skbs_cnic(bp); for_each_rx_queue(bp, i) bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); + bnx2x_free_fp_mem(bp); + if (CNIC_LOADED(bp)) + bnx2x_free_fp_mem_cnic(bp); + + if (IS_PF(bp)) { + if (CNIC_LOADED(bp)) + bnx2x_free_mem_cnic(bp); + } bnx2x_free_mem(bp); bp->state = BNX2X_STATE_CLOSED; + bp->cnic_loaded = false; + + /* Clear driver version indication in shmem */ + if (IS_PF(bp)) + bnx2x_update_mng_version(bp); /* Check if there are pending parity attentions. If there are - set * RECOVERY_IN_PROGRESS. 
*/ - if (bnx2x_chk_parity_attn(bp, &global, false)) { + if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) { bnx2x_set_reset_in_progress(bp); /* Set RESET_IS_GLOBAL if needed */ @@ -2104,13 +3067,16 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) bnx2x_set_reset_global(bp); } - /* The last driver must disable a "close the gate" if there is no * parity attention or "process kill" pending. */ - if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp))) + if (IS_PF(bp) && + !bnx2x_clear_pf_load(bp) && + bnx2x_reset_is_done(bp, BP_PATH(bp))) bnx2x_disable_close_the_gate(bp); + DP(NETIF_MSG_IFUP, "Ending NIC unload\n"); + return 0; } @@ -2119,16 +3085,16 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) u16 pmcsr; /* If there is no power capability, silently succeed */ - if (!bp->pm_cap) { - DP(NETIF_MSG_HW, "No power capability. Breaking.\n"); + if (!bp->pdev->pm_cap) { + BNX2X_DEV_INFO("No power capability. Breaking.\n"); return 0; } - pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr); + pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr); switch (state) { case PCI_D0: - pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, + pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) | PCI_PM_CTRL_PME_STATUS)); @@ -2152,7 +3118,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) if (bp->wol) pmcsr |= PCI_PM_CTRL_PME_ENABLE; - pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, + pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, pmcsr); /* No more memory access after this point until @@ -2161,6 +3127,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) break; default: + dev_err(&bp->pdev->dev, "Can't support state = %d\n", state); return -EINVAL; } return 0; @@ -2169,7 +3136,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) /* * net_device service functions */ -int bnx2x_poll(struct napi_struct *napi, int budget) +static int bnx2x_poll(struct napi_struct *napi, int budget) { int work_done = 0; u8 cos; @@ -2184,23 +3151,27 @@ int bnx2x_poll(struct napi_struct *napi, int budget) return 0; } #endif + if (!bnx2x_fp_lock_napi(fp)) + return work_done; for_each_cos_in_tx_queue(fp, cos) - if (bnx2x_tx_queue_has_work(&fp->txdata[cos])) - bnx2x_tx_int(bp, &fp->txdata[cos]); - + if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) + bnx2x_tx_int(bp, fp->txdata_ptr[cos]); if (bnx2x_has_rx_work(fp)) { work_done += bnx2x_rx_int(fp, budget - work_done); /* must not complete if we consumed full budget */ - if (work_done >= budget) + if (work_done >= budget) { + bnx2x_fp_unlock_napi(fp); break; + } } /* Fall out from the NAPI loop if needed */ - if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { -#ifdef BCM_CNIC + if (!bnx2x_fp_unlock_napi(fp) && + !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { + /* No need to update SB for FCoE L2 ring as long as * it's connected to the default SB and the SB * has been updated when NAPI was scheduled. 
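bnx2x_poll() above follows the standard NAPI contract: consume at most budget packets, and only complete NAPI and re-enable the device interrupt once tx and rx work is fully drained. A bare-bones sketch of that contract; struct my_ring and the my_* helpers are hypothetical:

#include <linux/netdevice.h>

struct my_ring {
	struct napi_struct napi;
	/* ... ring state ... */
};

static int my_rx_clean(struct my_ring *ring, int budget);	/* hypothetical */
static void my_irq_enable(struct my_ring *ring);		/* hypothetical */

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_ring *ring = container_of(napi, struct my_ring, napi);
	int work_done = my_rx_clean(ring, budget);

	/* budget exhausted: stay scheduled, keep the interrupt masked */
	if (work_done >= budget)
		return budget;

	/* fully drained: complete and re-arm the interrupt */
	napi_complete(napi);
	my_irq_enable(ring);
	return work_done;
}
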
@@ -2209,8 +3180,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget) napi_complete(napi); break; } -#endif - bnx2x_update_fpsb_idx(fp); /* bnx2x_has_rx_work() reads the status block, * thus we need to ensure that status block indices @@ -2230,7 +3199,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget) if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { napi_complete(napi); /* Re-enable interrupts */ - DP(NETIF_MSG_HW, + DP(NETIF_MSG_RX_STATUS, "Update index to %d\n", fp->fp_hc_idx); bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, le16_to_cpu(fp->fp_hc_idx), @@ -2243,17 +3212,41 @@ int bnx2x_poll(struct napi_struct *napi, int budget) return work_done; } +#ifdef CONFIG_NET_RX_BUSY_POLL +/* must be called with local_bh_disable()d */ +int bnx2x_low_latency_recv(struct napi_struct *napi) +{ + struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, + napi); + struct bnx2x *bp = fp->bp; + int found = 0; + + if ((bp->state == BNX2X_STATE_CLOSED) || + (bp->state == BNX2X_STATE_ERROR) || + (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG))) + return LL_FLUSH_FAILED; + + if (!bnx2x_fp_lock_poll(fp)) + return LL_FLUSH_BUSY; + + if (bnx2x_has_rx_work(fp)) + found = bnx2x_rx_int(fp, 4); + + bnx2x_fp_unlock_poll(fp); + + return found; +} +#endif + /* we split the first BD into headers and data BDs * to ease the pain of our fellow microcode engineers * we use one mapping for both BDs - * So far this has only been observed to happen - * in Other Operating Systems(TM) */ -static noinline u16 bnx2x_tx_split(struct bnx2x *bp, - struct bnx2x_fp_txdata *txdata, - struct sw_tx_bd *tx_buf, - struct eth_tx_start_bd **tx_bd, u16 hlen, - u16 bd_prod, int nbd) +static u16 bnx2x_tx_split(struct bnx2x *bp, + struct bnx2x_fp_txdata *txdata, + struct sw_tx_bd *tx_buf, + struct eth_tx_start_bd **tx_bd, u16 hlen, + u16 bd_prod) { struct eth_tx_start_bd *h_tx_bd = *tx_bd; struct eth_tx_bd *d_tx_bd; @@ -2261,12 +3254,10 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp, int old_len = le16_to_cpu(h_tx_bd->nbytes); /* first fix first BD */ - h_tx_bd->nbd = cpu_to_le16(nbd); h_tx_bd->nbytes = cpu_to_le16(hlen); - DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d " - "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi, - h_tx_bd->addr_lo, h_tx_bd->nbd); + DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n", + h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo); /* now get a new data BD * (after the pbd) and fill it */ @@ -2293,43 +3284,66 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp, return bd_prod; } -static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) +#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32))) +#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16))) +static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) { + __sum16 tsum = (__force __sum16) csum; + if (fix > 0) - csum = (u16) ~csum_fold(csum_sub(csum, - csum_partial(t_header - fix, fix, 0))); + tsum = ~csum_fold(csum_sub((__force __wsum) csum, + csum_partial(t_header - fix, fix, 0))); else if (fix < 0) - csum = (u16) ~csum_fold(csum_add(csum, - csum_partial(t_header, -fix, 0))); + tsum = ~csum_fold(csum_add((__force __wsum) csum, + csum_partial(t_header, -fix, 0))); - return swab16(csum); + return bswab16(tsum); } -static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) +static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) { u32 rc; + __u8 prot = 0; + __be16 protocol; if (skb->ip_summed != CHECKSUM_PARTIAL) - 
rc = XMIT_PLAIN; + return XMIT_PLAIN; - else { - if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) { - rc = XMIT_CSUM_V6; - if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) - rc |= XMIT_CSUM_TCP; + protocol = vlan_get_protocol(skb); + if (protocol == htons(ETH_P_IPV6)) { + rc = XMIT_CSUM_V6; + prot = ipv6_hdr(skb)->nexthdr; + } else { + rc = XMIT_CSUM_V4; + prot = ip_hdr(skb)->protocol; + } + if (!CHIP_IS_E1x(bp) && skb->encapsulation) { + if (inner_ip_hdr(skb)->version == 6) { + rc |= XMIT_CSUM_ENC_V6; + if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + rc |= XMIT_CSUM_TCP; } else { - rc = XMIT_CSUM_V4; - if (ip_hdr(skb)->protocol == IPPROTO_TCP) + rc |= XMIT_CSUM_ENC_V4; + if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP) rc |= XMIT_CSUM_TCP; } } - - if (skb_is_gso_v6(skb)) - rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6; - else if (skb_is_gso(skb)) - rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP; + if (prot == IPPROTO_TCP) + rc |= XMIT_CSUM_TCP; + + if (skb_is_gso(skb)) { + if (skb_is_gso_v6(skb)) { + rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP); + if (rc & XMIT_CSUM_ENC) + rc |= XMIT_GSO_ENC_V6; + } else { + rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP); + if (rc & XMIT_CSUM_ENC) + rc |= XMIT_GSO_ENC_V4; + } + } return rc; } @@ -2406,8 +3420,7 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, exit_lbl: if (unlikely(to_copy)) DP(NETIF_MSG_TX_QUEUED, - "Linearization IS REQUIRED for %s packet. " - "num_frags %d hlen %d first_bd_sz %d\n", + "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n", (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO", skb_shinfo(skb)->nr_frags, hlen, first_bd_sz); @@ -2415,14 +3428,23 @@ exit_lbl: } #endif -static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, - u32 xmit_type) +static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, + u32 xmit_type) { + struct ipv6hdr *ipv6; + *parsing_data |= (skb_shinfo(skb)->gso_size << ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & ETH_TX_PARSE_BD_E2_LSO_MSS; - if ((xmit_type & XMIT_GSO_V6) && - (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) + + if (xmit_type & XMIT_GSO_ENC_V6) + ipv6 = inner_ipv6_hdr(skb); + else if (xmit_type & XMIT_GSO_V6) + ipv6 = ipv6_hdr(skb); + else + ipv6 = NULL; + + if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6) *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; } @@ -2433,28 +3455,67 @@ static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, * @pbd: parse BD * @xmit_type: xmit flags */ -static inline void bnx2x_set_pbd_gso(struct sk_buff *skb, - struct eth_tx_parse_bd_e1x *pbd, - u32 xmit_type) +static void bnx2x_set_pbd_gso(struct sk_buff *skb, + struct eth_tx_parse_bd_e1x *pbd, + struct eth_tx_start_bd *tx_start_bd, + u32 xmit_type) { pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); - pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq); - pbd->tcp_flags = pbd_tcp_flags(skb); + pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq); + pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb)); if (xmit_type & XMIT_GSO_V4) { - pbd->ip_id = swab16(ip_hdr(skb)->id); + pbd->ip_id = bswab16(ip_hdr(skb)->id); pbd->tcp_pseudo_csum = - swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, - ip_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0)); + bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0)); - } else + /* GSO on 57710/57711 needs FW to calculate IP checksum */ + tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM; + } else { pbd->tcp_pseudo_csum = - swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 
0, IPPROTO_TCP, 0)); + bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0)); + } + + pbd->global_data |= + cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN); +} + +/** + * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length + * + * @bp: driver handle + * @skb: packet skb + * @parsing_data: data to be updated + * @xmit_type: xmit flags + * + * 57712/578xx related, when skb has encapsulation + */ +static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb, + u32 *parsing_data, u32 xmit_type) +{ + *parsing_data |= + ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) << + ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & + ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W; + + if (xmit_type & XMIT_CSUM_TCP) { + *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) << + ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & + ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; + + return skb_inner_transport_header(skb) + + inner_tcp_hdrlen(skb) - skb->data; + } - pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN; + /* We support checksum offload for TCP and UDP only. + * No need to pass the UDP header length - it's a constant. + */ + return skb_inner_transport_header(skb) + + sizeof(struct udphdr) - skb->data; } /** @@ -2465,15 +3526,15 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb, * @parsing_data: data to be updated * @xmit_type: xmit flags * - * 57712 related + * 57712/578xx related */ -static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, - u32 *parsing_data, u32 xmit_type) +static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, + u32 *parsing_data, u32 xmit_type) { *parsing_data |= - ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) << - ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) & - ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W; + ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) << + ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & + ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W; if (xmit_type & XMIT_CSUM_TCP) { *parsing_data |= ((tcp_hdrlen(skb) / 4) << @@ -2481,25 +3542,22 @@ static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data; - } else - /* We support checksum offload for TCP and UDP only. - * No need to pass the UDP header length - it's a constant. - */ - return skb_transport_header(skb) + - sizeof(struct udphdr) - skb->data; + } + /* We support checksum offload for TCP and UDP only. + * No need to pass the UDP header length - it's a constant. 
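The parsing-BD offsets set in these helpers are carried in 16-bit words, hence the >> 1 applied to byte offsets such as skb_transport_header(skb) - skb->data. Trivial, but easy to trip over; a standalone check:

#include <stdio.h>

static unsigned int bytes_to_halfwords(unsigned int byte_off)
{
	return byte_off >> 1;
}

int main(void)
{
	/* Ethernet (14) + IPv4 (20) = 34 bytes -> 17 halfwords */
	printf("%u\n", bytes_to_halfwords(14 + 20));
	return 0;
}
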
+ */ + return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data; } -static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, - struct eth_tx_start_bd *tx_start_bd, u32 xmit_type) +/* set FW indication according to inner or outer protocols if tunneled */ +static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, + struct eth_tx_start_bd *tx_start_bd, + u32 xmit_type) { tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; - if (xmit_type & XMIT_CSUM_V4) - tx_start_bd->bd_flags.as_bitfield |= - ETH_TX_BD_FLAGS_IP_CSUM; - else - tx_start_bd->bd_flags.as_bitfield |= - ETH_TX_BD_FLAGS_IPV6; + if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6)) + tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6; if (!(xmit_type & XMIT_CSUM_TCP)) tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP; @@ -2513,16 +3571,17 @@ static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, * @pbd: parse BD to be updated * @xmit_type: xmit flags */ -static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, - struct eth_tx_parse_bd_e1x *pbd, - u32 xmit_type) +static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, + struct eth_tx_parse_bd_e1x *pbd, + u32 xmit_type) { u8 hlen = (skb_network_header(skb) - skb->data) >> 1; /* for now NS flag is not used in Linux */ pbd->global_data = - (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << - ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); + cpu_to_le16(hlen | + ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << + ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); pbd->ip_hlen_w = (skb_transport_header(skb) - skb_network_header(skb)) >> 1; @@ -2539,7 +3598,7 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, hlen = hlen*2; if (xmit_type & XMIT_CSUM_TCP) { - pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check); + pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check); } else { s8 fix = SKB_CS_OFF(skb); /* signed! 
*/ @@ -2560,6 +3619,77 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, return hlen; } +static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, + struct eth_tx_parse_bd_e2 *pbd_e2, + struct eth_tx_parse_2nd_bd *pbd2, + u16 *global_data, + u32 xmit_type) +{ + u16 hlen_w = 0; + u8 outerip_off, outerip_len = 0; + + /* from outer IP to transport */ + hlen_w = (skb_inner_transport_header(skb) - + skb_network_header(skb)) >> 1; + + /* transport len */ + hlen_w += inner_tcp_hdrlen(skb) >> 1; + + pbd2->fw_ip_hdr_to_payload_w = hlen_w; + + /* outer IP header info */ + if (xmit_type & XMIT_CSUM_V4) { + struct iphdr *iph = ip_hdr(skb); + u32 csum = (__force u32)(~iph->check) - + (__force u32)iph->tot_len - + (__force u32)iph->frag_off; + + pbd2->fw_ip_csum_wo_len_flags_frag = + bswab16(csum_fold((__force __wsum)csum)); + } else { + pbd2->fw_ip_hdr_to_payload_w = + hlen_w - ((sizeof(struct ipv6hdr)) >> 1); + } + + pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq); + + pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb)); + + if (xmit_type & XMIT_GSO_V4) { + pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id); + + pbd_e2->data.tunnel_data.pseudo_csum = + bswab16(~csum_tcpudp_magic( + inner_ip_hdr(skb)->saddr, + inner_ip_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0)); + + outerip_len = ip_hdr(skb)->ihl << 1; + } else { + pbd_e2->data.tunnel_data.pseudo_csum = + bswab16(~csum_ipv6_magic( + &inner_ipv6_hdr(skb)->saddr, + &inner_ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0)); + } + + outerip_off = (skb_network_header(skb) - skb->data) >> 1; + + *global_data |= + outerip_off | + (!!(xmit_type & XMIT_CSUM_V6) << + ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) | + (outerip_len << + ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) | + ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << + ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT); + + if (ip_hdr(skb)->protocol == IPPROTO_UDP) { + SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1); + pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1; + } +} + /* called with netif_tx_lock * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call * netif_wake_queue() @@ -2568,7 +3698,6 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); - struct bnx2x_fastpath *fp; struct netdev_queue *txq; struct bnx2x_fp_txdata *txdata; struct sw_tx_bd *tx_buf; @@ -2576,9 +3705,10 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; + struct eth_tx_parse_2nd_bd *pbd2 = NULL; u32 pbd_e2_parsing_data = 0; u16 pkt_prod, bd_prod; - int nbd, txq_index, fp_index, txdata_index; + int nbd, txq_index; dma_addr_t mapping; u32 xmit_type = bnx2x_xmit_type(bp, skb); int i; @@ -2595,50 +3725,43 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) txq_index = skb_get_queue_mapping(skb); txq = netdev_get_tx_queue(dev, txq_index); - BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT); + BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0)); - /* decode the fastpath index and the cos index from the txq */ - fp_index = TXQ_TO_FP(txq_index); - txdata_index = TXQ_TO_COS(txq_index); - -#ifdef BCM_CNIC - /* - * Override the above for the FCoE queue: - * - FCoE fp entry is right after the ETH entries. - * - FCoE L2 queue uses bp->txdata[0] only. 
- */ - if (unlikely(!NO_FCOE(bp) && (txq_index == - bnx2x_fcoe_tx(bp, txq_index)))) { - fp_index = FCOE_IDX; - txdata_index = 0; - } -#endif + txdata = &bp->bnx2x_txq[txq_index]; /* enable this debug print to view the transmission queue being used - DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n", + DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n", txq_index, fp_index, txdata_index); */ - /* locate the fastpath and the txdata */ - fp = &bp->fp[fp_index]; - txdata = &fp->txdata[txdata_index]; - - /* enable this debug print to view the tranmission details - DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d" - " tx_data ptr %p fp pointer %p\n", + /* enable this debug print to view the transmission details + DP(NETIF_MSG_TX_QUEUED, + "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n", txdata->cid, fp_index, txdata_index, txdata, fp); */ if (unlikely(bnx2x_tx_avail(bp, txdata) < - (skb_shinfo(skb)->nr_frags + 3))) { - fp->eth_q_stats.driver_xoff++; + skb_shinfo(skb)->nr_frags + + BDS_PER_TX_PKT + + NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) { + /* Handle special storage cases separately */ + if (txdata->tx_ring_size == 0) { + struct bnx2x_eth_q_stats *q_stats = + bnx2x_fp_qstats(bp, txdata->parent_fp); + q_stats->driver_filtered_tx_pkt++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; netif_tx_stop_queue(txq); BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); + return NETDEV_TX_BUSY; } - DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x " - "protocol(%x,%x) gso type %x xmit_type %x\n", + DP(NETIF_MSG_TX_QUEUED, + "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n", txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, - ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); + ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type, + skb->len); eth = (struct ethhdr *)skb->data; @@ -2650,7 +3773,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) mac_type = MULTICAST_ADDRESS; } -#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) +#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT) /* First, check if we need to linearize the skb (due to FW restrictions). 
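The availability test earlier in bnx2x_start_xmit() reserves room for a worst-case packet: one BD per fragment plus the fixed start/parse BDs plus the next-page elements a BD chain may cross. A simplified guard with illustrative constants (the driver's real values come from BDS_PER_TX_PKT and NEXT_CNT_PER_TX_PKT()):

#include <stdbool.h>

#define FIXED_BDS_PER_PKT 4	/* start BD + parse BD(s), illustrative */
#define NEXT_PAGE_BDS     2	/* next-page descriptors a packet may span, illustrative */

static bool tx_ring_has_room(int avail, int nr_frags)
{
	return avail >= nr_frags + FIXED_BDS_PER_PKT + NEXT_PAGE_BDS;
}
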
No need to check fragmentation if page size > 8K (there will be no violation to FW restrictions) */ @@ -2658,8 +3781,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Statistics of linearization */ bp->lin_cnt++; if (skb_linearize(skb) != 0) { - DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - " - "silently dropping this SKB\n"); + DP(NETIF_MSG_TX_QUEUED, + "SKB linearization failed - silently dropping this SKB\n"); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -2669,8 +3792,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) mapping = dma_map_single(&bp->pdev->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { - DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - " - "silently dropping this SKB\n"); + DP(NETIF_MSG_TX_QUEUED, + "SKB mapping failed - silently dropping this SKB\n"); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -2698,11 +3821,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) first_bd = tx_start_bd; tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; - SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE, - mac_type); - /* header nbd */ - SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1); + /* header nbd: indirectly zero other flags! */ + tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT; /* remember the first BD of the packet */ tx_buf->first_bd = txdata->tx_bd_prod; @@ -2718,8 +3839,19 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) cpu_to_le16(vlan_tx_tag_get(skb)); tx_start_bd->bd_flags.as_bitfield |= (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); - } else - tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); + } else { + /* when transmitting in a vf, start bd must hold the ethertype + * for fw to enforce it + */ + if (IS_VF(bp)) + tx_start_bd->vlan_or_ethertype = + cpu_to_le16(ntohs(eth->h_proto)); + else + /* used by FW for packet accounting */ + tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); + } + + nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */ /* turn on parsing and get a BD */ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); @@ -2730,45 +3862,96 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) if (!CHIP_IS_E1x(bp)) { pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2; memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); - /* Set PBD in checksum offload case */ - if (xmit_type & XMIT_CSUM) + + if (xmit_type & XMIT_CSUM_ENC) { + u16 global_data = 0; + + /* Set PBD in enc checksum offload case */ + hlen = bnx2x_set_pbd_csum_enc(bp, skb, + &pbd_e2_parsing_data, + xmit_type); + + /* turn on 2nd parsing and get a BD */ + bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); + + pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd; + + memset(pbd2, 0, sizeof(*pbd2)); + + pbd_e2->data.tunnel_data.ip_hdr_start_inner_w = + (skb_inner_network_header(skb) - + skb->data) >> 1; + + if (xmit_type & XMIT_GSO_ENC) + bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2, + &global_data, + xmit_type); + + pbd2->global_data = cpu_to_le16(global_data); + + /* add addition parse BD indication to start BD */ + SET_FLAG(tx_start_bd->general_data, + ETH_TX_START_BD_PARSE_NBDS, 1); + /* set encapsulation flag in start BD */ + SET_FLAG(tx_start_bd->general_data, + ETH_TX_START_BD_TUNNEL_EXIST, 1); + + tx_buf->flags |= BNX2X_HAS_SECOND_PBD; + + nbd++; + } else if (xmit_type & XMIT_CSUM) { + /* Set PBD in checksum offload case 
w/o encapsulation */ hlen = bnx2x_set_pbd_csum_e2(bp, skb, &pbd_e2_parsing_data, xmit_type); - if (IS_MF_SI(bp)) { - /* - * fill in the MAC addresses in the PBD - for local - * switching - */ - bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi, - &pbd_e2->src_mac_addr_mid, - &pbd_e2->src_mac_addr_lo, + } + + /* Add the macs to the parsing BD if this is a vf or if + * Tx Switching is enabled. + */ + if (IS_VF(bp)) { + /* override GRE parameters in BD */ + bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, + &pbd_e2->data.mac_addr.src_mid, + &pbd_e2->data.mac_addr.src_lo, eth->h_source); - bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi, - &pbd_e2->dst_mac_addr_mid, - &pbd_e2->dst_mac_addr_lo, + + bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi, + &pbd_e2->data.mac_addr.dst_mid, + &pbd_e2->data.mac_addr.dst_lo, + eth->h_dest); + } else if (bp->flags & TX_SWITCHING) { + bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi, + &pbd_e2->data.mac_addr.dst_mid, + &pbd_e2->data.mac_addr.dst_lo, eth->h_dest); } + + SET_FLAG(pbd_e2_parsing_data, + ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type); } else { + u16 global_data = 0; pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x; memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); /* Set PBD in checksum offload case */ if (xmit_type & XMIT_CSUM) hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type); + SET_FLAG(global_data, + ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type); + pbd_e1x->global_data |= cpu_to_le16(global_data); } /* Setup the data pointer of the first BD of the packet */ tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); - nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */ tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); pkt_size = tx_start_bd->nbytes; - DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d" - " nbytes %d flags %x vlan %x\n", + DP(NETIF_MSG_TX_QUEUED, + "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n", tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, - le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes), + le16_to_cpu(tx_start_bd->nbytes), tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan_or_ethertype)); @@ -2781,15 +3964,17 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; - if (unlikely(skb_headlen(skb) > hlen)) + if (unlikely(skb_headlen(skb) > hlen)) { + nbd++; bd_prod = bnx2x_tx_split(bp, txdata, tx_buf, &tx_start_bd, hlen, - bd_prod, ++nbd); + bd_prod); + } if (!CHIP_IS_E1x(bp)) bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, xmit_type); else - bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type); + bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type); } /* Set the PBD's parsing_data field if not zero @@ -2809,8 +3994,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { unsigned int pkts_compl = 0, bytes_compl = 0; - DP(NETIF_MSG_TX_QUEUED, "Unable to map page - " - "dropping packet...\n"); + DP(NETIF_MSG_TX_QUEUED, + "Unable to map page - dropping packet...\n"); /* we need unmap all buffers already mapped * for this SKB; @@ -2866,8 +4051,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) if (pbd_e1x) DP(NETIF_MSG_TX_QUEUED, - "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u" - " tcp_flags %x xsum %x seq %u hlen %u\n", + "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum 
%x seq %u hlen %u\n", pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w, pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags, pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq, @@ -2875,14 +4059,20 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) if (pbd_e2) DP(NETIF_MSG_TX_QUEUED, "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n", - pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid, - pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi, - pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo, + pbd_e2, + pbd_e2->data.mac_addr.dst_hi, + pbd_e2->data.mac_addr.dst_mid, + pbd_e2->data.mac_addr.dst_lo, + pbd_e2->data.mac_addr.src_hi, + pbd_e2->data.mac_addr.src_mid, + pbd_e2->data.mac_addr.src_lo, pbd_e2->parsing_data); DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); netdev_tx_sent_queue(txq, skb->len); + skb_tx_timestamp(skb); + txdata->tx_pkt_prod++; /* * Make sure that the BD data is updated before updating the producer @@ -2902,7 +4092,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) txdata->tx_bd_prod += nbd; - if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) { + if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) { netif_tx_stop_queue(txq); /* paired memory barrier is in bnx2x_tx_int(), we have to keep @@ -2910,8 +4100,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) * fp->bd_tx_cons */ smp_mb(); - fp->eth_q_stats.driver_xoff++; - if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3) + bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; + if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT) netif_tx_wake_queue(txq); } txdata->tx_pkt++; @@ -2935,7 +4125,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) /* setup tc must be called under rtnl lock */ ASSERT_RTNL(); - /* no traffic classes requested. aborting */ + /* no traffic classes requested. Aborting */ if (!num_tc) { netdev_reset_tc(dev); return 0; @@ -2943,28 +4133,26 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) /* requested to support too many traffic classes */ if (num_tc > bp->max_cos) { - DP(NETIF_MSG_TX_ERR, "support for too many traffic classes" - " requested: %d. max supported is %d\n", - num_tc, bp->max_cos); + BNX2X_ERR("support for too many traffic classes requested: %d. 
Max supported is %d\n", + num_tc, bp->max_cos); return -EINVAL; } /* declare amount of supported traffic classes */ if (netdev_set_num_tc(dev, num_tc)) { - DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n", - num_tc); + BNX2X_ERR("failed to declare %d traffic classes\n", num_tc); return -EINVAL; } /* configure priority to traffic class mapping */ for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) { netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]); - DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", + DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, + "mapping priority %d to tc %d\n", prio, bp->prio_to_cos[prio]); } - - /* Use this configuration to diffrentiate tc0 from other COSes + /* Use this configuration to differentiate tc0 from other COSes This can be used for ets or pfc, and save the effort of setting up a multio class queue disc or negotiating DCBX with a switch netdev_set_prio_tc_map(dev, 0, 0); @@ -2977,9 +4165,10 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) /* configure traffic class to transmission queue mapping */ for (cos = 0; cos < bp->max_cos; cos++) { count = BNX2X_NUM_ETH_QUEUES(bp); - offset = cos * MAX_TXQS_PER_COS; + offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp); netdev_set_tc_queue(dev, cos, count, offset); - DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n", + DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, + "mapping tc %d to offset %d count %d\n", cos, offset, count); } @@ -2993,13 +4182,16 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p) struct bnx2x *bp = netdev_priv(dev); int rc = 0; - if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) + if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) { + BNX2X_ERR("Requested MAC address is not valid\n"); return -EINVAL; + } -#ifdef BCM_CNIC - if (IS_MF_ISCSI_SD(bp) && !is_zero_ether_addr(addr->sa_data)) + if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) && + !is_zero_ether_addr(addr->sa_data)) { + BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n"); return -EINVAL; -#endif + } if (netif_running(dev)) { rc = bnx2x_set_eth_mac(bp, false); @@ -3022,13 +4214,11 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) u8 cos; /* Common */ -#ifdef BCM_CNIC + if (IS_FCOE_IDX(fp_index)) { memset(sb, 0, sizeof(union host_hc_status_block)); fp->status_blk_mapping = 0; - } else { -#endif /* status blocks */ if (!CHIP_IS_E1x(bp)) BNX2X_PCI_FREE(sb->e2_sb, @@ -3040,9 +4230,8 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) bnx2x_fp(bp, fp_index, status_blk_mapping), sizeof(struct host_hc_status_block_e1x)); -#ifdef BCM_CNIC } -#endif + /* Rx */ if (!skip_rx_queue(bp, fp_index)) { bnx2x_free_rx_bds(fp); @@ -3069,9 +4258,9 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) if (!skip_tx_queue(bp, fp_index)) { /* fastpath tx rings: tx_buf tx_desc */ for_each_cos_in_tx_queue(fp, cos) { - struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; + struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; - DP(BNX2X_MSG_SP, + DP(NETIF_MSG_IFDOWN, "freeing tx memory of fp %d cos %d cid %d\n", fp_index, cos, txdata->cid); @@ -3084,14 +4273,21 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) /* end of fastpath */ } +static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp) +{ + int i; + for_each_cnic_queue(bp, i) + bnx2x_free_fp_mem_at(bp, i); +} + void bnx2x_free_fp_mem(struct bnx2x *bp) { int i; - for_each_queue(bp, i) + for_each_eth_queue(bp, i) bnx2x_free_fp_mem_at(bp, i); } -static inline void set_sb_shortcuts(struct bnx2x *bp, int 
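bnx2x_setup_tc() follows the stock netdev multi-TC recipe: declare the class count, map the eight priorities onto classes, then hand each class a contiguous slice of transmit queues. The same recipe in isolation; the identity priority map and the 4-queue slice are placeholder assumptions, not the driver's values:

#include <linux/netdevice.h>

static int example_setup_tc(struct net_device *dev, u8 num_tc)
{
	u16 queues_per_tc = 4;		/* assumed per-class queue count */
	u8 prio, tc;

	if (!num_tc) {
		netdev_reset_tc(dev);	/* collapse back to one class */
		return 0;
	}
	if (netdev_set_num_tc(dev, num_tc))
		return -EINVAL;

	/* priority -> class, modulo map purely for illustration */
	for (prio = 0; prio < 8; prio++)
		netdev_set_prio_tc_map(dev, prio, prio % num_tc);

	/* class tc owns queues [tc * queues_per_tc, +queues_per_tc) */
	for (tc = 0; tc < num_tc; tc++)
		netdev_set_tc_queue(dev, tc, queues_per_tc,
				    tc * queues_per_tc);
	return 0;
}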
index) +static void set_sb_shortcuts(struct bnx2x *bp, int index) { union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk); if (!CHIP_IS_E1x(bp)) { @@ -3107,6 +4303,63 @@ static inline void set_sb_shortcuts(struct bnx2x *bp, int index) } } +/* Returns the number of actually allocated BDs */ +static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp, + int rx_ring_size) +{ + struct bnx2x *bp = fp->bp; + u16 ring_prod, cqe_ring_prod; + int i, failure_cnt = 0; + + fp->rx_comp_cons = 0; + cqe_ring_prod = ring_prod = 0; + + /* This routine is called only during fo init so + * fp->eth_q_stats.rx_skb_alloc_failed = 0 + */ + for (i = 0; i < rx_ring_size; i++) { + if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) { + failure_cnt++; + continue; + } + ring_prod = NEXT_RX_IDX(ring_prod); + cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod); + WARN_ON(ring_prod <= (i - failure_cnt)); + } + + if (failure_cnt) + BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n", + i - failure_cnt, fp->index); + + fp->rx_bd_prod = ring_prod; + /* Limit the CQE producer by the CQE ring size */ + fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT, + cqe_ring_prod); + fp->rx_pkt = fp->rx_calls = 0; + + bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt; + + return i - failure_cnt; +} + +static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp) +{ + int i; + + for (i = 1; i <= NUM_RCQ_RINGS; i++) { + struct eth_rx_cqe_next_page *nextpg; + + nextpg = (struct eth_rx_cqe_next_page *) + &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; + nextpg->addr_hi = + cpu_to_le32(U64_HI(fp->rx_comp_mapping + + BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); + nextpg->addr_lo = + cpu_to_le32(U64_LO(fp->rx_comp_mapping + + BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); + } +} + static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) { union host_hc_status_block *sb; @@ -3115,16 +4368,24 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) u8 cos; int rx_ring_size = 0; -#ifdef BCM_CNIC - if (!bp->rx_ring_size && IS_MF_ISCSI_SD(bp)) { + if (!bp->rx_ring_size && + (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) { rx_ring_size = MIN_RX_SIZE_NONTPA; bp->rx_ring_size = rx_ring_size; - } else -#endif - if (!bp->rx_ring_size) { - + } else if (!bp->rx_ring_size) { rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); + if (CHIP_IS_E3(bp)) { + u32 cfg = SHMEM_RD(bp, + dev_info.port_hw_config[BP_PORT(bp)]. + default_cfg); + + /* Decrease ring size for 1G functions */ + if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) == + PORT_HW_CFG_NET_SERDES_IF_SGMII) + rx_ring_size /= 10; + } + /* allocate at least number of buffers required by FW */ rx_ring_size = max_t(int, bp->disable_tpa ? 
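bnx2x_set_next_page_rx_cq(), moved here from the header, stitches the last CQE slot of every page into a pointer at the next page, so discontiguous pages behave as one logical ring. The chaining idea on its own; sizes and structs below are illustrative stand-ins:

#include <linux/types.h>

#define EX_PAGE_SZ	4096
#define EX_NUM_PAGES	8

struct example_cqe { u8 raw[16]; };		/* 16-byte slots */
struct example_next_ptr { __le32 addr_hi, addr_lo; u8 rsvd[8]; };

#define EX_CQE_PER_PAGE	(EX_PAGE_SZ / sizeof(struct example_cqe))

static void chain_cq_pages(struct example_cqe *ring, dma_addr_t ring_dma)
{
	int i;

	for (i = 1; i <= EX_NUM_PAGES; i++) {
		/* last slot of page i-1 carries the link, not a CQE */
		struct example_next_ptr *np = (struct example_next_ptr *)
			&ring[EX_CQE_PER_PAGE * i - 1];
		/* point at page i, wrapping to page 0 after the last */
		dma_addr_t next = ring_dma +
			EX_PAGE_SZ * (i % EX_NUM_PAGES);

		np->addr_hi = cpu_to_le32(upper_32_bits(next));
		np->addr_lo = cpu_to_le32(lower_32_bits(next));
	}
}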
MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA, rx_ring_size); @@ -3133,23 +4394,25 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) } else /* if rx_ring_size specified - use it */ rx_ring_size = bp->rx_ring_size; + DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size); + /* Common */ sb = &bnx2x_fp(bp, index, status_blk); -#ifdef BCM_CNIC + if (!IS_FCOE_IDX(index)) { -#endif /* status blocks */ - if (!CHIP_IS_E1x(bp)) - BNX2X_PCI_ALLOC(sb->e2_sb, - &bnx2x_fp(bp, index, status_blk_mapping), - sizeof(struct host_hc_status_block_e2)); - else - BNX2X_PCI_ALLOC(sb->e1x_sb, - &bnx2x_fp(bp, index, status_blk_mapping), - sizeof(struct host_hc_status_block_e1x)); -#ifdef BCM_CNIC + if (!CHIP_IS_E1x(bp)) { + sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), + sizeof(struct host_hc_status_block_e2)); + if (!sb->e2_sb) + goto alloc_mem_err; + } else { + sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), + sizeof(struct host_hc_status_block_e1x)); + if (!sb->e1x_sb) + goto alloc_mem_err; + } } -#endif /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to * set shortcuts for it. @@ -3161,40 +4424,55 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) if (!skip_tx_queue(bp, index)) { /* fastpath tx rings: tx_buf tx_desc */ for_each_cos_in_tx_queue(fp, cos) { - struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; + struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; - DP(BNX2X_MSG_SP, "allocating tx memory of " - "fp %d cos %d\n", + DP(NETIF_MSG_IFUP, + "allocating tx memory of fp %d cos %d\n", index, cos); - BNX2X_ALLOC(txdata->tx_buf_ring, - sizeof(struct sw_tx_bd) * NUM_TX_BD); - BNX2X_PCI_ALLOC(txdata->tx_desc_ring, - &txdata->tx_desc_mapping, - sizeof(union eth_tx_bd_types) * NUM_TX_BD); + txdata->tx_buf_ring = kcalloc(NUM_TX_BD, + sizeof(struct sw_tx_bd), + GFP_KERNEL); + if (!txdata->tx_buf_ring) + goto alloc_mem_err; + txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping, + sizeof(union eth_tx_bd_types) * NUM_TX_BD); + if (!txdata->tx_desc_ring) + goto alloc_mem_err; } } /* Rx */ if (!skip_rx_queue(bp, index)) { /* fastpath rx rings: rx_buf rx_desc rx_comp */ - BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring), - sizeof(struct sw_rx_bd) * NUM_RX_BD); - BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring), - &bnx2x_fp(bp, index, rx_desc_mapping), - sizeof(struct eth_rx_bd) * NUM_RX_BD); + bnx2x_fp(bp, index, rx_buf_ring) = + kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL); + if (!bnx2x_fp(bp, index, rx_buf_ring)) + goto alloc_mem_err; + bnx2x_fp(bp, index, rx_desc_ring) = + BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping), + sizeof(struct eth_rx_bd) * NUM_RX_BD); + if (!bnx2x_fp(bp, index, rx_desc_ring)) + goto alloc_mem_err; - BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring), - &bnx2x_fp(bp, index, rx_comp_mapping), - sizeof(struct eth_fast_path_rx_cqe) * - NUM_RCQ_BD); + /* Seed all CQEs by 1s */ + bnx2x_fp(bp, index, rx_comp_ring) = + BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping), + sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD); + if (!bnx2x_fp(bp, index, rx_comp_ring)) + goto alloc_mem_err; /* SGE ring */ - BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring), - sizeof(struct sw_rx_page) * NUM_RX_SGE); - BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring), - &bnx2x_fp(bp, index, rx_sge_mapping), - BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); + bnx2x_fp(bp, index, rx_page_ring) = + kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page), + GFP_KERNEL); + if (!bnx2x_fp(bp, index, rx_page_ring)) + goto 
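These hunks replace the old goto-hiding allocation macros with value-returning ones, so every call site checks its own result and jumps to a single unwind label (BNX2X_PCI_FALLOC additionally fills with 0xff to seed the CQE completion markers). The shape of that pattern, reduced to two buffers; dma_zalloc_coherent() is the zeroing allocator of this kernel era:

#include <linux/dma-mapping.h>

static int alloc_two_rings(struct device *dev)
{
	dma_addr_t d1, d2;
	void *r1 = NULL, *r2 = NULL;

	r1 = dma_zalloc_coherent(dev, 4096, &d1, GFP_KERNEL);
	if (!r1)
		goto alloc_mem_err;
	r2 = dma_zalloc_coherent(dev, 4096, &d2, GFP_KERNEL);
	if (!r2)
		goto alloc_mem_err;
	return 0;

alloc_mem_err:
	if (r1)
		dma_free_coherent(dev, 4096, r1, d1);
	return -ENOMEM;
}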
alloc_mem_err; + bnx2x_fp(bp, index, rx_sge_ring) = + BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping), + BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); + if (!bnx2x_fp(bp, index, rx_sge_ring)) + goto alloc_mem_err; /* RX BD ring */ bnx2x_set_next_page_rx_bd(fp); @@ -3226,31 +4504,31 @@ alloc_mem_err: return 0; } -int bnx2x_alloc_fp_mem(struct bnx2x *bp) +static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp) +{ + if (!NO_FCOE(bp)) + /* FCoE */ + if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp))) + /* we will fail load process instead of mark + * NO_FCOE_FLAG + */ + return -ENOMEM; + + return 0; +} + +static int bnx2x_alloc_fp_mem(struct bnx2x *bp) { int i; - /** - * 1. Allocate FP for leading - fatal if error - * 2. {CNIC} Allocate FCoE FP - fatal if error - * 3. {CNIC} Allocate OOO + FWD - disable OOO if error - * 4. Allocate RSS - fix number of queues if error + /* 1. Allocate FP for leading - fatal if error + * 2. Allocate RSS - fix number of queues if error */ /* leading */ if (bnx2x_alloc_fp_mem_at(bp, 0)) return -ENOMEM; -#ifdef BCM_CNIC - if (!NO_FCOE(bp)) - /* FCoE */ - if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX)) - /* we will fail load process instead of mark - * NO_FCOE_FLAG - */ - return -ENOMEM; -#endif - /* RSS */ for_each_nondefault_eth_queue(bp, i) if (bnx2x_alloc_fp_mem_at(bp, i)) @@ -3261,17 +4539,18 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp) int delta = BNX2X_NUM_ETH_QUEUES(bp) - i; WARN_ON(delta < 0); -#ifdef BCM_CNIC - /** - * move non eth FPs next to last eth FP - * must be done in that order - * FCOE_IDX < FWD_IDX < OOO_IDX - */ + bnx2x_shrink_eth_fp(bp, delta); + if (CNIC_SUPPORT(bp)) + /* move non eth FPs next to last eth FP + * must be done in that order + * FCOE_IDX < FWD_IDX < OOO_IDX + */ - /* move FCoE fp even NO_FCOE_FLAG is on */ - bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta); -#endif - bp->num_queues -= delta; + /* move FCoE fp even NO_FCOE_FLAG is on */ + bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta); + bp->num_ethernet_queues -= delta; + bp->num_queues = bp->num_ethernet_queues + + bp->num_cnic_queues; BNX2X_ERR("Adjusted num of queues from %d to %d\n", bp->num_queues + delta, bp->num_queues); } @@ -3281,31 +4560,76 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp) void bnx2x_free_mem_bp(struct bnx2x *bp) { + int i; + + for (i = 0; i < bp->fp_array_size; i++) + kfree(bp->fp[i].tpa_info); kfree(bp->fp); + kfree(bp->sp_objs); + kfree(bp->fp_stats); + kfree(bp->bnx2x_txq); kfree(bp->msix_table); kfree(bp->ilt); } -int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp) +int bnx2x_alloc_mem_bp(struct bnx2x *bp) { struct bnx2x_fastpath *fp; struct msix_entry *tbl; struct bnx2x_ilt *ilt; int msix_table_size = 0; + int fp_array_size, txq_array_size; + int i; /* * The biggest MSI-X table we might need is as a maximum number of fast - * path IGU SBs plus default SB (for PF). + * path IGU SBs plus default SB (for PF only). 
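bnx2x_alloc_fp_mem() treats the leading queue as fatal but only shrinks the RSS queue count when a later allocation fails, moving the CNIC queues down so they stay adjacent to the last ethernet queue. Reduced to the fallback idea, with all names illustrative:

#include <linux/printk.h>

struct example_dev { int num_queues; };

/* stand-in: pretend only the first 4 queues can get memory */
static int alloc_one_queue(struct example_dev *ed, int idx)
{
	return idx < 4 ? 0 : -ENOMEM;
}

static int alloc_queues(struct example_dev *ed)
{
	int i;

	if (alloc_one_queue(ed, 0))	/* leading queue is mandatory */
		return -ENOMEM;

	for (i = 1; i < ed->num_queues; i++)
		if (alloc_one_queue(ed, i))
			break;

	if (i != ed->num_queues) {
		pr_err("adjusted num of queues from %d to %d\n",
		       ed->num_queues, i);
		ed->num_queues = i;	/* run with what succeeded */
	}
	return 0;
}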
*/ - msix_table_size = bp->igu_sb_cnt + 1; + msix_table_size = bp->igu_sb_cnt; + if (IS_PF(bp)) + msix_table_size++; + BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size); /* fp array: RSS plus CNIC related L2 queues */ - fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE, - sizeof(*fp), GFP_KERNEL); + fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp); + bp->fp_array_size = fp_array_size; + BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size); + + fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL); if (!fp) goto alloc_err; + for (i = 0; i < bp->fp_array_size; i++) { + fp[i].tpa_info = + kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2, + sizeof(struct bnx2x_agg_info), GFP_KERNEL); + if (!(fp[i].tpa_info)) + goto alloc_err; + } + bp->fp = fp; + /* allocate sp objs */ + bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs), + GFP_KERNEL); + if (!bp->sp_objs) + goto alloc_err; + + /* allocate fp_stats */ + bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats), + GFP_KERNEL); + if (!bp->fp_stats) + goto alloc_err; + + /* Allocate memory for the transmission queues array */ + txq_array_size = + BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp); + BNX2X_DEV_INFO("txq_array_size %d", txq_array_size); + + bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata), + GFP_KERNEL); + if (!bp->bnx2x_txq) + goto alloc_err; + /* msix table */ tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL); if (!tbl) @@ -3322,7 +4646,6 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp) alloc_err: bnx2x_free_mem_bp(bp); return -ENOMEM; - } int bnx2x_reload_if_running(struct net_device *dev) @@ -3332,7 +4655,7 @@ int bnx2x_reload_if_running(struct net_device *dev) if (unlikely(!netif_running(dev))) return 0; - bnx2x_nic_unload(bp, UNLOAD_NORMAL); + bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); return bnx2x_nic_load(bp, LOAD_NORMAL); } @@ -3364,13 +4687,12 @@ int bnx2x_get_cur_phy_idx(struct bnx2x *bp) } return sel_phy_idx; - } int bnx2x_get_link_cfg_idx(struct bnx2x *bp) { u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp); /* - * The selected actived PHY is always after swapping (in case PHY + * The selected activated PHY is always after swapping (in case PHY * swapping is enabled). So when swapping is enabled, we need to reverse * the configuration */ @@ -3385,7 +4707,7 @@ int bnx2x_get_link_cfg_idx(struct bnx2x *bp) return LINK_CONFIG_IDX(sel_phy_idx); } -#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) +#ifdef NETDEV_FCOE_WWNN int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) { struct bnx2x *bp = netdev_priv(dev); @@ -3401,6 +4723,7 @@ int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) cp->fcoe_wwn_port_name_lo); break; default: + BNX2X_ERR("Wrong WWN type requested - %d\n", type); return -EINVAL; } @@ -3414,13 +4737,15 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu) struct bnx2x *bp = netdev_priv(dev); if (bp->recovery_state != BNX2X_RECOVERY_DONE) { - pr_err("Handling parity error recovery. 
Try again later\n"); + BNX2X_ERR("Can't perform change MTU during parity recovery\n"); return -EAGAIN; } if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || - ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) + ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) { + BNX2X_ERR("Can't support requested MTU size\n"); return -EINVAL; + } /* This does not race with packet allocation * because the actual alloc size is @@ -3432,13 +4757,15 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu) } netdev_features_t bnx2x_fix_features(struct net_device *dev, - netdev_features_t features) + netdev_features_t features) { struct bnx2x *bp = netdev_priv(dev); /* TPA requires Rx CSUM offloading */ - if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) + if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) { features &= ~NETIF_F_LRO; + features &= ~NETIF_F_GRO; + } return features; } @@ -3447,6 +4774,7 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features) { struct bnx2x *bp = netdev_priv(dev); u32 flags = bp->flags; + u32 changes; bool bnx2x_reload = false; if (features & NETIF_F_LRO) @@ -3454,6 +4782,11 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features) else flags &= ~TPA_ENABLE_FLAG; + if (features & NETIF_F_GRO) + flags |= GRO_ENABLE_FLAG; + else + flags &= ~GRO_ENABLE_FLAG; + if (features & NETIF_F_LOOPBACK) { if (bp->link_params.loopback_mode != LOOPBACK_BMAC) { bp->link_params.loopback_mode = LOOPBACK_BMAC; @@ -3466,10 +4799,16 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features) } } - if (flags ^ bp->flags) { - bp->flags = flags; + changes = flags ^ bp->flags; + + /* if GRO is changed while LRO is enabled, don't force a reload */ + if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG)) + changes &= ~GRO_ENABLE_FLAG; + + if (changes) bnx2x_reload = true; - } + + bp->flags = flags; if (bnx2x_reload) { if (bp->recovery_state == BNX2X_RECOVERY_DONE) @@ -3489,12 +4828,8 @@ void bnx2x_tx_timeout(struct net_device *dev) bnx2x_panic(); #endif - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - /* This allows the netif to be shutdown gracefully before resetting */ - schedule_delayed_work(&bp->sp_rtnl_task, 0); + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0); } int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) @@ -3519,7 +4854,7 @@ int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) netif_device_detach(dev); - bnx2x_nic_unload(bp, UNLOAD_CLOSE); + bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); @@ -3541,7 +4876,7 @@ int bnx2x_resume(struct pci_dev *pdev) bp = netdev_priv(dev); if (bp->recovery_state != BNX2X_RECOVERY_DONE) { - pr_err("Handling parity error recovery. Try again later\n"); + BNX2X_ERR("Handling parity error recovery. 
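The fix_features hunk encodes a dependency: TPA aggregation (surfaced as LRO and now also GRO) needs RX checksum offload, so both bits are stripped whenever NETIF_F_RXCSUM is off. The general shape of such a callback, which may only adjust the mask and must never touch hardware:

#include <linux/netdevice.h>

static netdev_features_t example_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	/* aggregation is useless without verified checksums */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~(NETIF_F_LRO | NETIF_F_GRO);
	return features;
}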
Try again later\n"); return -EAGAIN; } @@ -3557,8 +4892,6 @@ int bnx2x_resume(struct pci_dev *pdev) bnx2x_set_power_state(bp, PCI_D0); netif_device_attach(dev); - /* Since the chip was reset, clear the FW sequence number */ - bp->fw_seq = 0; rc = bnx2x_nic_load(bp, LOAD_OPEN); rtnl_unlock(); @@ -3566,10 +4899,14 @@ int bnx2x_resume(struct pci_dev *pdev) return rc; } - void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, u32 cid) { + if (!cxt) { + BNX2X_ERR("bad context pointer %p\n", cxt); + return; + } + /* ustorm cxt validation */ cxt->ustorm_ag_context.cdu_usage = CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), @@ -3580,32 +4917,33 @@ void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE); } -static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, - u8 fw_sb_id, u8 sb_index, - u8 ticks) +static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, + u8 fw_sb_id, u8 sb_index, + u8 ticks) { - u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index); REG_WR8(bp, addr, ticks); - DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n", - port, fw_sb_id, sb_index, ticks); + DP(NETIF_MSG_IFUP, + "port %x fw_sb_id %d sb_index %d ticks %d\n", + port, fw_sb_id, sb_index, ticks); } -static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port, - u16 fw_sb_id, u8 sb_index, - u8 disable) +static void storm_memset_hc_disable(struct bnx2x *bp, u8 port, + u16 fw_sb_id, u8 sb_index, + u8 disable) { u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index); - u16 flags = REG_RD16(bp, addr); + u8 flags = REG_RD8(bp, addr); /* clear and set */ flags &= ~HC_INDEX_DATA_HC_ENABLED; flags |= enable_flag; - REG_WR16(bp, addr, flags); - DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n", - port, fw_sb_id, sb_index, disable); + REG_WR8(bp, addr, flags); + DP(NETIF_MSG_IFUP, + "port %x fw_sb_id %d sb_index %d disable %d\n", + port, fw_sb_id, sb_index, disable); } void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, @@ -3619,3 +4957,15 @@ void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, disable = disable ? 1 : (usec ? 0 : 1); storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable); } + +void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag, + u32 verbose) +{ + smp_mb__before_atomic(); + set_bit(flag, &bp->sp_rtnl_state); + smp_mb__after_atomic(); + DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n", + flag); + schedule_delayed_work(&bp->sp_rtnl_task, 0); +} +EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index bf27c54ff2e..571427c7226 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -1,12 +1,12 @@ /* bnx2x_cmn.h: Broadcom Everest network driver. * - * Copyright (c) 2007-2011 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. 
* - * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com> * Written by: Eliezer Tamir * Based on code from Michael Chan's bnx2 driver * UDP CSUM errata workaround by Arik Gendelman @@ -21,14 +21,14 @@ #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> - +#include <linux/irq.h> #include "bnx2x.h" +#include "bnx2x_sriov.h" /* This is used as a replacement for an MCP if it's not present */ -extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */ - -extern int num_queues; +extern int bnx2x_load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */ +extern int bnx2x_num_queues; /************************ Macros ********************************/ #define BNX2X_PCI_FREE(x, y, size) \ @@ -48,20 +48,26 @@ extern int num_queues; } \ } while (0) -#define BNX2X_PCI_ALLOC(x, y, size) \ - do { \ - x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ - if (x == NULL) \ - goto alloc_mem_err; \ - memset((void *)x, 0, size); \ - } while (0) - -#define BNX2X_ALLOC(x, size) \ - do { \ - x = kzalloc(size, GFP_KERNEL); \ - if (x == NULL) \ - goto alloc_mem_err; \ - } while (0) +#define BNX2X_PCI_ALLOC(y, size) \ +({ \ + void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ + if (x) \ + DP(NETIF_MSG_HW, \ + "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \ + (unsigned long long)(*y), x); \ + x; \ +}) +#define BNX2X_PCI_FALLOC(y, size) \ +({ \ + void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ + if (x) { \ + memset(x, 0xff, size); \ + DP(NETIF_MSG_HW, \ + "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n", \ + (unsigned long long)(*y), x); \ + } \ + x; \ +}) /*********************** Interfaces **************************** * Functions that need to be implemented by each driver version @@ -82,17 +88,21 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode); * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. * * @bp: driver handle + * @keep_link: true iff link should be kept up */ -void bnx2x_send_unload_done(struct bnx2x *bp); +void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link); /** - * bnx2x_config_rss_pf - configure RSS parameters. + * bnx2x_config_rss_pf - configure RSS parameters in a PF. * * @bp: driver handle + * @rss_obj: RSS object to use * @ind_table: indirection table to configure * @config_hash: re-configure RSS hash keys configuration + * @enable: enabled or disabled configuration */ -int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash); +int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, + bool config_hash, bool enable); /** * bnx2x__init_func_obj - init function object @@ -140,7 +150,7 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param); * @bp: driver handle * @load_mode: current mode */ -u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode); +int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode); /** * bnx2x_link_set - configure hw according to link parameters structure. @@ -150,6 +160,14 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode); void bnx2x_link_set(struct bnx2x *bp); /** + * bnx2x_force_link_reset - Forces link reset, and put the PHY + * in reset as well. + * + * @bp: driver handle + */ +void bnx2x_force_link_reset(struct bnx2x *bp); + +/** * bnx2x_link_test - query link status. 
* * @bp: driver handle @@ -184,6 +202,7 @@ void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, /* Disable transactions from chip to host */ void bnx2x_pf_disable(struct bnx2x *bp); +int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val); /** * bnx2x__link_status_update - handles link status change. @@ -226,7 +245,6 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance); * @dev_instance: private instance */ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance); -#ifdef BCM_CNIC /** * bnx2x_cnic_notify - send command to cnic driver @@ -242,7 +260,13 @@ int bnx2x_cnic_notify(struct bnx2x *bp, int cmd); * @bp: driver handle */ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp); -#endif + +/** + * bnx2x_setup_cnic_info - provides cnic with updated info + * + * @bp: driver handle + */ +void bnx2x_setup_cnic_info(struct bnx2x *bp); /** * bnx2x_int_enable - enable HW interrupts. @@ -263,7 +287,7 @@ void bnx2x_int_enable(struct bnx2x *bp); void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw); /** - * bnx2x_nic_init - init driver internals. + * bnx2x_nic_init_cnic - init driver internals for cnic. * * @bp: driver handle * @load_code: COMMON, PORT or FUNCTION @@ -273,9 +297,39 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw); * - status blocks * - etc. */ -void bnx2x_nic_init(struct bnx2x *bp, u32 load_code); +void bnx2x_nic_init_cnic(struct bnx2x *bp); /** + * bnx2x_preirq_nic_init - init driver internals. + * + * @bp: driver handle + * + * Initializes: + * - fastpath object + * - fastpath rings + * etc. + */ +void bnx2x_pre_irq_nic_init(struct bnx2x *bp); + +/** + * bnx2x_postirq_nic_init - init driver internals. + * + * @bp: driver handle + * @load_code: COMMON, PORT or FUNCTION + * + * Initializes: + * - status blocks + * - slowpath rings + * - etc. + */ +void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code); +/** + * bnx2x_alloc_mem_cnic - allocate driver's memory for cnic. + * + * @bp: driver handle + */ +int bnx2x_alloc_mem_cnic(struct bnx2x *bp); +/** * bnx2x_alloc_mem - allocate driver's memory. * * @bp: driver handle @@ -283,6 +337,12 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code); int bnx2x_alloc_mem(struct bnx2x *bp); /** + * bnx2x_free_mem_cnic - release driver's memory for cnic. + * + * @bp: driver handle + */ +void bnx2x_free_mem_cnic(struct bnx2x *bp); +/** * bnx2x_free_mem - release driver's memory. * * @bp: driver handle @@ -301,12 +361,13 @@ void bnx2x_set_num_queues(struct bnx2x *bp); * * @bp: driver handle * @unload_mode: COMMON, PORT, FUNCTION + * @keep_link: true iff link should be kept up. * * - Cleanup MAC configuration. * - Closes clients. * - etc. */ -void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode); +void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link); /** * bnx2x_acquire_hw_lock - acquire HW lock. @@ -350,42 +411,17 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set); * If bp->state is OPEN, should be called with * netif_addr_lock_bh() */ -void bnx2x_set_rx_mode(struct net_device *dev); - -/** - * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW. - * - * @bp: driver handle - * - * If bp->state is OPEN, should be called with - * netif_addr_lock_bh(). - */ -void bnx2x_set_storm_rx_mode(struct bnx2x *bp); - -/** - * bnx2x_set_q_rx_mode - configures rx_mode for a single queue. 
- * - * @bp: driver handle - * @cl_id: client id - * @rx_mode_flags: rx mode configuration - * @rx_accept_flags: rx accept configuration - * @tx_accept_flags: tx accept configuration (tx switch) - * @ramrod_flags: ramrod configuration - */ -void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, - unsigned long rx_mode_flags, - unsigned long rx_accept_flags, - unsigned long tx_accept_flags, - unsigned long ramrod_flags); +void bnx2x_set_rx_mode_inner(struct bnx2x *bp); /* Parity errors related */ -void bnx2x_inc_load_cnt(struct bnx2x *bp); -u32 bnx2x_dec_load_cnt(struct bnx2x *bp); +void bnx2x_set_pf_load(struct bnx2x *bp); +bool bnx2x_clear_pf_load(struct bnx2x *bp); bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print); bool bnx2x_reset_is_done(struct bnx2x *bp, int engine); void bnx2x_set_reset_in_progress(struct bnx2x *bp); void bnx2x_set_reset_global(struct bnx2x *bp); void bnx2x_disable_close_the_gate(struct bnx2x *bp); +int bnx2x_init_hw_func_cnic(struct bnx2x *bp); /** * bnx2x_sp_event - handle ramrods completion. @@ -403,11 +439,19 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe); void bnx2x_ilt_set_info(struct bnx2x *bp); /** + * bnx2x_ilt_set_cnic_info - prepare ILT configurations for SRC + * and TM. + * + * @bp: driver handle + */ +void bnx2x_ilt_set_info_cnic(struct bnx2x *bp); + +/** * bnx2x_dcbx_init - initialize dcbx protocol. * * @bp: driver handle */ -void bnx2x_dcbx_init(struct bnx2x *bp); +void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem); /** * bnx2x_set_power_state - set power state to the requested value. @@ -427,12 +471,10 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); */ void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value); /* Error handling */ -void bnx2x_panic_dump(struct bnx2x *bp); - void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl); /* dev_close main block */ -int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); +int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link); /* dev_open main block */ int bnx2x_nic_load(struct bnx2x *bp, int load_mode); @@ -443,20 +485,53 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); /* setup_tc callback */ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc); +int bnx2x_get_vf_config(struct net_device *dev, int vf, + struct ifla_vf_info *ivi); +int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac); +int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos); + /* select_queue callback */ -u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb); +u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback); + +static inline void bnx2x_update_rx_prod(struct bnx2x *bp, + struct bnx2x_fastpath *fp, + u16 bd_prod, u16 rx_comp_prod, + u16 rx_sge_prod) +{ + struct ustorm_eth_rx_producers rx_prods = {0}; + u32 i; + + /* Update producers */ + rx_prods.bd_prod = bd_prod; + rx_prods.cqe_prod = rx_comp_prod; + rx_prods.sge_prod = rx_sge_prod; + + /* Make sure that the BD and SGE data is updated before updating the + * producers since FW might read the BD/SGE right after the producer + * is updated. + * This is only applicable for weak-ordered memory model archs such + * as IA-64. The following barrier is also mandatory since FW will + * assumes BDs must have buffers. 
+ */ + wmb(); + + for (i = 0; i < sizeof(rx_prods)/4; i++) + REG_WR(bp, fp->ustorm_rx_prods_offset + i*4, + ((u32 *)&rx_prods)[i]); + + mmiowb(); /* keep prod updates ordered */ + + DP(NETIF_MSG_RX_STATUS, + "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n", + fp->index, bd_prod, rx_comp_prod, rx_sge_prod); +} /* reload helper */ int bnx2x_reload_if_running(struct net_device *dev); int bnx2x_change_mac_addr(struct net_device *dev, void *p); -/* NAPI poll Rx part */ -int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget); - -void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, - u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod); - /* NAPI poll Tx part */ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata); @@ -468,11 +543,12 @@ int bnx2x_resume(struct pci_dev *pdev); void bnx2x_free_irq(struct bnx2x *bp); void bnx2x_free_fp_mem(struct bnx2x *bp); -int bnx2x_alloc_fp_mem(struct bnx2x *bp); void bnx2x_init_rx_rings(struct bnx2x *bp); +void bnx2x_init_rx_rings_cnic(struct bnx2x *bp); void bnx2x_free_skbs(struct bnx2x *bp); void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw); void bnx2x_netif_start(struct bnx2x *bp); +int bnx2x_load_cnic(struct bnx2x *bp); /** * bnx2x_enable_msix - set msix configuration. @@ -492,20 +568,18 @@ int bnx2x_enable_msix(struct bnx2x *bp); int bnx2x_enable_msi(struct bnx2x *bp); /** - * bnx2x_poll - NAPI callback + * bnx2x_low_latency_recv - LL callback * * @napi: napi structure - * @budget: - * */ -int bnx2x_poll(struct napi_struct *napi, int budget); +int bnx2x_low_latency_recv(struct napi_struct *napi); /** * bnx2x_alloc_mem_bp - allocate memories outsize main driver structure * * @bp: driver handle */ -int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp); +int bnx2x_alloc_mem_bp(struct bnx2x *bp); /** * bnx2x_free_mem_bp - release memories outsize main driver structure @@ -523,7 +597,7 @@ void bnx2x_free_mem_bp(struct bnx2x *bp); */ int bnx2x_change_mtu(struct net_device *dev, int new_mtu); -#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) +#ifdef NETDEV_FCOE_WWNN /** * bnx2x_fcoe_get_wwn - return the requested WWN value for this port * @@ -534,8 +608,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu); */ int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type); #endif + netdev_features_t bnx2x_fix_features(struct net_device *dev, - netdev_features_t features); + netdev_features_t features); int bnx2x_set_features(struct net_device *dev, netdev_features_t features); /** @@ -553,38 +628,6 @@ static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID]; } -static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp, - struct bnx2x_fastpath *fp, u16 bd_prod, - u16 rx_comp_prod, u16 rx_sge_prod, u32 start) -{ - struct ustorm_eth_rx_producers rx_prods = {0}; - u32 i; - - /* Update producers */ - rx_prods.bd_prod = bd_prod; - rx_prods.cqe_prod = rx_comp_prod; - rx_prods.sge_prod = rx_sge_prod; - - /* - * Make sure that the BD and SGE data is updated before updating the - * producers since FW might read the BD/SGE right after the producer - * is updated. - * This is only applicable for weak-ordered memory model archs such - * as IA-64. The following barrier is also mandatory since FW will - * assumes BDs must have buffers. 
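bnx2x_update_rx_prod(), now inline in the header, shows the canonical ordering for descriptor rings: finish all BD/SGE stores, issue wmb(), then write the producer, so the device can never fetch a descriptor the CPU has not finished publishing. The bare pattern, with the ring struct and register pointer as assumptions:

#include <linux/io.h>

struct example_ring { void __iomem *prod_reg; };

static void publish_rx_prod(struct example_ring *ring, u16 bd_prod)
{
	/* all descriptor memory writes happen before this point */

	/* order them before the producer store; required on weakly
	 * ordered archs, and the FW assumes published BDs have buffers
	 */
	wmb();

	writel(bd_prod, ring->prod_reg);

	mmiowb();	/* keep producer MMIO writes ordered, as above */
}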
- */ - wmb(); - - for (i = 0; i < sizeof(rx_prods)/4; i++) - REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]); - - mmiowb(); /* keep prod updates ordered */ - - DP(NETIF_MSG_RX_STATUS, - "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n", - fp->index, bd_prod, rx_comp_prod, rx_sge_prod); -} - static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id, u8 segment, u16 index, u8 op, u8 update, u32 igu_addr) @@ -597,7 +640,7 @@ static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id, (update << IGU_REGULAR_BUPDATE_SHIFT) | (op << IGU_REGULAR_ENABLE_INT_SHIFT)); - DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n", + DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n", cmd_data.sb_id_and_flags, igu_addr); REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags); @@ -606,54 +649,6 @@ static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id, barrier(); } -static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, - u8 idu_sb_id, bool is_Pf) -{ - u32 data, ctl, cnt = 100; - u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; - u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; - u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; - u32 sb_bit = 1 << (idu_sb_id%32); - u32 func_encode = func | - ((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT); - u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; - - /* Not supported in BC mode */ - if (CHIP_INT_MODE_IS_BC(bp)) - return; - - data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup - << IGU_REGULAR_CLEANUP_TYPE_SHIFT) | - IGU_REGULAR_CLEANUP_SET | - IGU_REGULAR_BCLEANUP; - - ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | - func_encode << IGU_CTRL_REG_FID_SHIFT | - IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; - - DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", - data, igu_addr_data); - REG_WR(bp, igu_addr_data, data); - mmiowb(); - barrier(); - DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", - ctl, igu_addr_ctl); - REG_WR(bp, igu_addr_ctl, ctl); - mmiowb(); - barrier(); - - /* wait for clean up to finish */ - while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) - msleep(20); - - - if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) { - DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: " - "idu_sb_id %d offset %d bit %d (cnt %d)\n", - idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); - } -} - static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id, u8 storm, u16 index, u8 op, u8 update) { @@ -668,8 +663,6 @@ static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id, (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); - DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n", - (*(u32 *)&igu_ack), hc_addr); REG_WR(bp, hc_addr, (*(u32 *)&igu_ack)); /* Make sure that ACK is written */ @@ -703,9 +696,6 @@ static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp) COMMAND_REG_SIMD_MASK); u32 result = REG_RD(bp, hc_addr); - DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n", - result, hc_addr); - barrier(); return result; } @@ -715,7 +705,7 @@ static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp) u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8); u32 result = REG_RD(bp, igu_addr); - DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n", + DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n", result, igu_addr); barrier(); @@ -748,17 +738,15 @@ static inline u16 bnx2x_tx_avail(struct bnx2x *bp, prod = txdata->tx_bd_prod; cons = txdata->tx_bd_cons; - /* NUM_TX_RINGS = number of "next-page" entries - It will be used 
as a threshold */ - used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS; + used = SUB_S16(prod, cons); #ifdef BNX2X_STOP_ON_ERROR WARN_ON(used < 0); - WARN_ON(used > bp->tx_ring_size); - WARN_ON((bp->tx_ring_size - used) > MAX_TX_AVAIL); + WARN_ON(used > txdata->tx_ring_size); + WARN_ON((txdata->tx_ring_size - used) > MAX_TX_AVAIL); #endif - return (s16)(bp->tx_ring_size) - used; + return (s16)(txdata->tx_ring_size) - used; } static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata) @@ -775,21 +763,23 @@ static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp) { u8 cos; for_each_cos_in_tx_queue(fp, cos) - if (bnx2x_tx_queue_has_work(&fp->txdata[cos])) + if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) return true; return false; } +#define BNX2X_IS_CQE_COMPLETED(cqe_fp) (cqe_fp->marker == 0x0) +#define BNX2X_SEED_CQE(cqe_fp) (cqe_fp->marker = 0xFFFFFFFF) static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) { - u16 rx_cons_sb; + u16 cons; + union eth_rx_cqe *cqe; + struct eth_fast_path_rx_cqe *cqe_fp; - /* Tell compiler that status block fields can change */ - barrier(); - rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); - if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) - rx_cons_sb++; - return (fp->rx_comp_cons != rx_cons_sb); + cons = RCQ_BD(fp->rx_comp_cons); + cqe = &fp->rx_comp_ring[cons]; + cqe_fp = &cqe->fast_path_cqe; + return BNX2X_IS_CQE_COMPLETED(cqe_fp); } /** @@ -815,7 +805,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp, return; dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), - SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); + SGE_PAGES, DMA_FROM_DEVICE); __free_pages(page, PAGES_PER_SGE_SHIFT); sw_buf->page = NULL; @@ -823,42 +813,39 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp, sge->addr_lo = 0; } -static inline void bnx2x_add_all_napi(struct bnx2x *bp) +static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp) { int i; - /* Add NAPI objects */ - for_each_rx_queue(bp, i) - netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), - bnx2x_poll, BNX2X_NAPI_WEIGHT); + for_each_rx_queue_cnic(bp, i) { + napi_hash_del(&bnx2x_fp(bp, i, napi)); + netif_napi_del(&bnx2x_fp(bp, i, napi)); + } } static inline void bnx2x_del_all_napi(struct bnx2x *bp) { int i; - for_each_rx_queue(bp, i) + for_each_eth_queue(bp, i) { + napi_hash_del(&bnx2x_fp(bp, i, napi)); netif_napi_del(&bnx2x_fp(bp, i, napi)); + } } +int bnx2x_set_int_mode(struct bnx2x *bp); + static inline void bnx2x_disable_msi(struct bnx2x *bp) { if (bp->flags & USING_MSIX_FLAG) { pci_disable_msix(bp->pdev); - bp->flags &= ~USING_MSIX_FLAG; + bp->flags &= ~(USING_MSIX_FLAG | USING_SINGLE_MSIX_FLAG); } else if (bp->flags & USING_MSI_FLAG) { pci_disable_msi(bp->pdev); bp->flags &= ~USING_MSI_FLAG; } } -static inline int bnx2x_calc_num_queues(struct bnx2x *bp) -{ - return num_queues ? 
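The new bnx2x_tx_avail() drops the old trick of folding the next-page entry count into 'used' and measures against a per-txdata ring size instead. The s16 subtraction is what lets free-running u16 producer/consumer counters wrap safely; reimplemented here for illustration:

#include <linux/types.h>

#define EX_SUB_S16(a, b)	((s16)((s16)(a) - (s16)(b)))

static s16 tx_avail(u16 prod, u16 cons, u16 ring_size)
{
	/* e.g. prod = 0x0005 just after wrap, cons = 0xfffb:
	 * (s16)5 - (s16)-5 = 10 entries in flight
	 */
	s16 used = EX_SUB_S16(prod, cons);

	return (s16)ring_size - used;
}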
- min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) : - min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp)); -} - static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp) { int i, j; @@ -885,62 +872,6 @@ static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp) bnx2x_clear_sge_mask_next_elems(fp); } -static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp, - struct bnx2x_fastpath *fp, u16 index) -{ - struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT); - struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; - struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; - dma_addr_t mapping; - - if (unlikely(page == NULL)) - return -ENOMEM; - - mapping = dma_map_page(&bp->pdev->dev, page, 0, - SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { - __free_pages(page, PAGES_PER_SGE_SHIFT); - return -ENOMEM; - } - - sw_buf->page = page; - dma_unmap_addr_set(sw_buf, mapping, mapping); - - sge->addr_hi = cpu_to_le32(U64_HI(mapping)); - sge->addr_lo = cpu_to_le32(U64_LO(mapping)); - - return 0; -} - -static inline int bnx2x_alloc_rx_data(struct bnx2x *bp, - struct bnx2x_fastpath *fp, u16 index) -{ - u8 *data; - struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index]; - struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; - dma_addr_t mapping; - - data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC); - if (unlikely(data == NULL)) - return -ENOMEM; - - mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD, - fp->rx_buf_size, - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { - kfree(data); - return -ENOMEM; - } - - rx_buf->data = data; - dma_unmap_addr_set(rx_buf, mapping, mapping); - - rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); - rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); - - return 0; -} - /* note that we are not allocating a new buffer, * we are just moving one from cons to prod * we are not creating a new mapping, @@ -962,6 +893,17 @@ static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp, /************************* Init ******************************************/ +/* returns func by VN for current port */ +static inline int func_by_vn(struct bnx2x *bp, int vn) +{ + return 2 * vn + BP_PORT(bp); +} + +static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash) +{ + return bnx2x_rss(bp, &bp->rss_conf_obj, config_hash, true); +} + /** * bnx2x_func_start - init function * @@ -971,7 +913,7 @@ static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp, */ static inline int bnx2x_func_start(struct bnx2x *bp) { - struct bnx2x_func_state_params func_params = {0}; + struct bnx2x_func_state_params func_params = {NULL}; struct bnx2x_func_start_params *start_params = &func_params.params.start; @@ -984,15 +926,18 @@ static inline int bnx2x_func_start(struct bnx2x *bp) /* Function parameters */ start_params->mf_mode = bp->mf_mode; start_params->sd_vlan_tag = bp->mf_ov; - if (CHIP_IS_E1x(bp)) - start_params->network_cos_mode = OVERRIDE_COS; - else + + if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) start_params->network_cos_mode = STATIC_COS; + else /* CHIP_IS_E1X */ + start_params->network_cos_mode = FW_WRR; + + start_params->gre_tunnel_mode = L2GRE_TUNNEL; + start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS; return bnx2x_func_state_change(bp, &func_params); } - /** * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format * @@ -1001,8 +946,8 @@ static inline int bnx2x_func_start(struct bnx2x *bp) * @fw_lo: pointer to lower part * @mac: pointer to MAC 
address */ -static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo, - u8 *mac) +static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid, + __le16 *fw_lo, u8 *mac) { ((u8 *)fw_hi)[0] = mac[1]; ((u8 *)fw_hi)[1] = mac[0]; @@ -1024,66 +969,6 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, bnx2x_free_rx_sge(bp, fp, i); } -static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, - struct bnx2x_fastpath *fp, int last) -{ - int i; - - for (i = 0; i < last; i++) { - struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i]; - struct sw_rx_bd *first_buf = &tpa_info->first_buf; - u8 *data = first_buf->data; - - if (data == NULL) { - DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i); - continue; - } - if (tpa_info->tpa_state == BNX2X_TPA_START) - dma_unmap_single(&bp->pdev->dev, - dma_unmap_addr(first_buf, mapping), - fp->rx_buf_size, DMA_FROM_DEVICE); - kfree(data); - first_buf->data = NULL; - } -} - -static inline void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata) -{ - int i; - - for (i = 1; i <= NUM_TX_RINGS; i++) { - struct eth_tx_next_bd *tx_next_bd = - &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; - - tx_next_bd->addr_hi = - cpu_to_le32(U64_HI(txdata->tx_desc_mapping + - BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); - tx_next_bd->addr_lo = - cpu_to_le32(U64_LO(txdata->tx_desc_mapping + - BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); - } - - SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); - txdata->tx_db.data.zero_fill1 = 0; - txdata->tx_db.data.prod = 0; - - txdata->tx_pkt_prod = 0; - txdata->tx_pkt_cons = 0; - txdata->tx_bd_prod = 0; - txdata->tx_bd_cons = 0; - txdata->tx_pkt = 0; -} - -static inline void bnx2x_init_tx_rings(struct bnx2x *bp) -{ - int i; - u8 cos; - - for_each_tx_queue(bp, i) - for_each_cos_in_tx_queue(&bp->fp[i], cos) - bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]); -} - static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp) { int i; @@ -1101,88 +986,19 @@ static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp) } } -static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp) -{ - int i; - - for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { - struct eth_rx_sge *sge; - - sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2]; - sge->addr_hi = - cpu_to_le32(U64_HI(fp->rx_sge_mapping + - BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); - - sge->addr_lo = - cpu_to_le32(U64_LO(fp->rx_sge_mapping + - BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); - } -} - -static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp) -{ - int i; - for (i = 1; i <= NUM_RCQ_RINGS; i++) { - struct eth_rx_cqe_next_page *nextpg; - - nextpg = (struct eth_rx_cqe_next_page *) - &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; - nextpg->addr_hi = - cpu_to_le32(U64_HI(fp->rx_comp_mapping + - BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); - nextpg->addr_lo = - cpu_to_le32(U64_LO(fp->rx_comp_mapping + - BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); - } -} - -/* Returns the number of actually allocated BDs */ -static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp, - int rx_ring_size) -{ - struct bnx2x *bp = fp->bp; - u16 ring_prod, cqe_ring_prod; - int i; - - fp->rx_comp_cons = 0; - cqe_ring_prod = ring_prod = 0; - - /* This routine is called only during fo init so - * fp->eth_q_stats.rx_skb_alloc_failed = 0 - */ - for (i = 0; i < rx_ring_size; i++) { - if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) { - fp->eth_q_stats.rx_skb_alloc_failed++; - continue; - } - ring_prod = NEXT_RX_IDX(ring_prod); - cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod); - 
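bnx2x_set_fw_mac_addr() now takes __le16 pointers; the per-byte stores amount to packing the six MAC bytes into three 16-bit words, high byte first within each pair. An equivalent, more explicit form of the same packing:

#include <linux/types.h>

/* mac 00:11:22:33:44:55 yields le16 values hi=0x0011, mid=0x2233,
 * lo=0x4455, matching the per-byte stores in the function above.
 */
static void set_fw_mac(__le16 *fw_hi, __le16 *fw_mid, __le16 *fw_lo,
		       const u8 *mac)
{
	*fw_hi  = cpu_to_le16((u16)mac[0] << 8 | mac[1]);
	*fw_mid = cpu_to_le16((u16)mac[2] << 8 | mac[3]);
	*fw_lo  = cpu_to_le16((u16)mac[4] << 8 | mac[5]);
}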
WARN_ON(ring_prod <= (i - fp->eth_q_stats.rx_skb_alloc_failed)); - } - - if (fp->eth_q_stats.rx_skb_alloc_failed) - BNX2X_ERR("was only able to allocate " - "%d rx skbs on queue[%d]\n", - (i - fp->eth_q_stats.rx_skb_alloc_failed), fp->index); - - fp->rx_bd_prod = ring_prod; - /* Limit the CQE producer by the CQE ring size */ - fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT, - cqe_ring_prod); - fp->rx_pkt = fp->rx_calls = 0; - - return i - fp->eth_q_stats.rx_skb_alloc_failed; -} - /* Statistics ID are global per chip/path, while Client IDs for E1x are per * port. */ static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp) { - if (!CHIP_IS_E1x(fp->bp)) + struct bnx2x *bp = fp->bp; + if (!CHIP_IS_E1x(bp)) { + /* there are special statistics counters for FCoE 136..140 */ + if (IS_FCOE_FP(fp)) + return bp->cnic_base_cl_id + (bp->pf_num >> 1); return fp->cl_id; - else - return fp->cl_id + BP_PORT(fp->bp) * FP_SB_MAX_E1x; + } + return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x; } static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp, @@ -1191,8 +1007,8 @@ static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp, struct bnx2x *bp = fp->bp; /* Configure classification DBs */ - bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid, - BP_FUNC(bp), bnx2x_sp(bp, mac_rdata), + bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id, + fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata), bnx2x_sp_mapping(bp, mac_rdata), BNX2X_FILTER_MAC_PENDING, &bp->sp_state, obj_type, @@ -1256,6 +1072,9 @@ static inline void bnx2x_init_bp_objs(struct bnx2x *bp) bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp), bnx2x_get_path_func_num(bp)); + bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_ABS_FUNC(bp)>>1, + bnx2x_get_path_func_num(bp)); + /* RSS configuration object */ bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id, bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp), @@ -1273,29 +1092,21 @@ static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp) return fp->cl_id; } -static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp) -{ - struct bnx2x *bp = fp->bp; - - if (!CHIP_IS_E1x(bp)) - return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); - else - return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); -} - static inline void bnx2x_init_txdata(struct bnx2x *bp, - struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index, - __le16 *tx_cons_sb) + struct bnx2x_fp_txdata *txdata, u32 cid, + int txq_index, __le16 *tx_cons_sb, + struct bnx2x_fastpath *fp) { txdata->cid = cid; txdata->txq_index = txq_index; txdata->tx_cons_sb = tx_cons_sb; + txdata->parent_fp = fp; + txdata->tx_ring_size = IS_FCOE_FP(fp) ? 
MAX_TX_AVAIL : bp->tx_ring_size; - DP(BNX2X_MSG_SP, "created tx data cid %d, txq %d\n", + DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n", txdata->cid, txdata->txq_index); } -#ifdef BCM_CNIC static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) { return bp->cnic_base_cl_id + cl_idx + @@ -1304,7 +1115,6 @@ static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) { - /* the 'first' id is allocated for the cnic */ return bp->base_fw_ndsb; } @@ -1314,54 +1124,6 @@ static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp) return bp->igu_base_sb; } - -static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) -{ - struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); - unsigned long q_type = 0; - - bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp); - bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp, - BNX2X_FCOE_ETH_CL_ID_IDX); - /** Current BNX2X_FCOE_ETH_CID deffinition implies not more than - * 16 ETH clients per function when CNIC is enabled! - * - * Fix it ASAP!!! - */ - bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID; - bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; - bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; - bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; - - bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]), - fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX); - - DP(BNX2X_MSG_SP, "created fcoe tx data (fp index %d)\n", fp->index); - - /* qZone id equals to FW (per path) client id */ - bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp); - /* init shortcut */ - bnx2x_fcoe(bp, ustorm_rx_prods_offset) = - bnx2x_rx_ustorm_prods_offset(fp); - - /* Configure Queue State object */ - __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); - __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); - - /* No multi-CoS for FCoE L2 client */ - BUG_ON(fp->max_cos != 1); - - bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1, - BP_FUNC(bp), bnx2x_sp(bp, q_rdata), - bnx2x_sp_mapping(bp, q_rdata), q_type); - - DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d " - "igu_sb %d\n", - fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, - fp->igu_sb_id); -} -#endif - static inline int bnx2x_clean_tx_queue(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) { @@ -1369,8 +1131,7 @@ static inline int bnx2x_clean_tx_queue(struct bnx2x *bp, while (bnx2x_has_tx_work_unload(txdata)) { if (!cnt) { - BNX2X_ERR("timeout waiting for queue[%d]: " - "txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n", + BNX2X_ERR("timeout waiting for queue[%d]: txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n", txdata->txq_index, txdata->tx_pkt_prod, txdata->tx_pkt_cons); #ifdef BNX2X_STOP_ON_ERROR @@ -1381,7 +1142,7 @@ static inline int bnx2x_clean_tx_queue(struct bnx2x *bp, #endif } cnt--; - usleep_range(1000, 1000); + usleep_range(1000, 2000); } return 0; @@ -1397,30 +1158,6 @@ static inline void __storm_memset_struct(struct bnx2x *bp, REG_WR(bp, addr + (i * 4), data[i]); } -static inline void storm_memset_func_cfg(struct bnx2x *bp, - struct tstorm_eth_function_common_config *tcfg, - u16 abs_fid) -{ - size_t size = sizeof(struct tstorm_eth_function_common_config); - - u32 addr = BAR_TSTRORM_INTMEM + - TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid); - - __storm_memset_struct(bp, addr, size, (u32 *)tcfg); -} - -static inline void storm_memset_cmng(struct bnx2x *bp, - struct cmng_struct_per_port *cmng, - u8 port) -{ - size_t size = sizeof(struct cmng_struct_per_port); - - u32 addr = BAR_XSTRORM_INTMEM + - XSTORM_CMNG_PER_PORT_VARS_OFFSET(port); - - 
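bnx2x_clean_tx_queue() waits for the ring to drain with a bounded number of usleep_range() naps; widening the range to (1000, 2000) lets the timer subsystem coalesce wakeups. The generic bounded-poll idiom, names assumed:

#include <linux/delay.h>

static int wait_drained(bool (*has_work)(void *), void *arg)
{
	int cnt = 1000;			/* ~1-2s worst case */

	while (has_work(arg)) {
		if (!cnt--)
			return -EBUSY;	/* caller escalates or panics */
		usleep_range(1000, 2000);
	}
	return 0;
}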
__storm_memset_struct(bp, addr, size, (u32 *)cmng); -} - /** * bnx2x_wait_sp_comp - wait for the outstanding SP commands. * @@ -1440,15 +1177,15 @@ static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask) } netif_addr_unlock_bh(bp->dev); - usleep_range(1000, 1000); + usleep_range(1000, 2000); } smp_mb(); netif_addr_lock_bh(bp->dev); if (bp->sp_state & mask) { - BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, " - "mask 0x%lx\n", bp->sp_state, mask); + BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, mask 0x%lx\n", + bp->sp_state, mask); netif_addr_unlock_bh(bp->dev); return false; } @@ -1484,13 +1221,26 @@ static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg) u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT; if (!max_cfg) { - DP(NETIF_MSG_LINK, + DP(NETIF_MSG_IFUP | BNX2X_MSG_ETHTOOL, "Max BW configured to 0 - using 100 instead\n"); max_cfg = 100; } return max_cfg; } +/* checks if HW supports GRO for given MTU */ +static inline bool bnx2x_mtu_allows_gro(int mtu) +{ + /* gro frags per page */ + int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE); + + /* + * 1. Number of frags should not grow above MAX_SKB_FRAGS + * 2. Frag must fit the page + */ + return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS; +} + /** * bnx2x_get_iscsi_info - update iSCSI params according to licensing info. * @@ -1499,12 +1249,6 @@ static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg) */ void bnx2x_get_iscsi_info(struct bnx2x *bp); -/* returns func by VN for current port */ -static inline int func_by_vn(struct bnx2x *bp, int vn) -{ - return 2 * vn + BP_PORT(bp); -} - /** * bnx2x_link_sync_notify - send notification to other functions. * @@ -1539,7 +1283,7 @@ static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set) { if (SHMEM2_HAS(bp, drv_flags)) { u32 drv_flags; - bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS); + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS); drv_flags = SHMEM2_RD(bp, drv_flags); if (set) @@ -1548,20 +1292,35 @@ static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set) RESET_FLAGS(drv_flags, flags); SHMEM2_WR(bp, drv_flags, drv_flags); - DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags); - bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS); + DP(NETIF_MSG_IFUP, "drv_flags 0x%08x\n", drv_flags); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS); } } static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr) { - if (is_valid_ether_addr(addr)) + if (is_valid_ether_addr(addr) || + (is_zero_ether_addr(addr) && + (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)))) return true; -#ifdef BCM_CNIC - if (is_zero_ether_addr(addr) && IS_MF_ISCSI_SD(bp)) - return true; -#endif + return false; } +/** + * bnx2x_fill_fw_str - Fill buffer with FW version string + * + * @bp: driver handle + * @buf: character buffer to fill with the fw name + * @buf_len: length of the above buffer + * + */ +void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len); + +int bnx2x_drain_tx_queues(struct bnx2x *bp); +void bnx2x_squeeze_objects(struct bnx2x *bp); + +void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag, + u32 verbose); + #endif /* BNX2X_CMN_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 5051cf3deb2..51a952c51cb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -1,6 +1,6 @@ /* 
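bnx2x_mtu_allows_gro() bounds two things at once: an aggregated frag must fit one SGE page, and the frags-per-page times the SGL size must stay within MAX_SKB_FRAGS. The same formula as a checkable sketch with assumed constants; with a 4096-byte page and a 1500-byte MTU it yields 2 frags per page, 8 * 2 = 16 <= 17, so GRO is allowed:

#include <linux/types.h>

static bool mtu_allows_gro(int mtu)
{
	int sge_page = 4096;	/* assumed SGE_PAGE_SIZE */
	int tpa_hdr = 80;	/* assumed ETH_MAX_TPA_HEADER_SIZE */
	int sgl_size = 8;	/* assumed U_ETH_SGL_SIZE */
	int fpp = sge_page / (mtu - tpa_hdr);	/* frags per page */

	return mtu <= sge_page && sgl_size * fpp <= 17 /* MAX_SKB_FRAGS */;
}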
bnx2x_dcb.c: Broadcom Everest network driver. * - * Copyright 2009-2011 Broadcom Corporation + * Copyright 2009-2013 Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -12,7 +12,7 @@ * license other than the GPL, without Broadcom's express prior written * consent. * - * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com> * Written by: Dmitry Kravkov * */ @@ -30,10 +30,8 @@ #include "bnx2x_dcb.h" /* forward declarations of dcbx related functions */ -static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp); static void bnx2x_pfc_set_pfc(struct bnx2x *bp); static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp); -static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp); static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, u32 *set_configuration_ets_pg, u32 *pri_pg_tbl); @@ -91,25 +89,21 @@ static void bnx2x_pfc_set(struct bnx2x *bp) /* * Rx COS configuration * Changing PFC RX configuration . - * In RX COS0 will always be configured to lossy and COS1 to lossless + * In RX COS0 will always be configured to lossless and COS1 to lossy */ for (i = 0 ; i < MAX_PFC_PRIORITIES ; i++) { pri_bit = 1 << i; - if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp)) + if (!(pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp))) val |= 1 << (i * 4); } pfc_params.pkt_priority_to_cos = val; /* RX COS0 */ - pfc_params.llfc_low_priority_classes = 0; + pfc_params.llfc_low_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp); /* RX COS1 */ - pfc_params.llfc_high_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp); - - /* BRB configuration */ - pfc_params.cos0_pauseable = false; - pfc_params.cos1_pauseable = true; + pfc_params.llfc_high_priority_classes = 0; bnx2x_acquire_phy_lock(bp); bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED; @@ -121,26 +115,6 @@ static void bnx2x_pfc_clear(struct bnx2x *bp) { struct bnx2x_nig_brb_pfc_port_params nig_params = {0}; nig_params.pause_enable = 1; -#ifdef BNX2X_SAFC - if (bp->flags & SAFC_TX_FLAG) { - u32 high = 0, low = 0; - int i; - - for (i = 0; i < BNX2X_MAX_PRIORITY; i++) { - if (bp->pri_map[i] == 1) - high |= (1 << i); - if (bp->pri_map[i] == 0) - low |= (1 << i); - } - - nig_params.llfc_low_priority_classes = high; - nig_params.llfc_low_priority_classes = low; - - nig_params.pause_enable = 0; - nig_params.llfc_enable = 1; - nig_params.llfc_out_en = 1; - } -#endif /* BNX2X_SAFC */ bnx2x_acquire_phy_lock(bp); bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_PFC_ENABLED; bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &nig_params); @@ -167,27 +141,27 @@ static void bnx2x_dump_dcbx_drv_param(struct bnx2x *bp, DCBX_PRI_PG_GET(features->ets.pri_pg_tbl, i)); /* pfc */ - DP(NETIF_MSG_LINK, "dcbx_features.pfc.pri_en_bitmap %x\n", + DP(BNX2X_MSG_DCB, "dcbx_features.pfc.pri_en_bitmap %x\n", features->pfc.pri_en_bitmap); - DP(NETIF_MSG_LINK, "dcbx_features.pfc.pfc_caps %x\n", + DP(BNX2X_MSG_DCB, "dcbx_features.pfc.pfc_caps %x\n", features->pfc.pfc_caps); - DP(NETIF_MSG_LINK, "dcbx_features.pfc.enabled %x\n", + DP(BNX2X_MSG_DCB, "dcbx_features.pfc.enabled %x\n", features->pfc.enabled); - DP(NETIF_MSG_LINK, "dcbx_features.app.default_pri %x\n", + DP(BNX2X_MSG_DCB, "dcbx_features.app.default_pri %x\n", features->app.default_pri); - DP(NETIF_MSG_LINK, "dcbx_features.app.tc_supported %x\n", + DP(BNX2X_MSG_DCB, "dcbx_features.app.tc_supported %x\n", features->app.tc_supported); - DP(NETIF_MSG_LINK, 
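The reworked bnx2x_pfc_set() builds pkt_priority_to_cos as eight 4-bit fields, one per priority, and after this change writes 1 (RX COS1, now the lossy class) for every priority that is *not* in the PFC pause mask, leaving paused priorities on the lossless COS0. A compilable sketch of that nibble encoding, with shortened names:

#include <stdio.h>
#include <stdint.h>

#define MAX_PFC_PRIORITIES 8

/* Each of the 8 priorities owns a 4-bit field; writing 1 steers that
 * priority to RX COS1 (lossy), 0 leaves it on COS0 (lossless). */
static uint32_t prio_to_cos(uint32_t pause_mask)
{
	uint32_t val = 0;
	int i;

	for (i = 0; i < MAX_PFC_PRIORITIES; i++)
		if (!(pause_mask & (1u << i)))	/* not PFC-paused -> lossy */
			val |= 1u << (i * 4);
	return val;
}

int main(void)
{
	/* e.g. priority 3 (the usual FCoE priority) lossless, rest lossy */
	printf("0x%08x\n", prio_to_cos(1u << 3));	/* -> 0x11110111 */
	return 0;
}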
"dcbx_features.app.enabled %x\n", + DP(BNX2X_MSG_DCB, "dcbx_features.app.enabled %x\n", features->app.enabled); for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { - DP(NETIF_MSG_LINK, + DP(BNX2X_MSG_DCB, "dcbx_features.app.app_pri_tbl[%x].app_id %x\n", i, features->app.app_pri_tbl[i].app_id); - DP(NETIF_MSG_LINK, + DP(BNX2X_MSG_DCB, "dcbx_features.app.app_pri_tbl[%x].pri_bitmap %x\n", i, features->app.app_pri_tbl[i].pri_bitmap); - DP(NETIF_MSG_LINK, + DP(BNX2X_MSG_DCB, "dcbx_features.app.app_pri_tbl[%x].appBitfield %x\n", i, features->app.app_pri_tbl[i].appBitfield); } @@ -221,13 +195,16 @@ static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp, u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR)) - DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_ERROR\n"); + DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_ERROR\n"); if (GET_FLAGS(error, DCBX_LOCAL_APP_MISMATCH)) - DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_MISMATCH\n"); + DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_MISMATCH\n"); + if (GET_FLAGS(error, DCBX_REMOTE_APP_TLV_NOT_FOUND)) + DP(BNX2X_MSG_DCB, "DCBX_REMOTE_APP_TLV_NOT_FOUND\n"); if (app->enabled && - !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR | DCBX_LOCAL_APP_MISMATCH)) { + !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR | DCBX_LOCAL_APP_MISMATCH | + DCBX_REMOTE_APP_TLV_NOT_FOUND)) { bp->dcbx_port_params.app.enabled = true; @@ -256,7 +233,7 @@ static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp, LLFC_TRAFFIC_TYPE_ISCSI); } } else { - DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_DISABLED\n"); + DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_DISABLED\n"); bp->dcbx_port_params.app.enabled = false; for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++) ttp[index] = INVALID_TRAFFIC_TYPE_PRIORITY; @@ -274,10 +251,11 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp, memset(&pg_help_data, 0, sizeof(struct pg_help_data)); - if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR)) - DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ERROR\n"); + DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_ERROR\n"); + if (GET_FLAGS(error, DCBX_REMOTE_ETS_TLV_NOT_FOUND)) + DP(BNX2X_MSG_DCB, "DCBX_REMOTE_ETS_TLV_NOT_FOUND\n"); /* Clean up old settings of ets on COS */ for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params) ; i++) { @@ -287,10 +265,10 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp, cos_params[i].pri_bitmask = 0; } - if (bp->dcbx_port_params.app.enabled && - !GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR) && - ets->enabled) { - DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ENABLE\n"); + if (bp->dcbx_port_params.app.enabled && ets->enabled && + !GET_FLAGS(error, + DCBX_LOCAL_ETS_ERROR | DCBX_REMOTE_ETS_TLV_NOT_FOUND)) { + DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_ENABLE\n"); bp->dcbx_port_params.ets.enabled = true; bnx2x_dcbx_get_ets_pri_pg_tbl(bp, @@ -305,7 +283,7 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp, ets, pg_pri_orginal_spread); } else { - DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_DISABLED\n"); + DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_DISABLED\n"); bp->dcbx_port_params.ets.enabled = false; ets->pri_pg_tbl[0] = 0; @@ -317,18 +295,19 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp, static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp, struct dcbx_pfc_feature *pfc, u32 error) { - if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR)) - DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_ERROR\n"); + DP(BNX2X_MSG_DCB, "DCBX_LOCAL_PFC_ERROR\n"); - if (bp->dcbx_port_params.app.enabled && - !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR | DCBX_LOCAL_PFC_MISMATCH) && - pfc->enabled) { + if (GET_FLAGS(error, DCBX_REMOTE_PFC_TLV_NOT_FOUND)) + DP(BNX2X_MSG_DCB, 
"DCBX_REMOTE_PFC_TLV_NOT_FOUND\n"); + if (bp->dcbx_port_params.app.enabled && pfc->enabled && + !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR | DCBX_LOCAL_PFC_MISMATCH | + DCBX_REMOTE_PFC_TLV_NOT_FOUND)) { bp->dcbx_port_params.pfc.enabled = true; bp->dcbx_port_params.pfc.priority_non_pauseable_mask = ~(pfc->pri_en_bitmap); } else { - DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_DISABLED\n"); + DP(BNX2X_MSG_DCB, "DCBX_LOCAL_PFC_DISABLED\n"); bp->dcbx_port_params.pfc.enabled = false; bp->dcbx_port_params.pfc.priority_non_pauseable_mask = 0; } @@ -352,7 +331,7 @@ static void bnx2x_dcbx_map_nw(struct bnx2x *bp) for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params); i++) { if (cos_params[i].pri_bitmask & nw_prio) { /* extend the bitmask with unmapped */ - DP(NETIF_MSG_LINK, + DP(BNX2X_MSG_DCB, "cos %d extended with 0x%08x\n", i, unmapped); cos_params[i].pri_bitmask |= unmapped; break; @@ -384,7 +363,6 @@ static int bnx2x_dcbx_read_mib(struct bnx2x *bp, struct lldp_remote_mib *remote_mib ; struct lldp_local_mib *local_mib; - switch (read_mib_type) { case DCBX_READ_LOCAL_MIB: mib_size = sizeof(struct lldp_local_mib); @@ -430,8 +408,12 @@ static int bnx2x_dcbx_read_mib(struct bnx2x *bp, static void bnx2x_pfc_set_pfc(struct bnx2x *bp) { + int mfw_configured = SHMEM2_HAS(bp, drv_flags) && + GET_FLAGS(SHMEM2_RD(bp, drv_flags), + 1 << DRV_FLAGS_DCB_MFW_CONFIGURED); + if (bp->dcbx_port_params.pfc.enabled && - !(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR)) + (!(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) || mfw_configured)) /* * 1. Fills up common PFC structures if required * 2. Configure NIG, MAC and BRB via the elink @@ -441,30 +423,52 @@ static void bnx2x_pfc_set_pfc(struct bnx2x *bp) bnx2x_pfc_clear(bp); } -static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp) +int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp) { - struct bnx2x_func_state_params func_params = {0}; + struct bnx2x_func_state_params func_params = {NULL}; + int rc; func_params.f_obj = &bp->func_obj; func_params.cmd = BNX2X_F_CMD_TX_STOP; - DP(NETIF_MSG_LINK, "STOP TRAFFIC\n"); - return bnx2x_func_state_change(bp, &func_params); + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); + + DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n"); + + rc = bnx2x_func_state_change(bp, &func_params); + if (rc) { + BNX2X_ERR("Unable to hold traffic for HW configuration\n"); + bnx2x_panic(); + } + + return rc; } -static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) +int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) { - struct bnx2x_func_state_params func_params = {0}; + struct bnx2x_func_state_params func_params = {NULL}; struct bnx2x_func_tx_start_params *tx_params = &func_params.params.tx_start; + int rc; func_params.f_obj = &bp->func_obj; func_params.cmd = BNX2X_F_CMD_TX_START; + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); + bnx2x_dcbx_fw_struct(bp, tx_params); - DP(NETIF_MSG_LINK, "START TRAFFIC\n"); - return bnx2x_func_state_change(bp, &func_params); + DP(BNX2X_MSG_DCB, "START TRAFFIC\n"); + + rc = bnx2x_func_state_change(bp, &func_params); + if (rc) { + BNX2X_ERR("Unable to resume traffic after HW configuration\n"); + bnx2x_panic(); + } + + return rc; } static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp) @@ -529,7 +533,7 @@ static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp) /* * In E3B0 the configuration may have more than 2 COS. 
*/ -void bnx2x_dcbx_update_ets_config(struct bnx2x *bp) +static void bnx2x_dcbx_update_ets_config(struct bnx2x *bp) { struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets); struct bnx2x_ets_params ets_params = { 0 }; @@ -569,10 +573,14 @@ void bnx2x_dcbx_update_ets_config(struct bnx2x *bp) static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp) { + int mfw_configured = SHMEM2_HAS(bp, drv_flags) && + GET_FLAGS(SHMEM2_RD(bp, drv_flags), + 1 << DRV_FLAGS_DCB_MFW_CONFIGURED); + bnx2x_ets_disabled(&bp->link_params, &bp->link_vars); if (!bp->dcbx_port_params.ets.enabled || - (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR)) + ((bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) && !mfw_configured)) return; if (CHIP_IS_E3B0(bp)) @@ -588,7 +596,7 @@ static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp) u32 dcbx_remote_mib_offset = SHMEM2_RD(bp, dcbx_remote_mib_offset); int rc; - DP(NETIF_MSG_LINK, "dcbx_remote_mib_offset 0x%x\n", + DP(BNX2X_MSG_DCB, "dcbx_remote_mib_offset 0x%x\n", dcbx_remote_mib_offset); if (SHMEM_DCBX_REMOTE_MIB_NONE == dcbx_remote_mib_offset) { @@ -600,7 +608,7 @@ static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp) DCBX_READ_REMOTE_MIB); if (rc) { - BNX2X_ERR("Faild to read remote mib from FW\n"); + BNX2X_ERR("Failed to read remote mib from FW\n"); return rc; } @@ -617,7 +625,7 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp) u32 dcbx_neg_res_offset = SHMEM2_RD(bp, dcbx_neg_res_offset); int rc; - DP(NETIF_MSG_LINK, "dcbx_neg_res_offset 0x%x\n", dcbx_neg_res_offset); + DP(BNX2X_MSG_DCB, "dcbx_neg_res_offset 0x%x\n", dcbx_neg_res_offset); if (SHMEM_DCBX_NEG_RES_NONE == dcbx_neg_res_offset) { BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n"); @@ -628,7 +636,7 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp) DCBX_READ_LOCAL_MIB); if (rc) { - BNX2X_ERR("Faild to read local mib from FW\n"); + BNX2X_ERR("Failed to read local mib from FW\n"); return rc; } @@ -638,7 +646,6 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp) return 0; } - #ifdef BCM_DCBNL static inline u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent) @@ -693,18 +700,17 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp) if (bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask & (1 << prio)) { bp->prio_to_cos[prio] = cos; - DP(NETIF_MSG_LINK, + DP(BNX2X_MSG_DCB, "tx_mapping %d --> %d\n", prio, cos); } } } /* setup tc must be called under rtnl lock, but we can't take it here - * as we are handling an attetntion on a work queue which must be + * as we are handling an attention on a work queue which must be * flushed at some rtnl-locked contexts (e.g. 
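The recurring mfw_configured test in this area also shows the corrected flag convention: DRV_FLAGS_DCB_MFW_CONFIGURED is a bit *number*, so it must be shifted (1 << flag) before being masked against the shmem drv_flags word, and the same shift now appears wherever DRV_FLAGS_DCB_CONFIGURED is used. A self-contained model of the test, with the SHMEM2_HAS()/SHMEM2_RD() accessors replaced by plain variables and an assumed bit position:

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#define DRV_FLAGS_DCB_MFW_CONFIGURED 3	/* bit number, illustrative */

/* Hypothetical stand-ins for SHMEM2_HAS()/SHMEM2_RD() */
static bool shmem2_has_drv_flags = true;
static uint32_t shmem2_drv_flags = 1u << DRV_FLAGS_DCB_MFW_CONFIGURED;

/* The patch keeps ETS/PFC configuration alive on a remote-MIB error
 * when the management FW itself pushed the configuration. */
static bool mfw_configured(void)
{
	return shmem2_has_drv_flags &&
	       (shmem2_drv_flags & (1u << DRV_FLAGS_DCB_MFW_CONFIGURED));
}

int main(void)
{
	printf("mfw configured: %d\n", mfw_configured());
	return 0;
}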
if down) */ - if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) - schedule_delayed_work(&bp->sp_rtnl_task, 0); + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_SETUP_TC, 0); } void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) @@ -712,7 +718,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) switch (state) { case BNX2X_DCBX_STATE_NEG_RECEIVED: { - DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n"); + DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_NEG_RECEIVED\n"); #ifdef BCM_DCBNL /** * Delete app tlvs from dcbnl before reading new @@ -720,7 +726,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) */ bnx2x_dcbnl_update_applist(bp, true); - /* Read rmeote mib if dcbx is in the FW */ + /* Read remote mib if dcbx is in the FW */ if (bnx2x_dcbx_read_shmem_remote_mib(bp)) return; #endif @@ -735,7 +741,9 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) bp->dcbx_error); /* mark DCBX result for PMF migration */ - bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1); + bnx2x_update_drv_flags(bp, + 1 << DRV_FLAGS_DCB_CONFIGURED, + 1); #ifdef BCM_DCBNL /* * Add new app tlvs to dcbnl @@ -749,26 +757,26 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) bnx2x_dcbx_update_tc_mapping(bp); /* - * allow other funtions to update their netdevices + * allow other functions to update their netdevices * accordingly */ if (IS_MF(bp)) bnx2x_link_sync_notify(bp); - bnx2x_dcbx_stop_hw_tx(bp); - + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_STOP, 0); return; } case BNX2X_DCBX_STATE_TX_PAUSED: - DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n"); + DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_PAUSED\n"); bnx2x_pfc_set_pfc(bp); bnx2x_dcbx_update_ets_params(bp); - bnx2x_dcbx_resume_hw_tx(bp); + /* ets may affect cmng configuration: reinit it in hw */ + bnx2x_set_local_cmng(bp); return; case BNX2X_DCBX_STATE_TX_RELEASED: - DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n"); + DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_RELEASED\n"); bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0); #ifdef BCM_DCBNL /* @@ -859,7 +867,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, DCBX_PG_BW_SET(af->ets.pg_bw_tbl, i, (u8)dp->admin_configuration_bw_precentage[i]); - DP(NETIF_MSG_LINK, "pg_bw_tbl[%d] = %02x\n", + DP(BNX2X_MSG_DCB, "pg_bw_tbl[%d] = %02x\n", i, DCBX_PG_BW_GET(af->ets.pg_bw_tbl, i)); } @@ -867,11 +875,11 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, DCBX_PRI_PG_SET(af->ets.pri_pg_tbl, i, (u8)dp->admin_configuration_ets_pg[i]); - DP(NETIF_MSG_LINK, "pri_pg_tbl[%d] = %02x\n", + DP(BNX2X_MSG_DCB, "pri_pg_tbl[%d] = %02x\n", i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i)); } - /*For IEEE admin_recommendation_bw_precentage + /*For IEEE admin_recommendation_bw_percentage *For IEEE admin_recommendation_ets_pg */ af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap; for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) { @@ -903,13 +911,11 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, } af->app.default_pri = (u8)dp->admin_default_priority; - } /* Write the data. */ bnx2x_write_data(bp, (u32 *)&admin_mib, offset, sizeof(struct lldp_admin_mib)); - } void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) @@ -921,7 +927,7 @@ void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) bp->dcb_state = false; bp->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID; } - DP(NETIF_MSG_LINK, "DCB state [%s:%s]\n", + DP(BNX2X_MSG_DCB, "DCB state [%s:%s]\n", dcb_on ? "ON" : "OFF", dcbx_enabled == BNX2X_DCBX_ENABLED_OFF ? 
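bnx2x_schedule_sp_rtnl() replaces the open-coded test_and_set_bit()/schedule_delayed_work() pair seen removed here, keeping the same dedup property: only the caller that flips the state bit from 0 to 1 actually queues the work, so a burst of DCBX events yields a single deferred task. A userspace model of that idiom (non-atomic here; the kernel primitive is atomic):

#include <stdbool.h>
#include <stdio.h>

static bool test_and_set_bit_ul(int nr, unsigned long *addr)
{
	unsigned long mask = 1UL << nr;
	bool was_set = *addr & mask;

	*addr |= mask;		/* kernel test_and_set_bit() is atomic */
	return was_set;
}

int main(void)
{
	unsigned long sp_rtnl_state = 0;
	int nr = 0, scheduled = 0, i;

	for (i = 0; i < 5; i++)		/* five back-to-back events */
		if (!test_and_set_bit_ul(nr, &sp_rtnl_state))
			scheduled++;
	printf("scheduled %d time(s)\n", scheduled);	/* 1 */
	return 0;
}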
"user-mode" : dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF ? "on-chip static" : @@ -943,30 +949,30 @@ void bnx2x_dcbx_init_params(struct bnx2x *bp) bp->dcbx_config_params.admin_application_priority_tx_enable = 1; bp->dcbx_config_params.admin_ets_reco_valid = 1; bp->dcbx_config_params.admin_app_priority_willing = 1; - bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 00; - bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 50; - bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 50; + bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 100; + bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 0; + bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 0; bp->dcbx_config_params.admin_configuration_bw_precentage[3] = 0; bp->dcbx_config_params.admin_configuration_bw_precentage[4] = 0; bp->dcbx_config_params.admin_configuration_bw_precentage[5] = 0; bp->dcbx_config_params.admin_configuration_bw_precentage[6] = 0; bp->dcbx_config_params.admin_configuration_bw_precentage[7] = 0; - bp->dcbx_config_params.admin_configuration_ets_pg[0] = 1; + bp->dcbx_config_params.admin_configuration_ets_pg[0] = 0; bp->dcbx_config_params.admin_configuration_ets_pg[1] = 0; bp->dcbx_config_params.admin_configuration_ets_pg[2] = 0; - bp->dcbx_config_params.admin_configuration_ets_pg[3] = 2; + bp->dcbx_config_params.admin_configuration_ets_pg[3] = 0; bp->dcbx_config_params.admin_configuration_ets_pg[4] = 0; bp->dcbx_config_params.admin_configuration_ets_pg[5] = 0; bp->dcbx_config_params.admin_configuration_ets_pg[6] = 0; bp->dcbx_config_params.admin_configuration_ets_pg[7] = 0; - bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 0; - bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 1; - bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 2; + bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 100; + bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 0; + bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 0; bp->dcbx_config_params.admin_recommendation_bw_precentage[3] = 0; - bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 7; - bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 5; - bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 6; - bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 7; + bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 0; + bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 0; + bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 0; + bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 0; bp->dcbx_config_params.admin_recommendation_ets_pg[0] = 0; bp->dcbx_config_params.admin_recommendation_ets_pg[1] = 1; bp->dcbx_config_params.admin_recommendation_ets_pg[2] = 2; @@ -975,60 +981,61 @@ void bnx2x_dcbx_init_params(struct bnx2x *bp) bp->dcbx_config_params.admin_recommendation_ets_pg[5] = 5; bp->dcbx_config_params.admin_recommendation_ets_pg[6] = 6; bp->dcbx_config_params.admin_recommendation_ets_pg[7] = 7; - bp->dcbx_config_params.admin_pfc_bitmap = 0x8; /* FCoE(3) enable */ - bp->dcbx_config_params.admin_priority_app_table[0].valid = 1; - bp->dcbx_config_params.admin_priority_app_table[1].valid = 1; + bp->dcbx_config_params.admin_pfc_bitmap = 0x0; + bp->dcbx_config_params.admin_priority_app_table[0].valid = 0; + bp->dcbx_config_params.admin_priority_app_table[1].valid = 0; bp->dcbx_config_params.admin_priority_app_table[2].valid = 0; 
bp->dcbx_config_params.admin_priority_app_table[3].valid = 0; - bp->dcbx_config_params.admin_priority_app_table[0].priority = 3; - bp->dcbx_config_params.admin_priority_app_table[1].priority = 0; - bp->dcbx_config_params.admin_priority_app_table[2].priority = 0; - bp->dcbx_config_params.admin_priority_app_table[3].priority = 0; - bp->dcbx_config_params.admin_priority_app_table[0].traffic_type = 0; - bp->dcbx_config_params.admin_priority_app_table[1].traffic_type = 1; - bp->dcbx_config_params.admin_priority_app_table[2].traffic_type = 0; - bp->dcbx_config_params.admin_priority_app_table[3].traffic_type = 0; - bp->dcbx_config_params.admin_priority_app_table[0].app_id = 0x8906; - bp->dcbx_config_params.admin_priority_app_table[1].app_id = 3260; - bp->dcbx_config_params.admin_priority_app_table[2].app_id = 0; - bp->dcbx_config_params.admin_priority_app_table[3].app_id = 0; - bp->dcbx_config_params.admin_default_priority = - bp->dcbx_config_params.admin_priority_app_table[1].priority; -} - -void bnx2x_dcbx_init(struct bnx2x *bp) + bp->dcbx_config_params.admin_default_priority = 0; +} + +void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem) { u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE; + /* only PMF can send ADMIN msg to MFW in old MFW versions */ + if ((!bp->port.pmf) && (!(bp->flags & BC_SUPPORTS_DCBX_MSG_NON_PMF))) + return; + if (bp->dcbx_enabled <= 0) return; /* validate: * chip of good for dcbx version, * dcb is wanted - * the function is pmf * shmem2 contains DCBX support fields */ - DP(NETIF_MSG_LINK, "dcb_state %d bp->port.pmf %d\n", + DP(BNX2X_MSG_DCB, "dcb_state %d bp->port.pmf %d\n", bp->dcb_state, bp->port.pmf); - if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf && + if (bp->dcb_state == BNX2X_DCB_STATE_ON && SHMEM2_HAS(bp, dcbx_lldp_params_offset)) { dcbx_lldp_params_offset = SHMEM2_RD(bp, dcbx_lldp_params_offset); - DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n", + DP(BNX2X_MSG_DCB, "dcbx_lldp_params_offset 0x%x\n", dcbx_lldp_params_offset); - bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0); + bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0); if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) { - bnx2x_dcbx_admin_mib_updated_params(bp, - dcbx_lldp_params_offset); + /* need HW lock to avoid scenario of two drivers + * writing in parallel to shmem + */ + bnx2x_acquire_hw_lock(bp, + HW_LOCK_RESOURCE_DCBX_ADMIN_MIB); + if (update_shmem) + bnx2x_dcbx_admin_mib_updated_params(bp, + dcbx_lldp_params_offset); /* Let HW start negotiation */ bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0); + /* release HW lock only after MFW acks that it finished + * reading values from shmem + */ + bnx2x_release_hw_lock(bp, + HW_LOCK_RESOURCE_DCBX_ADMIN_MIB); } } } @@ -1039,38 +1046,36 @@ bnx2x_dcbx_print_cos_params(struct bnx2x *bp, u8 pri = 0; u8 cos = 0; - DP(NETIF_MSG_LINK, + DP(BNX2X_MSG_DCB, "pfc_fw_cfg->dcb_version %x\n", pfc_fw_cfg->dcb_version); - DP(NETIF_MSG_LINK, - "pdev->params.dcbx_port_params.pfc." - "priority_non_pauseable_mask %x\n", + DP(BNX2X_MSG_DCB, + "pdev->params.dcbx_port_params.pfc.priority_non_pauseable_mask %x\n", bp->dcbx_port_params.pfc.priority_non_pauseable_mask); for (cos = 0 ; cos < bp->dcbx_port_params.ets.num_of_cos ; cos++) { - DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets." 
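The reworked bnx2x_dcbx_init() wraps both the shmem admin-MIB update and the DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG handshake in HW_LOCK_RESOURCE_DCBX_ADMIN_MIB, releasing only after the MFW has acked, so two driver instances cannot interleave their writes with the firmware's read. A pthread model of why the handshake must stay inside the critical section; every identifier in this sketch is a stand-in, not the driver's API:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;
static int shmem_admin_mib;

static void push_admin_mib(int cfg)
{
	pthread_mutex_lock(&hw_lock);	/* bnx2x_acquire_hw_lock() */
	shmem_admin_mib = cfg;		/* admin-MIB write to shmem */
	/* The FW command returns only after the MFW has read shmem,
	 * so releasing the lock after it is safe. */
	printf("FW saw cfg %d\n", shmem_admin_mib);
	pthread_mutex_unlock(&hw_lock);	/* bnx2x_release_hw_lock() */
}

static void *worker(void *arg)
{
	push_admin_mib(*(int *)arg);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;
	int a = 1, b = 2;

	pthread_create(&t1, NULL, worker, &a);
	pthread_create(&t2, NULL, worker, &b);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}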
- "cos_params[%d].pri_bitmask %x\n", cos, - bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask); + DP(BNX2X_MSG_DCB, + "pdev->params.dcbx_port_params.ets.cos_params[%d].pri_bitmask %x\n", + cos, bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask); - DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets." - "cos_params[%d].bw_tbl %x\n", cos, - bp->dcbx_port_params.ets.cos_params[cos].bw_tbl); + DP(BNX2X_MSG_DCB, + "pdev->params.dcbx_port_params.ets.cos_params[%d].bw_tbl %x\n", + cos, bp->dcbx_port_params.ets.cos_params[cos].bw_tbl); - DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets." - "cos_params[%d].strict %x\n", cos, - bp->dcbx_port_params.ets.cos_params[cos].strict); + DP(BNX2X_MSG_DCB, + "pdev->params.dcbx_port_params.ets.cos_params[%d].strict %x\n", + cos, bp->dcbx_port_params.ets.cos_params[cos].strict); - DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets." - "cos_params[%d].pauseable %x\n", cos, - bp->dcbx_port_params.ets.cos_params[cos].pauseable); + DP(BNX2X_MSG_DCB, + "pdev->params.dcbx_port_params.ets.cos_params[%d].pauseable %x\n", + cos, bp->dcbx_port_params.ets.cos_params[cos].pauseable); } for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) { - DP(NETIF_MSG_LINK, - "pfc_fw_cfg->traffic_type_to_priority_cos[%d]." - "priority %x\n", pri, - pfc_fw_cfg->traffic_type_to_priority_cos[pri].priority); + DP(BNX2X_MSG_DCB, + "pfc_fw_cfg->traffic_type_to_priority_cos[%d].priority %x\n", + pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].priority); - DP(NETIF_MSG_LINK, + DP(BNX2X_MSG_DCB, "pfc_fw_cfg->traffic_type_to_priority_cos[%d].cos %x\n", pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].cos); } @@ -1084,7 +1089,7 @@ static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp, bool pg_found = false; u32 i, traf_type, add_traf_type, add_pg; u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; - struct pg_entry_help_data *data = help_data->data; /*shotcut*/ + struct pg_entry_help_data *data = help_data->data; /*shortcut*/ /* Set to invalid */ for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) @@ -1117,7 +1122,7 @@ static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp, help_data->num_of_pg++; } } - DP(NETIF_MSG_LINK, + DP(BNX2X_MSG_DCB, "add_traf_type %d pg_found %s num_of_pg %d\n", add_traf_type, (false == pg_found) ? "NO" : "YES", help_data->num_of_pg); @@ -1180,7 +1185,8 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry)); else /* If we join a group and one is strict - * than the bw rulls */ + * than the bw rules + */ cos_data->data[entry].strict = BNX2X_DCBX_STRICT_COS_HIGHEST; } @@ -1189,7 +1195,6 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, BNX2X_ERR("dcbx error: Both groups must have priorities\n"); } - #ifndef POWER_OF_2 #define POWER_OF_2(x) ((0 != x) && (0 == (x & (x-1)))) #endif @@ -1292,7 +1297,7 @@ static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp, } else { /* If there are only pauseable priorities or * only non-pauseable,* the lower priorities go - * to the first queue and the higherpriorities go + * to the first queue and the higher priorities go * to the second queue. 
*/ cos_data->data[0].pausable = @@ -1310,8 +1315,7 @@ static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp, } if (i == LLFC_DRIVER_TRAFFIC_TYPE_MAX) - BNX2X_ERR("Invalid value for pri_join_mask -" - " could not find a priority\n"); + BNX2X_ERR("Invalid value for pri_join_mask - could not find a priority\n"); cos_data->data[0].pri_join_mask = pri_mask_without_pri; cos_data->data[1].pri_join_mask = pri_tested; @@ -1493,7 +1497,7 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( * queue and one priority goes to the second queue. * * We will join this two cases: - * if one is BW limited it will go to the secoend queue + * if one is BW limited it will go to the second queue * otherwise the last priority will get it */ @@ -1513,7 +1517,8 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( false == b_found_strict) /* last entry will be handled separately * If no priority is strict than last - * enty goes to last queue.*/ + * entry goes to last queue. + */ entry = 1; cos_data->data[entry].pri_join_mask |= pri_tested; @@ -1525,7 +1530,8 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( b_found_strict = true; cos_data->data[1].pri_join_mask |= pri_tested; /* If we join a group and one is strict - * than the bw rulls */ + * than the bw rules + */ cos_data->data[1].strict = BNX2X_DCBX_STRICT_COS_HIGHEST; } @@ -1533,7 +1539,6 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( } } - static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp, struct pg_help_data *help_data, struct dcbx_ets_feature *ets, @@ -1542,7 +1547,6 @@ static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp, u32 pri_join_mask, u8 num_of_dif_pri) { - /* default E2 settings */ cos_data->num_of_cos = DCBX_COS_MAX_NUM_E2; @@ -1624,8 +1628,10 @@ static int bnx2x_dcbx_spread_strict_pri(struct bnx2x *bp, num_of_app_pri--; } - if (num_spread_of_entries) + if (num_spread_of_entries) { + BNX2X_ERR("Didn't succeed to spread strict priorities\n"); return -EINVAL; + } return 0; } @@ -1636,7 +1642,6 @@ static u8 bnx2x_dcbx_cee_fill_strict_pri(struct bnx2x *bp, u8 num_spread_of_entries, u8 strict_app_pris) { - if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry, num_spread_of_entries, strict_app_pris)) { @@ -1673,8 +1678,7 @@ static void bnx2x_dcbx_cee_fill_cos_params(struct bnx2x *bp, if (help_data->num_of_pg > DCBX_COS_MAX_NUM_E3B0) { if (bnx2x_dcbx_join_pgs(bp, ets, help_data, DCBX_COS_MAX_NUM_E3B0)) { - BNX2X_ERR("Unable to reduce the number of PGs -" - "we will disables ETS\n"); + BNX2X_ERR("Unable to reduce the number of PGs - we will disables ETS\n"); bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, pri_join_mask); return; @@ -1774,24 +1778,24 @@ static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp, if (p->pauseable && DCBX_PFC_PRI_GET_NON_PAUSE(bp, p->pri_bitmask) != 0) - BNX2X_ERR("Inconsistent config for " - "pausable COS %d\n", i); + BNX2X_ERR("Inconsistent config for pausable COS %d\n", + i); if (!p->pauseable && DCBX_PFC_PRI_GET_PAUSE(bp, p->pri_bitmask) != 0) - BNX2X_ERR("Inconsistent config for " - "nonpausable COS %d\n", i); + BNX2X_ERR("Inconsistent config for nonpausable COS %d\n", + i); } } if (p->pauseable) - DP(NETIF_MSG_LINK, "COS %d PAUSABLE prijoinmask 0x%x\n", + DP(BNX2X_MSG_DCB, "COS %d PAUSABLE prijoinmask 0x%x\n", i, cos_data.data[i].pri_join_mask); else - DP(NETIF_MSG_LINK, "COS %d NONPAUSABLE prijoinmask " - "0x%x\n", - i, cos_data.data[i].pri_join_mask); + DP(BNX2X_MSG_DCB, + "COS %d NONPAUSABLE 
prijoinmask 0x%x\n", + i, cos_data.data[i].pri_join_mask); } bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos ; @@ -1806,7 +1810,7 @@ static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) { set_configuration_ets_pg[i] = DCBX_PRI_PG_GET(pri_pg_tbl, i); - DP(NETIF_MSG_LINK, "set_configuration_ets_pg[%d] = 0x%x\n", + DP(BNX2X_MSG_DCB, "set_configuration_ets_pg[%d] = 0x%x\n", i, set_configuration_ets_pg[i]); } } @@ -1818,11 +1822,14 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp, u8 cos = 0, pri = 0; struct priority_cos *tt2cos; u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; + int mfw_configured = SHMEM2_HAS(bp, drv_flags) && + GET_FLAGS(SHMEM2_RD(bp, drv_flags), + 1 << DRV_FLAGS_DCB_MFW_CONFIGURED); memset(pfc_fw_cfg, 0, sizeof(*pfc_fw_cfg)); /* to disable DCB - the structure must be zeroed */ - if (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) + if ((bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) && !mfw_configured) return; /*shortcut*/ @@ -1853,11 +1860,11 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp, void bnx2x_dcbx_pmf_update(struct bnx2x *bp) { - /* if we need to syncronize DCBX result from prev PMF + /* if we need to synchronize DCBX result from prev PMF * read it from shmem and update bp and netdev accordingly */ if (SHMEM2_HAS(bp, drv_flags) && - GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) { + GET_FLAGS(SHMEM2_RD(bp, drv_flags), 1 << DRV_FLAGS_DCB_CONFIGURED)) { /* Read neg results if dcbx is in the FW */ if (bnx2x_dcbx_read_shmem_neg_results(bp)) return; @@ -1881,7 +1888,6 @@ void bnx2x_dcbx_pmf_update(struct bnx2x *bp) * dcbx negotiation. */ bnx2x_dcbx_update_tc_mapping(bp); - } } @@ -1902,14 +1908,21 @@ static inline bool bnx2x_dcbnl_set_valid(struct bnx2x *bp) static u8 bnx2x_dcbnl_get_state(struct net_device *netdev) { struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "state = %d\n", bp->dcb_state); + DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcb_state); return bp->dcb_state; } static u8 bnx2x_dcbnl_set_state(struct net_device *netdev, u8 state) { struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off"); + DP(BNX2X_MSG_DCB, "state = %s\n", state ? "on" : "off"); + + /* Fail to set state to "enabled" if dcbx is disabled in nvram */ + if (state && ((bp->dcbx_enabled == BNX2X_DCBX_ENABLED_OFF) || + (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_INVALID))) { + DP(BNX2X_MSG_DCB, "Can not set dcbx to enabled while it is disabled in nvm\n"); + return 1; + } bnx2x_dcbx_set_state(bp, (state ? 
true : false), bp->dcbx_enabled); return 0; @@ -1919,15 +1932,15 @@ static void bnx2x_dcbnl_get_perm_hw_addr(struct net_device *netdev, u8 *perm_addr) { struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "GET-PERM-ADDR\n"); + DP(BNX2X_MSG_DCB, "GET-PERM-ADDR\n"); /* first the HW mac address */ memcpy(perm_addr, netdev->dev_addr, netdev->addr_len); -#ifdef BCM_CNIC - /* second SAN address */ - memcpy(perm_addr+netdev->addr_len, bp->fip_mac, netdev->addr_len); -#endif + if (CNIC_LOADED(bp)) + /* second SAN address */ + memcpy(perm_addr+netdev->addr_len, bp->fip_mac, + netdev->addr_len); } static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio, @@ -1936,19 +1949,19 @@ static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio, { struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, pgid); + DP(BNX2X_MSG_DCB, "prio[%d] = %d\n", prio, pgid); if (!bnx2x_dcbnl_set_valid(bp) || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES) return; /** - * bw_pct ingnored - band-width percentage devision between user + * bw_pct ignored - band-width percentage devision between user * priorities within the same group is not * standard and hence not supported * - * prio_type igonred - priority levels within the same group are not + * prio_type ignored - priority levels within the same group are not * standard and hence are not supported. According * to the standard pgid 15 is dedicated to strict - * prioirty traffic (on the port level). + * priority traffic (on the port level). * * up_map ignored */ @@ -1961,7 +1974,7 @@ static void bnx2x_dcbnl_set_pg_bwgcfg_tx(struct net_device *netdev, int pgid, u8 bw_pct) { struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "pgid[%d] = %d\n", pgid, bw_pct); + DP(BNX2X_MSG_DCB, "pgid[%d] = %d\n", pgid, bw_pct); if (!bnx2x_dcbnl_set_valid(bp) || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES) return; @@ -1975,14 +1988,14 @@ static void bnx2x_dcbnl_set_pg_tccfg_rx(struct net_device *netdev, int prio, u8 up_map) { struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n"); + DP(BNX2X_MSG_DCB, "Nothing to set; No RX support\n"); } static void bnx2x_dcbnl_set_pg_bwgcfg_rx(struct net_device *netdev, int pgid, u8 bw_pct) { struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n"); + DP(BNX2X_MSG_DCB, "Nothing to set; No RX support\n"); } static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio, @@ -1990,17 +2003,17 @@ static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio, u8 *up_map) { struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "prio = %d\n", prio); + DP(BNX2X_MSG_DCB, "prio = %d\n", prio); /** - * bw_pct ingnored - band-width percentage devision between user + * bw_pct ignored - band-width percentage devision between user * priorities within the same group is not * standard and hence not supported * - * prio_type igonred - priority levels within the same group are not + * prio_type ignored - priority levels within the same group are not * standard and hence are not supported. According * to the standard pgid 15 is dedicated to strict - * prioirty traffic (on the port level). + * priority traffic (on the port level). 
* * up_map ignored */ @@ -2016,7 +2029,7 @@ static void bnx2x_dcbnl_get_pg_bwgcfg_tx(struct net_device *netdev, int pgid, u8 *bw_pct) { struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "pgid = %d\n", pgid); + DP(BNX2X_MSG_DCB, "pgid = %d\n", pgid); *bw_pct = 0; @@ -2031,7 +2044,7 @@ static void bnx2x_dcbnl_get_pg_tccfg_rx(struct net_device *netdev, int prio, u8 *up_map) { struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n"); + DP(BNX2X_MSG_DCB, "Nothing to get; No RX support\n"); *prio_type = *pgid = *bw_pct = *up_map = 0; } @@ -2040,7 +2053,7 @@ static void bnx2x_dcbnl_get_pg_bwgcfg_rx(struct net_device *netdev, int pgid, u8 *bw_pct) { struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n"); + DP(BNX2X_MSG_DCB, "Nothing to get; No RX support\n"); *bw_pct = 0; } @@ -2049,22 +2062,24 @@ static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 setting) { struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, setting); + DP(BNX2X_MSG_DCB, "prio[%d] = %d\n", prio, setting); if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES) return; - bp->dcbx_config_params.admin_pfc_bitmap |= ((setting ? 1 : 0) << prio); - - if (setting) + if (setting) { + bp->dcbx_config_params.admin_pfc_bitmap |= (1 << prio); bp->dcbx_config_params.admin_pfc_tx_enable = 1; + } else { + bp->dcbx_config_params.admin_pfc_bitmap &= ~(1 << prio); + } } static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting) { struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "prio = %d\n", prio); + DP(BNX2X_MSG_DCB, "prio = %d\n", prio); *setting = 0; @@ -2079,21 +2094,23 @@ static u8 bnx2x_dcbnl_set_all(struct net_device *netdev) struct bnx2x *bp = netdev_priv(netdev); int rc = 0; - DP(NETIF_MSG_LINK, "SET-ALL\n"); + DP(BNX2X_MSG_DCB, "SET-ALL\n"); if (!bnx2x_dcbnl_set_valid(bp)) return 1; if (bp->recovery_state != BNX2X_RECOVERY_DONE) { - netdev_err(bp->dev, "Handling parity error recovery. " - "Try again later\n"); + netdev_err(bp->dev, + "Handling parity error recovery. 
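The bnx2x_dcbnl_set_pfc_cfg() change just below is a classic read-modify-write repair: the old OR-only update could enable a priority in admin_pfc_bitmap but never clear it again. The corrected shape, runnable:

#include <stdio.h>
#include <stdint.h>

static void set_pfc_prio(uint8_t *bitmap, int prio, int setting)
{
	if (setting)
		*bitmap |= 1u << prio;
	else
		*bitmap &= ~(1u << prio);	/* the old code had no clear path */
}

int main(void)
{
	uint8_t admin_pfc_bitmap = 0;

	set_pfc_prio(&admin_pfc_bitmap, 3, 1);
	printf("0x%02x\n", admin_pfc_bitmap);	/* 0x08 */
	set_pfc_prio(&admin_pfc_bitmap, 3, 0);
	printf("0x%02x\n", admin_pfc_bitmap);	/* 0x00 */
	return 0;
}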
Try again later\n"); return 1; } if (netif_running(bp->dev)) { - bnx2x_nic_unload(bp, UNLOAD_NORMAL); - rc = bnx2x_nic_load(bp, LOAD_NORMAL); + bnx2x_update_drv_flags(bp, + 1 << DRV_FLAGS_DCB_MFW_CONFIGURED, + 1); + bnx2x_dcbx_init(bp, true); } - DP(NETIF_MSG_LINK, "set_dcbx_params done (%d)\n", rc); + DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc); if (rc) return 1; @@ -2132,22 +2149,25 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap) *cap = BNX2X_DCBX_CAPS; break; default: - rval = -EINVAL; + BNX2X_ERR("Non valid capability ID\n"); + rval = 1; break; } - } else - rval = -EINVAL; + } else { + DP(BNX2X_MSG_DCB, "DCB disabled\n"); + rval = 1; + } - DP(NETIF_MSG_LINK, "capid %d:%x\n", capid, *cap); + DP(BNX2X_MSG_DCB, "capid %d:%x\n", capid, *cap); return rval; } -static u8 bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num) +static int bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num) { struct bnx2x *bp = netdev_priv(netdev); u8 rval = 0; - DP(NETIF_MSG_LINK, "tcid %d\n", tcid); + DP(BNX2X_MSG_DCB, "tcid %d\n", tcid); if (bp->dcb_state) { switch (tcid) { @@ -2160,26 +2180,29 @@ static u8 bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num) DCBX_COS_MAX_NUM_E2; break; default: - rval = -EINVAL; + BNX2X_ERR("Non valid TC-ID\n"); + rval = 1; break; } - } else - rval = -EINVAL; + } else { + DP(BNX2X_MSG_DCB, "DCB disabled\n"); + rval = 1; + } return rval; } -static u8 bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num) +static int bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num) { struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "num tcs = %d; Not supported\n", num); + DP(BNX2X_MSG_DCB, "num tcs = %d; Not supported\n", num); return -EINVAL; } -static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev) +static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev) { struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "state = %d\n", bp->dcbx_local_feat.pfc.enabled); + DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled); if (!bp->dcb_state) return 0; @@ -2190,7 +2213,7 @@ static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev) static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state) { struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off"); + DP(BNX2X_MSG_DCB, "state = %s\n", state ? 
"on" : "off"); if (!bnx2x_dcbnl_set_valid(bp)) return; @@ -2267,9 +2290,11 @@ static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up) bnx2x_admin_app_set_ent( &bp->dcbx_config_params.admin_priority_app_table[ff], idtype, idval, up); - else + else { /* app table is full */ + BNX2X_ERR("Application table is too large\n"); return -EBUSY; + } /* up configured, if not 0 make sure feature is enabled */ if (up) @@ -2283,11 +2308,13 @@ static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype, { struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "app_type %d, app_id %x, prio bitmap %d\n", + DP(BNX2X_MSG_DCB, "app_type %d, app_id %x, prio bitmap %d\n", idtype, idval, up); - if (!bnx2x_dcbnl_set_valid(bp)) + if (!bnx2x_dcbnl_set_valid(bp)) { + DP(BNX2X_MSG_DCB, "dcbnl call not valid\n"); return -EINVAL; + } /* verify idtype */ switch (idtype) { @@ -2295,6 +2322,7 @@ static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype, case DCB_APP_IDTYPE_PORTNUM: break; default: + DP(BNX2X_MSG_DCB, "Wrong ID type\n"); return -EINVAL; } return bnx2x_set_admin_app_up(bp, idtype, idval, up); @@ -2316,13 +2344,13 @@ static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev) static u8 bnx2x_dcbnl_set_dcbx(struct net_device *netdev, u8 state) { struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "state = %02x\n", state); + DP(BNX2X_MSG_DCB, "state = %02x\n", state); /* set dcbx mode */ if ((state & BNX2X_DCBX_CAPS) != state) { - BNX2X_ERR("Requested DCBX mode %x is beyond advertised " - "capabilities\n", state); + BNX2X_ERR("Requested DCBX mode %x is beyond advertised capabilities\n", + state); return 1; } @@ -2346,7 +2374,7 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid, struct bnx2x *bp = netdev_priv(netdev); u8 rval = 0; - DP(NETIF_MSG_LINK, "featid %d\n", featid); + DP(BNX2X_MSG_DCB, "featid %d\n", featid); if (bp->dcb_state) { *flags = 0; @@ -2354,29 +2382,35 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid, case DCB_FEATCFG_ATTR_PG: if (bp->dcbx_local_feat.ets.enabled) *flags |= DCB_FEATCFG_ENABLE; - if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR) + if (bp->dcbx_error & (DCBX_LOCAL_ETS_ERROR | + DCBX_REMOTE_MIB_ERROR)) *flags |= DCB_FEATCFG_ERROR; break; case DCB_FEATCFG_ATTR_PFC: if (bp->dcbx_local_feat.pfc.enabled) *flags |= DCB_FEATCFG_ENABLE; if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR | - DCBX_LOCAL_PFC_MISMATCH)) + DCBX_LOCAL_PFC_MISMATCH | + DCBX_REMOTE_MIB_ERROR)) *flags |= DCB_FEATCFG_ERROR; break; case DCB_FEATCFG_ATTR_APP: if (bp->dcbx_local_feat.app.enabled) *flags |= DCB_FEATCFG_ENABLE; if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR | - DCBX_LOCAL_APP_MISMATCH)) + DCBX_LOCAL_APP_MISMATCH | + DCBX_REMOTE_MIB_ERROR)) *flags |= DCB_FEATCFG_ERROR; break; default: - rval = -EINVAL; + BNX2X_ERR("Non valid feature-ID\n"); + rval = 1; break; } - } else - rval = -EINVAL; + } else { + DP(BNX2X_MSG_DCB, "DCB disabled\n"); + rval = 1; + } return rval; } @@ -2387,7 +2421,7 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid, struct bnx2x *bp = netdev_priv(netdev); u8 rval = 0; - DP(NETIF_MSG_LINK, "featid = %d flags = %02x\n", featid, flags); + DP(BNX2X_MSG_DCB, "featid = %d flags = %02x\n", featid, flags); /* ignore the 'advertise' flag */ if (bnx2x_dcbnl_set_valid(bp)) { @@ -2410,11 +2444,14 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid, flags & DCB_FEATCFG_WILLING ? 
1 : 0; break; default: - rval = -EINVAL; + BNX2X_ERR("Non valid feature-ID\n"); + rval = 1; break; } - } else - rval = -EINVAL; + } else { + DP(BNX2X_MSG_DCB, "dcbnl call not valid\n"); + rval = 1; + } return rval; } @@ -2425,7 +2462,7 @@ static int bnx2x_peer_appinfo(struct net_device *netdev, int i; struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "APP-INFO\n"); + DP(BNX2X_MSG_DCB, "APP-INFO\n"); info->willing = (bp->dcbx_remote_flags & DCBX_APP_REM_WILLING) ?: 0; info->error = (bp->dcbx_remote_flags & DCBX_APP_RX_ERROR) ?: 0; @@ -2444,7 +2481,7 @@ static int bnx2x_peer_apptable(struct net_device *netdev, int i, j; struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "APP-TABLE\n"); + DP(BNX2X_MSG_DCB, "APP-TABLE\n"); for (i = 0, j = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { struct dcbx_app_priority_entry *ent = diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h index 2ab9254e2d5..c6939ecb02c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h @@ -1,6 +1,6 @@ /* bnx2x_dcb.h: Broadcom Everest network driver. * - * Copyright 2009-2011 Broadcom Corporation + * Copyright 2009-2013 Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -12,7 +12,7 @@ * license other than the GPL, without Broadcom's express prior written * consent. * - * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com> * Written by: Dmitry Kravkov * */ @@ -134,8 +134,6 @@ enum { #define PFC_BRB1_REG_HIGH_LLFC_LOW_THRESHOLD 130 #define PFC_BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD 170 - - struct cos_entry_help_data { u32 pri_join_mask; u32 cos_bw; @@ -170,7 +168,6 @@ struct cos_help_data { (!(IS_DCBX_PFC_PRI_ONLY_NON_PAUSE((bp), (pg_pri)) || \ IS_DCBX_PFC_PRI_ONLY_PAUSE((bp), (pg_pri)))) - struct pg_entry_help_data { u8 num_of_dif_pri; u8 pg; @@ -202,4 +199,7 @@ extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops; int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall); #endif /* BCM_DCBNL */ +int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp); +int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp); + #endif /* BNX2X_DCB_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h index b983825d0ee..12eb4baee9f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h @@ -1,6 +1,6 @@ /* bnx2x_dump.h: Broadcom Everest network driver. * - * Copyright (c) 2011 Broadcom Corporation + * Copyright (c) 2012-2013 Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -13,129 +13,39 @@ * consent. 
*/ - -/* This struct holds a signature to ensure the dump returned from the driver - * match the meta data file inserted to grc_dump.tcl - * The signature is time stamp, diag version and grc_dump version - */ - #ifndef BNX2X_DUMP_H #define BNX2X_DUMP_H +/* WaitP Definitions */ +#define DRV_DUMP_XSTORM_WAITP_ADDRESS 0x2b8a80 +#define DRV_DUMP_TSTORM_WAITP_ADDRESS 0x1b8a80 +#define DRV_DUMP_USTORM_WAITP_ADDRESS 0x338a80 +#define DRV_DUMP_CSTORM_WAITP_ADDRESS 0x238a80 +/* Possible Chips */ +#define DUMP_CHIP_E1 1 +#define DUMP_CHIP_E1H 2 +#define DUMP_CHIP_E2 4 +#define DUMP_CHIP_E3A0 8 +#define DUMP_CHIP_E3B0 16 +#define DUMP_PATH_0 512 +#define DUMP_PATH_1 1024 +#define NUM_PRESETS 13 +#define NUM_CHIPS 5 -/*definitions */ -#define XSTORM_WAITP_ADDR 0x2b8a80 -#define TSTORM_WAITP_ADDR 0x1b8a80 -#define USTORM_WAITP_ADDR 0x338a80 -#define CSTORM_WAITP_ADDR 0x238a80 -#define TSTORM_CAM_MODE 0x1B1440 - -#define MAX_TIMER_PENDING 200 -#define TIMER_SCAN_DONT_CARE 0xFF -#define RI_E1 0x1 -#define RI_E1H 0x2 -#define RI_E2 0x4 -#define RI_E3 0x8 -#define RI_E3B0 0x10 -#define RI_ONLINE 0x100 -#define RI_OFFLINE 0x0 -#define RI_PATH0_DUMP 0x200 -#define RI_PATH1_DUMP 0x400 - -#define RI_E1_ONLINE (RI_E1 | RI_ONLINE) -#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE) -#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE) -#define RI_E2_ONLINE (RI_E2 | RI_ONLINE) -#define RI_E1E2_ONLINE (RI_E1 | RI_E2 | RI_ONLINE) -#define RI_E1HE2_ONLINE (RI_E1H | RI_E2 | RI_ONLINE) -#define RI_E1E1HE2_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE) -#define RI_E3_ONLINE (RI_E3 | RI_ONLINE) -#define RI_E1E3_ONLINE (RI_E1 | RI_E3 | RI_ONLINE) -#define RI_E1HE3_ONLINE (RI_E1H | RI_E3 | RI_ONLINE) -#define RI_E1E1HE3_ONLINE (RI_E1 | RI_E1H | RI_E3 | RI_ONLINE) -#define RI_E2E3_ONLINE (RI_E2 | RI_E3 | RI_ONLINE) -#define RI_E1E2E3_ONLINE (RI_E1 | RI_E2 | RI_E3 | RI_ONLINE) -#define RI_E1HE2E3_ONLINE (RI_E1H | RI_E2 | RI_E3 | RI_ONLINE) -#define RI_E1E1HE2E3_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_ONLINE) -#define RI_E3B0_ONLINE (RI_E3B0 | RI_ONLINE) -#define RI_E1E3B0_ONLINE (RI_E1 | RI_E3B0 | RI_ONLINE) -#define RI_E1HE3B0_ONLINE (RI_E1H | RI_E3B0 | RI_ONLINE) -#define RI_E1E1HE3B0_ONLINE (RI_E1 | RI_E1H | RI_E3B0 | RI_ONLINE) -#define RI_E2E3B0_ONLINE (RI_E2 | RI_E3B0 | RI_ONLINE) -#define RI_E1E2E3B0_ONLINE (RI_E1 | RI_E2 | RI_E3B0 | RI_ONLINE) -#define RI_E1HE2E3B0_ONLINE (RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE) -#define RI_E1E1HE2E3B0_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE) -#define RI_E3E3B0_ONLINE (RI_E3 | RI_E3B0 | RI_ONLINE) -#define RI_E1E3E3B0_ONLINE (RI_E1 | RI_E3 | RI_E3B0 | RI_ONLINE) -#define RI_E1HE3E3B0_ONLINE (RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE) -#define RI_E1E1HE3E3B0_ONLINE (RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE) -#define RI_E2E3E3B0_ONLINE (RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE) -#define RI_E1E2E3E3B0_ONLINE (RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE) -#define RI_E1HE2E3E3B0_ONLINE (RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE) -#define RI_E1E1HE2E3E3B0_ONLINE \ - (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE) -#define RI_E1_OFFLINE (RI_E1 | RI_OFFLINE) -#define RI_E1H_OFFLINE (RI_E1H | RI_OFFLINE) -#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H | RI_OFFLINE) -#define RI_E2_OFFLINE (RI_E2 | RI_OFFLINE) -#define RI_E1E2_OFFLINE (RI_E1 | RI_E2 | RI_OFFLINE) -#define RI_E1HE2_OFFLINE (RI_E1H | RI_E2 | RI_OFFLINE) -#define RI_E1E1HE2_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_OFFLINE) -#define RI_E3_OFFLINE (RI_E3 | RI_OFFLINE) -#define RI_E1E3_OFFLINE (RI_E1 | RI_E3 | RI_OFFLINE) -#define 
RI_E1HE3_OFFLINE (RI_E1H | RI_E3 | RI_OFFLINE) -#define RI_E1E1HE3_OFFLINE (RI_E1 | RI_E1H | RI_E3 | RI_OFFLINE) -#define RI_E2E3_OFFLINE (RI_E2 | RI_E3 | RI_OFFLINE) -#define RI_E1E2E3_OFFLINE (RI_E1 | RI_E2 | RI_E3 | RI_OFFLINE) -#define RI_E1HE2E3_OFFLINE (RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE) -#define RI_E1E1HE2E3_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE) -#define RI_E3B0_OFFLINE (RI_E3B0 | RI_OFFLINE) -#define RI_E1E3B0_OFFLINE (RI_E1 | RI_E3B0 | RI_OFFLINE) -#define RI_E1HE3B0_OFFLINE (RI_E1H | RI_E3B0 | RI_OFFLINE) -#define RI_E1E1HE3B0_OFFLINE (RI_E1 | RI_E1H | RI_E3B0 | RI_OFFLINE) -#define RI_E2E3B0_OFFLINE (RI_E2 | RI_E3B0 | RI_OFFLINE) -#define RI_E1E2E3B0_OFFLINE (RI_E1 | RI_E2 | RI_E3B0 | RI_OFFLINE) -#define RI_E1HE2E3B0_OFFLINE (RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE) -#define RI_E1E1HE2E3B0_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE) -#define RI_E3E3B0_OFFLINE (RI_E3 | RI_E3B0 | RI_OFFLINE) -#define RI_E1E3E3B0_OFFLINE (RI_E1 | RI_E3 | RI_E3B0 | RI_OFFLINE) -#define RI_E1HE3E3B0_OFFLINE (RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE) -#define RI_E1E1HE3E3B0_OFFLINE (RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE) -#define RI_E2E3E3B0_OFFLINE (RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE) -#define RI_E1E2E3E3B0_OFFLINE (RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE) -#define RI_E1HE2E3E3B0_OFFLINE (RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE) -#define RI_E1E1HE2E3E3B0_OFFLINE \ - (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE) -#define RI_ALL_ONLINE RI_E1E1HE2E3E3B0_ONLINE -#define RI_ALL_OFFLINE RI_E1E1HE2E3E3B0_OFFLINE - -#define DBG_DMP_TRACE_BUFFER_SIZE 0x800 -#define DBG_DMP_TRACE_BUFFER_OFFSET(shmem0_offset) \ - ((shmem0_offset) - DBG_DMP_TRACE_BUFFER_SIZE) - -struct dump_sign { - u32 time_stamp; - u32 diag_ver; - u32 grc_dump_ver; -}; - -struct dump_hdr { - u32 hdr_size; /* in dwords, excluding this field */ - struct dump_sign dump_sign; - u32 xstorm_waitp; - u32 tstorm_waitp; - u32 ustorm_waitp; - u32 cstorm_waitp; - u16 info; - u8 idle_chk; - u8 reserved; +struct dump_header { + u32 header_size; /* Size in DWORDs excluding this field */ + u32 version; + u32 preset; + u32 dump_meta_data; /* OR of CHIP and PATH. 
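The rewritten dump tables drop the packed RI_* info word in favor of separate chips and presets bitmasks per entry, so a dump pass can filter registers by the running chip and the requested preset independently. A sketch of how such a walker might select entries; the reg_selected() helper is hypothetical, and the sample rows are copied from the page_read_regs tables above:

#include <stdio.h>
#include <stdint.h>

#define DUMP_CHIP_E2	4
#define DUMP_CHIP_E3A0	8
#define DUMP_CHIP_E3B0	16

struct reg_addr {
	uint32_t addr;
	uint32_t size;		/* in dwords */
	uint32_t chips;
	uint32_t presets;
};

static const struct reg_addr regs[] = {
	{ 0x58000, 4608, DUMP_CHIP_E2, 0x30 },
	{ 0x58000, 4608, DUMP_CHIP_E3A0 | DUMP_CHIP_E3B0, 0x30 },
};

/* Keep an entry only when it matches both the chip and the preset */
static int reg_selected(const struct reg_addr *r, uint32_t chip,
			uint32_t preset_bit)
{
	return (r->chips & chip) && (r->presets & preset_bit);
}

int main(void)
{
	unsigned i;

	for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
		printf("entry %u on E2: %d\n", i,
		       reg_selected(&regs[i], DUMP_CHIP_E2, 0x10));
	/* prints 1 for the E2 row, 0 for the E3-only row */
	return 0;
}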
*/ }; +#define BNX2X_DUMP_VERSION 0x50acff01 struct reg_addr { u32 addr; u32 size; - u16 info; + u32 chips; + u32 presets; }; struct wreg_addr { @@ -143,1014 +53,2168 @@ struct wreg_addr { u32 size; u32 read_regs_count; const u32 *read_regs; - u16 info; + u32 chips; + u32 presets; +}; + +#define PAGE_MODE_VALUES_E2 2 +#define PAGE_READ_REGS_E2 1 +#define PAGE_WRITE_REGS_E2 1 +static const u32 page_vals_e2[] = {0, 128}; +static const u32 page_write_regs_e2[] = {328476}; +static const struct reg_addr page_read_regs_e2[] = { + {0x58000, 4608, DUMP_CHIP_E2, 0x30} +}; + +#define PAGE_MODE_VALUES_E3 2 +#define PAGE_READ_REGS_E3 1 +#define PAGE_WRITE_REGS_E3 1 +static const u32 page_vals_e3[] = {0, 128}; +static const u32 page_write_regs_e3[] = {328476}; +static const struct reg_addr page_read_regs_e3[] = { + {0x58000, 4608, DUMP_CHIP_E3A0 | DUMP_CHIP_E3B0, 0x30} }; static const struct reg_addr reg_addrs[] = { - { 0x2000, 341, RI_ALL_ONLINE }, - { 0x2800, 103, RI_ALL_ONLINE }, - { 0x3000, 287, RI_ALL_ONLINE }, - { 0x3800, 331, RI_ALL_ONLINE }, - { 0x8800, 6, RI_ALL_ONLINE }, - { 0x8818, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x9000, 147, RI_E2E3E3B0_ONLINE }, - { 0x924c, 1, RI_E2_ONLINE }, - { 0x9250, 16, RI_E2E3E3B0_ONLINE }, - { 0x9400, 33, RI_E2E3E3B0_ONLINE }, - { 0x9484, 5, RI_E3E3B0_ONLINE }, - { 0xa000, 27, RI_ALL_ONLINE }, - { 0xa06c, 1, RI_E1E1H_ONLINE }, - { 0xa070, 71, RI_ALL_ONLINE }, - { 0xa18c, 4, RI_E1E1H_ONLINE }, - { 0xa19c, 62, RI_ALL_ONLINE }, - { 0xa294, 2, RI_E1E1H_ONLINE }, - { 0xa29c, 2, RI_ALL_ONLINE }, - { 0xa2a4, 2, RI_E1E1HE2_ONLINE }, - { 0xa2ac, 52, RI_ALL_ONLINE }, - { 0xa39c, 7, RI_E1HE2E3E3B0_ONLINE }, - { 0xa3b8, 2, RI_E3E3B0_ONLINE }, - { 0xa3c0, 3, RI_E1HE2E3E3B0_ONLINE }, - { 0xa3d0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa3d8, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa3e0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa3e8, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa3f0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa3f8, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa400, 40, RI_ALL_ONLINE }, - { 0xa4a0, 1, RI_E1E1HE2_ONLINE }, - { 0xa4a4, 2, RI_ALL_ONLINE }, - { 0xa4ac, 2, RI_E1E1H_ONLINE }, - { 0xa4b4, 1, RI_E1E1HE2_ONLINE }, - { 0xa4b8, 2, RI_E1E1H_ONLINE }, - { 0xa4c0, 3, RI_ALL_ONLINE }, - { 0xa4cc, 5, RI_E1E1H_ONLINE }, - { 0xa4e0, 3, RI_ALL_ONLINE }, - { 0xa4fc, 2, RI_ALL_ONLINE }, - { 0xa504, 1, RI_E1E1H_ONLINE }, - { 0xa508, 3, RI_ALL_ONLINE }, - { 0xa518, 1, RI_ALL_ONLINE }, - { 0xa520, 1, RI_ALL_ONLINE }, - { 0xa528, 1, RI_ALL_ONLINE }, - { 0xa530, 1, RI_ALL_ONLINE }, - { 0xa538, 1, RI_ALL_ONLINE }, - { 0xa540, 1, RI_ALL_ONLINE }, - { 0xa548, 1, RI_E1E1H_ONLINE }, - { 0xa550, 1, RI_E1E1H_ONLINE }, - { 0xa558, 1, RI_E1E1H_ONLINE }, - { 0xa560, 1, RI_E1E1H_ONLINE }, - { 0xa568, 1, RI_E1E1H_ONLINE }, - { 0xa570, 1, RI_ALL_ONLINE }, - { 0xa580, 1, RI_ALL_ONLINE }, - { 0xa590, 1, RI_ALL_ONLINE }, - { 0xa5a0, 1, RI_E1E1HE2_ONLINE }, - { 0xa5c0, 1, RI_ALL_ONLINE }, - { 0xa5e0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa5e8, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa5f0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa5f8, 1, RI_E1HE2_ONLINE }, - { 0xa5fc, 9, RI_E1HE2E3E3B0_ONLINE }, - { 0xa620, 6, RI_E2E3E3B0_ONLINE }, - { 0xa638, 20, RI_E2_ONLINE }, - { 0xa688, 42, RI_E2E3E3B0_ONLINE }, - { 0xa730, 1, RI_E2_ONLINE }, - { 0xa734, 2, RI_E2E3E3B0_ONLINE }, - { 0xa73c, 4, RI_E2_ONLINE }, - { 0xa74c, 5, RI_E2E3E3B0_ONLINE }, - { 0xa760, 5, RI_E2_ONLINE }, - { 0xa774, 7, RI_E2E3E3B0_ONLINE }, - { 0xa790, 15, RI_E2_ONLINE }, - { 0xa7cc, 4, RI_E2E3E3B0_ONLINE }, - { 0xa7e0, 6, RI_E3E3B0_ONLINE }, - { 0xa800, 18, RI_E2_ONLINE }, - { 0xa848, 33, 
RI_E2E3E3B0_ONLINE }, - { 0xa8cc, 2, RI_E3E3B0_ONLINE }, - { 0xa8d4, 4, RI_E2E3E3B0_ONLINE }, - { 0xa8e4, 1, RI_E3E3B0_ONLINE }, - { 0xa8e8, 1, RI_E2E3E3B0_ONLINE }, - { 0xa8f0, 1, RI_E2E3E3B0_ONLINE }, - { 0xa8f8, 30, RI_E3E3B0_ONLINE }, - { 0xa974, 73, RI_E3E3B0_ONLINE }, - { 0xac30, 1, RI_E3E3B0_ONLINE }, - { 0xac40, 1, RI_E3E3B0_ONLINE }, - { 0xac50, 1, RI_E3E3B0_ONLINE }, - { 0xac60, 1, RI_E3B0_ONLINE }, - { 0x10000, 9, RI_ALL_ONLINE }, - { 0x10024, 1, RI_E1E1HE2_ONLINE }, - { 0x10028, 5, RI_ALL_ONLINE }, - { 0x1003c, 6, RI_E1E1HE2_ONLINE }, - { 0x10054, 20, RI_ALL_ONLINE }, - { 0x100a4, 4, RI_E1E1HE2_ONLINE }, - { 0x100b4, 11, RI_ALL_ONLINE }, - { 0x100e0, 4, RI_E1E1HE2_ONLINE }, - { 0x100f0, 8, RI_ALL_ONLINE }, - { 0x10110, 6, RI_E1E1HE2_ONLINE }, - { 0x10128, 110, RI_ALL_ONLINE }, - { 0x102e0, 4, RI_E1E1HE2_ONLINE }, - { 0x102f0, 18, RI_ALL_ONLINE }, - { 0x10338, 20, RI_E1E1HE2_ONLINE }, - { 0x10388, 10, RI_ALL_ONLINE }, - { 0x10400, 6, RI_E1E1HE2_ONLINE }, - { 0x10418, 6, RI_ALL_ONLINE }, - { 0x10430, 10, RI_E1E1HE2_ONLINE }, - { 0x10458, 22, RI_ALL_ONLINE }, - { 0x104b0, 12, RI_E1E1HE2_ONLINE }, - { 0x104e0, 1, RI_ALL_ONLINE }, - { 0x104e8, 2, RI_ALL_ONLINE }, - { 0x104f4, 2, RI_ALL_ONLINE }, - { 0x10500, 146, RI_ALL_ONLINE }, - { 0x10750, 2, RI_E1E1HE2_ONLINE }, - { 0x10760, 2, RI_E1E1HE2_ONLINE }, - { 0x10770, 2, RI_E1E1HE2_ONLINE }, - { 0x10780, 2, RI_E1E1HE2_ONLINE }, - { 0x10790, 2, RI_ALL_ONLINE }, - { 0x107a0, 2, RI_E1E1HE2_ONLINE }, - { 0x107b0, 2, RI_E1E1HE2_ONLINE }, - { 0x107c0, 2, RI_E1E1HE2_ONLINE }, - { 0x107d0, 2, RI_E1E1HE2_ONLINE }, - { 0x107e0, 2, RI_ALL_ONLINE }, - { 0x10880, 2, RI_ALL_ONLINE }, - { 0x10900, 2, RI_ALL_ONLINE }, - { 0x16000, 1, RI_E1HE2_ONLINE }, - { 0x16004, 25, RI_E1HE2E3E3B0_ONLINE }, - { 0x16070, 8, RI_E1HE2E3E3B0_ONLINE }, - { 0x16090, 4, RI_E1HE2E3_ONLINE }, - { 0x160a0, 6, RI_E1HE2E3E3B0_ONLINE }, - { 0x160c0, 7, RI_E1HE2E3E3B0_ONLINE }, - { 0x160dc, 2, RI_E1HE2_ONLINE }, - { 0x160e4, 10, RI_E1HE2E3E3B0_ONLINE }, - { 0x1610c, 2, RI_E1HE2_ONLINE }, - { 0x16114, 6, RI_E1HE2E3E3B0_ONLINE }, - { 0x16140, 48, RI_E1HE2E3E3B0_ONLINE }, - { 0x16204, 5, RI_E1HE2E3E3B0_ONLINE }, - { 0x18000, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x18008, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x18010, 35, RI_E2E3E3B0_ONLINE }, - { 0x180a4, 2, RI_E2E3E3B0_ONLINE }, - { 0x180c0, 9, RI_E2E3E3B0_ONLINE }, - { 0x180e4, 1, RI_E2E3_ONLINE }, - { 0x180e8, 2, RI_E2E3E3B0_ONLINE }, - { 0x180f0, 1, RI_E2E3_ONLINE }, - { 0x180f4, 79, RI_E2E3E3B0_ONLINE }, - { 0x18230, 1, RI_E2E3_ONLINE }, - { 0x18234, 2, RI_E2E3E3B0_ONLINE }, - { 0x1823c, 1, RI_E2E3_ONLINE }, - { 0x18240, 13, RI_E2E3E3B0_ONLINE }, - { 0x18274, 1, RI_E2_ONLINE }, - { 0x18278, 81, RI_E2E3E3B0_ONLINE }, - { 0x18440, 63, RI_E2E3E3B0_ONLINE }, - { 0x18570, 42, RI_E3E3B0_ONLINE }, - { 0x18618, 25, RI_E3B0_ONLINE }, - { 0x18680, 44, RI_E3B0_ONLINE }, - { 0x18748, 12, RI_E3B0_ONLINE }, - { 0x18788, 1, RI_E3B0_ONLINE }, - { 0x1879c, 6, RI_E3B0_ONLINE }, - { 0x187c4, 51, RI_E3B0_ONLINE }, - { 0x18a00, 48, RI_E3B0_ONLINE }, - { 0x20000, 24, RI_ALL_ONLINE }, - { 0x20060, 8, RI_ALL_ONLINE }, - { 0x20080, 94, RI_ALL_ONLINE }, - { 0x201f8, 1, RI_E1E1H_ONLINE }, - { 0x201fc, 1, RI_ALL_ONLINE }, - { 0x20200, 1, RI_E1E1H_ONLINE }, - { 0x20204, 1, RI_ALL_ONLINE }, - { 0x20208, 1, RI_E1E1H_ONLINE }, - { 0x2020c, 39, RI_ALL_ONLINE }, - { 0x202c8, 1, RI_E2E3E3B0_ONLINE }, - { 0x202d8, 4, RI_E2E3E3B0_ONLINE }, - { 0x202f0, 1, RI_E3B0_ONLINE }, - { 0x20400, 2, RI_ALL_ONLINE }, - { 0x2040c, 8, RI_ALL_ONLINE }, - { 0x2042c, 18, 
RI_E1HE2E3E3B0_ONLINE }, - { 0x20480, 1, RI_ALL_ONLINE }, - { 0x20500, 1, RI_ALL_ONLINE }, - { 0x20600, 1, RI_ALL_ONLINE }, - { 0x28000, 1, RI_ALL_ONLINE }, - { 0x28004, 8191, RI_ALL_OFFLINE }, - { 0x30000, 1, RI_ALL_ONLINE }, - { 0x30004, 16383, RI_ALL_OFFLINE }, - { 0x40000, 98, RI_ALL_ONLINE }, - { 0x401a8, 8, RI_E1HE2E3E3B0_ONLINE }, - { 0x401c8, 1, RI_E1H_ONLINE }, - { 0x401cc, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x401d4, 2, RI_E2E3E3B0_ONLINE }, - { 0x40200, 4, RI_ALL_ONLINE }, - { 0x40220, 6, RI_E2E3E3B0_ONLINE }, - { 0x40238, 8, RI_E2E3_ONLINE }, - { 0x40258, 4, RI_E2E3E3B0_ONLINE }, - { 0x40268, 2, RI_E3E3B0_ONLINE }, - { 0x40270, 17, RI_E3B0_ONLINE }, - { 0x40400, 43, RI_ALL_ONLINE }, - { 0x404cc, 3, RI_E1HE2E3E3B0_ONLINE }, - { 0x404e0, 1, RI_E2E3E3B0_ONLINE }, - { 0x40500, 2, RI_ALL_ONLINE }, - { 0x40510, 2, RI_ALL_ONLINE }, - { 0x40520, 2, RI_ALL_ONLINE }, - { 0x40530, 2, RI_ALL_ONLINE }, - { 0x40540, 2, RI_ALL_ONLINE }, - { 0x40550, 10, RI_E2E3E3B0_ONLINE }, - { 0x40610, 2, RI_E2E3E3B0_ONLINE }, - { 0x42000, 164, RI_ALL_ONLINE }, - { 0x422c0, 4, RI_E2E3E3B0_ONLINE }, - { 0x422d4, 5, RI_E1HE2E3E3B0_ONLINE }, - { 0x422e8, 1, RI_E2E3E3B0_ONLINE }, - { 0x42400, 49, RI_ALL_ONLINE }, - { 0x424c8, 38, RI_ALL_ONLINE }, - { 0x42568, 2, RI_ALL_ONLINE }, - { 0x42640, 5, RI_E2E3E3B0_ONLINE }, - { 0x42800, 1, RI_ALL_ONLINE }, - { 0x50000, 1, RI_ALL_ONLINE }, - { 0x50004, 19, RI_ALL_ONLINE }, - { 0x50050, 8, RI_ALL_ONLINE }, - { 0x50070, 88, RI_ALL_ONLINE }, - { 0x501f0, 4, RI_E1HE2E3E3B0_ONLINE }, - { 0x50200, 2, RI_ALL_ONLINE }, - { 0x5020c, 7, RI_ALL_ONLINE }, - { 0x50228, 6, RI_E1HE2E3E3B0_ONLINE }, - { 0x50240, 1, RI_ALL_ONLINE }, - { 0x50280, 1, RI_ALL_ONLINE }, - { 0x50300, 1, RI_E2E3E3B0_ONLINE }, - { 0x5030c, 1, RI_E2E3E3B0_ONLINE }, - { 0x50318, 1, RI_E2E3E3B0_ONLINE }, - { 0x5031c, 1, RI_E2E3E3B0_ONLINE }, - { 0x50320, 2, RI_E2E3E3B0_ONLINE }, - { 0x50330, 1, RI_E3B0_ONLINE }, - { 0x52000, 1, RI_ALL_ONLINE }, - { 0x54000, 1, RI_ALL_ONLINE }, - { 0x54004, 3327, RI_ALL_OFFLINE }, - { 0x58000, 1, RI_ALL_ONLINE }, - { 0x58004, 8191, RI_E1E1H_OFFLINE }, - { 0x60000, 26, RI_ALL_ONLINE }, - { 0x60068, 8, RI_E1E1H_ONLINE }, - { 0x60088, 12, RI_ALL_ONLINE }, - { 0x600b8, 9, RI_E1E1H_ONLINE }, - { 0x600dc, 1, RI_ALL_ONLINE }, - { 0x600e0, 5, RI_E1E1H_ONLINE }, - { 0x600f4, 1, RI_E1E1HE2_ONLINE }, - { 0x600f8, 1, RI_E1E1H_ONLINE }, - { 0x600fc, 8, RI_ALL_ONLINE }, - { 0x6013c, 24, RI_E1H_ONLINE }, - { 0x6019c, 2, RI_E2E3E3B0_ONLINE }, - { 0x601ac, 18, RI_E2E3E3B0_ONLINE }, - { 0x60200, 1, RI_ALL_ONLINE }, - { 0x60204, 2, RI_ALL_OFFLINE }, - { 0x60210, 13, RI_E2E3E3B0_ONLINE }, - { 0x60244, 16, RI_E3B0_ONLINE }, - { 0x61000, 1, RI_ALL_ONLINE }, - { 0x61004, 511, RI_ALL_OFFLINE }, - { 0x61800, 512, RI_E3E3B0_OFFLINE }, - { 0x70000, 8, RI_ALL_ONLINE }, - { 0x70020, 8184, RI_ALL_OFFLINE }, - { 0x78000, 8192, RI_E3E3B0_OFFLINE }, - { 0x85000, 3, RI_ALL_ONLINE }, - { 0x8501c, 7, RI_ALL_ONLINE }, - { 0x85048, 1, RI_ALL_ONLINE }, - { 0x85200, 32, RI_ALL_ONLINE }, - { 0xb0000, 16384, RI_E1H_ONLINE }, - { 0xc1000, 7, RI_ALL_ONLINE }, - { 0xc103c, 2, RI_E2E3E3B0_ONLINE }, - { 0xc1800, 2, RI_ALL_ONLINE }, - { 0xc2000, 164, RI_ALL_ONLINE }, - { 0xc22c0, 5, RI_E2E3E3B0_ONLINE }, - { 0xc22d8, 4, RI_E2E3E3B0_ONLINE }, - { 0xc2400, 49, RI_ALL_ONLINE }, - { 0xc24c8, 38, RI_ALL_ONLINE }, - { 0xc2568, 2, RI_ALL_ONLINE }, - { 0xc2600, 1, RI_ALL_ONLINE }, - { 0xc4000, 165, RI_ALL_ONLINE }, - { 0xc42d8, 2, RI_E2E3E3B0_ONLINE }, - { 0xc42e0, 7, RI_E1HE2E3E3B0_ONLINE }, - { 0xc42fc, 1, RI_E2E3E3B0_ONLINE }, - { 
0xc4400, 51, RI_ALL_ONLINE }, - { 0xc44d0, 38, RI_ALL_ONLINE }, - { 0xc4570, 2, RI_ALL_ONLINE }, - { 0xc4578, 5, RI_E2E3E3B0_ONLINE }, - { 0xc4600, 1, RI_ALL_ONLINE }, - { 0xd0000, 19, RI_ALL_ONLINE }, - { 0xd004c, 8, RI_ALL_ONLINE }, - { 0xd006c, 91, RI_ALL_ONLINE }, - { 0xd01fc, 1, RI_E2E3E3B0_ONLINE }, - { 0xd0200, 2, RI_ALL_ONLINE }, - { 0xd020c, 7, RI_ALL_ONLINE }, - { 0xd0228, 18, RI_E1HE2E3E3B0_ONLINE }, - { 0xd0280, 1, RI_ALL_ONLINE }, - { 0xd0300, 1, RI_ALL_ONLINE }, - { 0xd0400, 1, RI_ALL_ONLINE }, - { 0xd0818, 1, RI_E3B0_ONLINE }, - { 0xd4000, 1, RI_ALL_ONLINE }, - { 0xd4004, 2559, RI_ALL_OFFLINE }, - { 0xd8000, 1, RI_ALL_ONLINE }, - { 0xd8004, 8191, RI_ALL_OFFLINE }, - { 0xe0000, 21, RI_ALL_ONLINE }, - { 0xe0054, 8, RI_ALL_ONLINE }, - { 0xe0074, 49, RI_ALL_ONLINE }, - { 0xe0138, 1, RI_E1E1H_ONLINE }, - { 0xe013c, 35, RI_ALL_ONLINE }, - { 0xe01f4, 1, RI_E2_ONLINE }, - { 0xe01f8, 1, RI_E2E3E3B0_ONLINE }, - { 0xe0200, 2, RI_ALL_ONLINE }, - { 0xe020c, 8, RI_ALL_ONLINE }, - { 0xe022c, 18, RI_E1HE2E3E3B0_ONLINE }, - { 0xe0280, 1, RI_ALL_ONLINE }, - { 0xe0300, 1, RI_ALL_ONLINE }, - { 0xe0400, 1, RI_E3B0_ONLINE }, - { 0xe1000, 1, RI_ALL_ONLINE }, - { 0xe2000, 1, RI_ALL_ONLINE }, - { 0xe2004, 2047, RI_ALL_OFFLINE }, - { 0xf0000, 1, RI_ALL_ONLINE }, - { 0xf0004, 16383, RI_ALL_OFFLINE }, - { 0x101000, 12, RI_ALL_ONLINE }, - { 0x101050, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x101054, 3, RI_E2E3E3B0_ONLINE }, - { 0x101100, 1, RI_ALL_ONLINE }, - { 0x101800, 8, RI_ALL_ONLINE }, - { 0x102000, 18, RI_ALL_ONLINE }, - { 0x102068, 6, RI_E2E3E3B0_ONLINE }, - { 0x102080, 17, RI_ALL_ONLINE }, - { 0x1020c8, 8, RI_E1H_ONLINE }, - { 0x1020e8, 9, RI_E2E3E3B0_ONLINE }, - { 0x102400, 1, RI_ALL_ONLINE }, - { 0x103000, 26, RI_ALL_ONLINE }, - { 0x103098, 5, RI_E1HE2E3E3B0_ONLINE }, - { 0x1030ac, 2, RI_E2E3E3B0_ONLINE }, - { 0x1030b4, 1, RI_E2_ONLINE }, - { 0x1030b8, 7, RI_E2E3E3B0_ONLINE }, - { 0x1030d8, 8, RI_E2E3E3B0_ONLINE }, - { 0x103400, 1, RI_E2E3E3B0_ONLINE }, - { 0x103404, 135, RI_E2E3E3B0_OFFLINE }, - { 0x103800, 8, RI_ALL_ONLINE }, - { 0x104000, 63, RI_ALL_ONLINE }, - { 0x10411c, 16, RI_E2E3E3B0_ONLINE }, - { 0x104200, 17, RI_ALL_ONLINE }, - { 0x104400, 64, RI_ALL_ONLINE }, - { 0x104500, 192, RI_ALL_OFFLINE }, - { 0x104800, 64, RI_ALL_ONLINE }, - { 0x104900, 192, RI_ALL_OFFLINE }, - { 0x105000, 256, RI_ALL_ONLINE }, - { 0x105400, 768, RI_ALL_OFFLINE }, - { 0x107000, 7, RI_E2E3E3B0_ONLINE }, - { 0x10701c, 1, RI_E3E3B0_ONLINE }, - { 0x108000, 33, RI_E1E1H_ONLINE }, - { 0x1080ac, 5, RI_E1H_ONLINE }, - { 0x108100, 5, RI_E1E1H_ONLINE }, - { 0x108120, 5, RI_E1E1H_ONLINE }, - { 0x108200, 74, RI_E1E1H_ONLINE }, - { 0x108400, 74, RI_E1E1H_ONLINE }, - { 0x108800, 152, RI_E1E1H_ONLINE }, - { 0x110000, 111, RI_E2E3E3B0_ONLINE }, - { 0x1101dc, 1, RI_E3E3B0_ONLINE }, - { 0x110200, 4, RI_E2E3E3B0_ONLINE }, - { 0x120000, 2, RI_ALL_ONLINE }, - { 0x120008, 4, RI_ALL_ONLINE }, - { 0x120018, 3, RI_ALL_ONLINE }, - { 0x120024, 4, RI_ALL_ONLINE }, - { 0x120034, 3, RI_ALL_ONLINE }, - { 0x120040, 4, RI_ALL_ONLINE }, - { 0x120050, 3, RI_ALL_ONLINE }, - { 0x12005c, 4, RI_ALL_ONLINE }, - { 0x12006c, 3, RI_ALL_ONLINE }, - { 0x120078, 4, RI_ALL_ONLINE }, - { 0x120088, 3, RI_ALL_ONLINE }, - { 0x120094, 4, RI_ALL_ONLINE }, - { 0x1200a4, 3, RI_ALL_ONLINE }, - { 0x1200b0, 4, RI_ALL_ONLINE }, - { 0x1200c0, 3, RI_ALL_ONLINE }, - { 0x1200cc, 4, RI_ALL_ONLINE }, - { 0x1200dc, 3, RI_ALL_ONLINE }, - { 0x1200e8, 4, RI_ALL_ONLINE }, - { 0x1200f8, 3, RI_ALL_ONLINE }, - { 0x120104, 4, RI_ALL_ONLINE }, - { 0x120114, 1, RI_ALL_ONLINE }, - { 0x120118, 
22, RI_ALL_ONLINE }, - { 0x120170, 2, RI_E1E1H_ONLINE }, - { 0x120178, 243, RI_ALL_ONLINE }, - { 0x120544, 4, RI_E1E1H_ONLINE }, - { 0x120554, 6, RI_ALL_ONLINE }, - { 0x12059c, 6, RI_E1HE2E3E3B0_ONLINE }, - { 0x1205b4, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1205b8, 15, RI_E1HE2E3E3B0_ONLINE }, - { 0x1205f4, 1, RI_E1HE2_ONLINE }, - { 0x1205f8, 4, RI_E2E3E3B0_ONLINE }, - { 0x120618, 1, RI_E2E3E3B0_ONLINE }, - { 0x12061c, 20, RI_E1HE2E3E3B0_ONLINE }, - { 0x12066c, 11, RI_E1HE2E3E3B0_ONLINE }, - { 0x120698, 3, RI_E2E3E3B0_ONLINE }, - { 0x1206a4, 1, RI_E2_ONLINE }, - { 0x1206a8, 1, RI_E2E3E3B0_ONLINE }, - { 0x1206b0, 75, RI_E2E3E3B0_ONLINE }, - { 0x1207dc, 1, RI_E2_ONLINE }, - { 0x1207fc, 1, RI_E2E3E3B0_ONLINE }, - { 0x12080c, 65, RI_ALL_ONLINE }, - { 0x120910, 7, RI_E2E3E3B0_ONLINE }, - { 0x120930, 9, RI_E2E3E3B0_ONLINE }, - { 0x12095c, 37, RI_E3E3B0_ONLINE }, - { 0x120a00, 2, RI_E1E1HE2_ONLINE }, - { 0x120b00, 1, RI_E3E3B0_ONLINE }, - { 0x122000, 2, RI_ALL_ONLINE }, - { 0x122008, 2046, RI_E1_OFFLINE }, - { 0x128000, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x128008, 6142, RI_E1HE2E3E3B0_OFFLINE }, - { 0x130000, 35, RI_E2E3E3B0_ONLINE }, - { 0x130100, 29, RI_E2E3E3B0_ONLINE }, - { 0x130180, 1, RI_E2E3E3B0_ONLINE }, - { 0x130200, 1, RI_E2E3E3B0_ONLINE }, - { 0x130280, 1, RI_E2E3E3B0_ONLINE }, - { 0x130300, 5, RI_E2E3E3B0_ONLINE }, - { 0x130380, 1, RI_E2E3E3B0_ONLINE }, - { 0x130400, 1, RI_E2E3E3B0_ONLINE }, - { 0x130480, 5, RI_E2E3E3B0_ONLINE }, - { 0x130800, 72, RI_E2E3E3B0_ONLINE }, - { 0x131000, 136, RI_E2E3E3B0_ONLINE }, - { 0x132000, 148, RI_E2E3E3B0_ONLINE }, - { 0x134000, 544, RI_E2E3E3B0_ONLINE }, - { 0x140000, 1, RI_ALL_ONLINE }, - { 0x140004, 9, RI_E1E1HE2E3_ONLINE }, - { 0x140028, 8, RI_ALL_ONLINE }, - { 0x140048, 10, RI_E1E1HE2E3_ONLINE }, - { 0x140070, 1, RI_ALL_ONLINE }, - { 0x140074, 10, RI_E1E1HE2E3_ONLINE }, - { 0x14009c, 1, RI_ALL_ONLINE }, - { 0x1400a0, 5, RI_E1E1HE2E3_ONLINE }, - { 0x1400b4, 7, RI_ALL_ONLINE }, - { 0x1400d0, 10, RI_E1E1HE2E3_ONLINE }, - { 0x1400f8, 2, RI_ALL_ONLINE }, - { 0x140100, 5, RI_E1E1H_ONLINE }, - { 0x140114, 5, RI_E1E1HE2E3_ONLINE }, - { 0x140128, 7, RI_ALL_ONLINE }, - { 0x140144, 9, RI_E1E1HE2E3_ONLINE }, - { 0x140168, 8, RI_ALL_ONLINE }, - { 0x140188, 3, RI_E1E1HE2E3_ONLINE }, - { 0x140194, 13, RI_ALL_ONLINE }, - { 0x140200, 6, RI_E1E1HE2E3_ONLINE }, - { 0x140220, 4, RI_E2E3_ONLINE }, - { 0x140240, 4, RI_E2E3_ONLINE }, - { 0x140260, 4, RI_E2E3_ONLINE }, - { 0x140280, 4, RI_E2E3_ONLINE }, - { 0x1402a0, 4, RI_E2E3_ONLINE }, - { 0x1402c0, 4, RI_E2E3_ONLINE }, - { 0x1402e0, 2, RI_E2E3_ONLINE }, - { 0x1402e8, 2, RI_E2E3E3B0_ONLINE }, - { 0x1402f0, 9, RI_E2E3_ONLINE }, - { 0x140314, 44, RI_E3B0_ONLINE }, - { 0x1403d0, 70, RI_E3B0_ONLINE }, - { 0x144000, 4, RI_E1E1H_ONLINE }, - { 0x148000, 4, RI_E1E1H_ONLINE }, - { 0x14c000, 4, RI_E1E1H_ONLINE }, - { 0x150000, 4, RI_E1E1H_ONLINE }, - { 0x154000, 4, RI_E1E1H_ONLINE }, - { 0x158000, 4, RI_E1E1H_ONLINE }, - { 0x15c000, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x15c008, 5, RI_E1H_ONLINE }, - { 0x15c020, 8, RI_E2E3E3B0_ONLINE }, - { 0x15c040, 1, RI_E2E3_ONLINE }, - { 0x15c044, 2, RI_E2E3E3B0_ONLINE }, - { 0x15c04c, 8, RI_E2E3_ONLINE }, - { 0x15c06c, 8, RI_E2E3E3B0_ONLINE }, - { 0x15c090, 13, RI_E2E3E3B0_ONLINE }, - { 0x15c0c8, 24, RI_E2E3E3B0_ONLINE }, - { 0x15c128, 2, RI_E2E3_ONLINE }, - { 0x15c130, 8, RI_E2E3E3B0_ONLINE }, - { 0x15c150, 2, RI_E3E3B0_ONLINE }, - { 0x15c158, 2, RI_E3_ONLINE }, - { 0x15c160, 149, RI_E3B0_ONLINE }, - { 0x161000, 7, RI_ALL_ONLINE }, - { 0x16103c, 2, RI_E2E3E3B0_ONLINE }, - { 0x161800, 2, RI_ALL_ONLINE 
}, - { 0x162000, 54, RI_E3E3B0_ONLINE }, - { 0x162200, 60, RI_E3E3B0_ONLINE }, - { 0x162400, 54, RI_E3E3B0_ONLINE }, - { 0x162600, 60, RI_E3E3B0_ONLINE }, - { 0x162800, 54, RI_E3E3B0_ONLINE }, - { 0x162a00, 60, RI_E3E3B0_ONLINE }, - { 0x162c00, 54, RI_E3E3B0_ONLINE }, - { 0x162e00, 60, RI_E3E3B0_ONLINE }, - { 0x164000, 60, RI_ALL_ONLINE }, - { 0x164110, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x164118, 15, RI_E2E3E3B0_ONLINE }, - { 0x164200, 1, RI_ALL_ONLINE }, - { 0x164208, 1, RI_ALL_ONLINE }, - { 0x164210, 1, RI_ALL_ONLINE }, - { 0x164218, 1, RI_ALL_ONLINE }, - { 0x164220, 1, RI_ALL_ONLINE }, - { 0x164228, 1, RI_ALL_ONLINE }, - { 0x164230, 1, RI_ALL_ONLINE }, - { 0x164238, 1, RI_ALL_ONLINE }, - { 0x164240, 1, RI_ALL_ONLINE }, - { 0x164248, 1, RI_ALL_ONLINE }, - { 0x164250, 1, RI_ALL_ONLINE }, - { 0x164258, 1, RI_ALL_ONLINE }, - { 0x164260, 1, RI_ALL_ONLINE }, - { 0x164270, 2, RI_ALL_ONLINE }, - { 0x164280, 2, RI_ALL_ONLINE }, - { 0x164800, 2, RI_ALL_ONLINE }, - { 0x165000, 2, RI_ALL_ONLINE }, - { 0x166000, 164, RI_ALL_ONLINE }, - { 0x1662cc, 7, RI_E2E3E3B0_ONLINE }, - { 0x166400, 49, RI_ALL_ONLINE }, - { 0x1664c8, 38, RI_ALL_ONLINE }, - { 0x166568, 2, RI_ALL_ONLINE }, - { 0x166570, 5, RI_E2E3E3B0_ONLINE }, - { 0x166800, 1, RI_ALL_ONLINE }, - { 0x168000, 137, RI_ALL_ONLINE }, - { 0x168224, 2, RI_E1E1H_ONLINE }, - { 0x16822c, 29, RI_ALL_ONLINE }, - { 0x1682a0, 12, RI_E1E1H_ONLINE }, - { 0x1682d0, 12, RI_ALL_ONLINE }, - { 0x168300, 2, RI_E1E1H_ONLINE }, - { 0x168308, 68, RI_ALL_ONLINE }, - { 0x168418, 2, RI_E1E1H_ONLINE }, - { 0x168420, 6, RI_ALL_ONLINE }, - { 0x168800, 19, RI_ALL_ONLINE }, - { 0x168900, 1, RI_ALL_ONLINE }, - { 0x168a00, 128, RI_ALL_ONLINE }, - { 0x16a000, 1, RI_ALL_ONLINE }, - { 0x16a004, 1535, RI_ALL_OFFLINE }, - { 0x16c000, 1, RI_ALL_ONLINE }, - { 0x16c004, 1535, RI_ALL_OFFLINE }, - { 0x16e000, 16, RI_E1H_ONLINE }, - { 0x16e040, 8, RI_E2E3E3B0_ONLINE }, - { 0x16e100, 1, RI_E1H_ONLINE }, - { 0x16e200, 2, RI_E1H_ONLINE }, - { 0x16e400, 161, RI_E1H_ONLINE }, - { 0x16e684, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x16e68c, 12, RI_E1H_ONLINE }, - { 0x16e6bc, 4, RI_E1HE2E3E3B0_ONLINE }, - { 0x16e6cc, 4, RI_E1H_ONLINE }, - { 0x16e6e0, 2, RI_E2E3E3B0_ONLINE }, - { 0x16e6e8, 5, RI_E2E3_ONLINE }, - { 0x16e6fc, 5, RI_E2E3E3B0_ONLINE }, - { 0x16e768, 17, RI_E2E3E3B0_ONLINE }, - { 0x16e7ac, 12, RI_E3B0_ONLINE }, - { 0x170000, 24, RI_ALL_ONLINE }, - { 0x170060, 4, RI_E1E1H_ONLINE }, - { 0x170070, 65, RI_ALL_ONLINE }, - { 0x170194, 11, RI_E2E3E3B0_ONLINE }, - { 0x1701c4, 1, RI_E2E3E3B0_ONLINE }, - { 0x1701cc, 7, RI_E2E3E3B0_ONLINE }, - { 0x1701e8, 1, RI_E3E3B0_ONLINE }, - { 0x1701ec, 1, RI_E2E3E3B0_ONLINE }, - { 0x1701f4, 1, RI_E2E3E3B0_ONLINE }, - { 0x170200, 4, RI_ALL_ONLINE }, - { 0x170214, 1, RI_ALL_ONLINE }, - { 0x170218, 77, RI_E2E3E3B0_ONLINE }, - { 0x170400, 64, RI_E2E3E3B0_ONLINE }, - { 0x178000, 1, RI_ALL_ONLINE }, - { 0x180000, 61, RI_ALL_ONLINE }, - { 0x18013c, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x180200, 58, RI_ALL_ONLINE }, - { 0x180340, 4, RI_ALL_ONLINE }, - { 0x180380, 1, RI_E2E3E3B0_ONLINE }, - { 0x180388, 1, RI_E2E3E3B0_ONLINE }, - { 0x180390, 1, RI_E2E3E3B0_ONLINE }, - { 0x180398, 1, RI_E2E3E3B0_ONLINE }, - { 0x1803a0, 5, RI_E2E3E3B0_ONLINE }, - { 0x1803b4, 2, RI_E3E3B0_ONLINE }, - { 0x180400, 1, RI_ALL_ONLINE }, - { 0x180404, 255, RI_E1E1H_OFFLINE }, - { 0x181000, 4, RI_ALL_ONLINE }, - { 0x181010, 1020, RI_ALL_OFFLINE }, - { 0x182000, 4, RI_E3E3B0_ONLINE }, - { 0x1a0000, 1, RI_ALL_ONLINE }, - { 0x1a0004, 5631, RI_ALL_OFFLINE }, - { 0x1a5800, 2560, RI_E1HE2E3E3B0_OFFLINE }, - { 
0x1a8000, 1, RI_ALL_ONLINE }, - { 0x1a8004, 8191, RI_E1HE2E3E3B0_OFFLINE }, - { 0x1b0000, 1, RI_ALL_ONLINE }, - { 0x1b0004, 15, RI_E1H_OFFLINE }, - { 0x1b0040, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b0044, 239, RI_E1H_OFFLINE }, - { 0x1b0400, 1, RI_ALL_ONLINE }, - { 0x1b0404, 255, RI_E1H_OFFLINE }, - { 0x1b0800, 1, RI_ALL_ONLINE }, - { 0x1b0840, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b0c00, 1, RI_ALL_ONLINE }, - { 0x1b1000, 1, RI_ALL_ONLINE }, - { 0x1b1040, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b1400, 1, RI_ALL_ONLINE }, - { 0x1b1440, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b1480, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b14c0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b1800, 128, RI_ALL_OFFLINE }, - { 0x1b1c00, 128, RI_ALL_OFFLINE }, - { 0x1b2000, 1, RI_ALL_ONLINE }, - { 0x1b2400, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b2404, 5631, RI_E2E3E3B0_OFFLINE }, - { 0x1b8000, 1, RI_ALL_ONLINE }, - { 0x1b8040, 1, RI_ALL_ONLINE }, - { 0x1b8080, 1, RI_ALL_ONLINE }, - { 0x1b80c0, 1, RI_ALL_ONLINE }, - { 0x1b8100, 1, RI_ALL_ONLINE }, - { 0x1b8140, 1, RI_ALL_ONLINE }, - { 0x1b8180, 1, RI_ALL_ONLINE }, - { 0x1b81c0, 1, RI_ALL_ONLINE }, - { 0x1b8200, 1, RI_ALL_ONLINE }, - { 0x1b8240, 1, RI_ALL_ONLINE }, - { 0x1b8280, 1, RI_ALL_ONLINE }, - { 0x1b82c0, 1, RI_ALL_ONLINE }, - { 0x1b8300, 1, RI_ALL_ONLINE }, - { 0x1b8340, 1, RI_ALL_ONLINE }, - { 0x1b8380, 1, RI_ALL_ONLINE }, - { 0x1b83c0, 1, RI_ALL_ONLINE }, - { 0x1b8400, 1, RI_ALL_ONLINE }, - { 0x1b8440, 1, RI_ALL_ONLINE }, - { 0x1b8480, 1, RI_ALL_ONLINE }, - { 0x1b84c0, 1, RI_ALL_ONLINE }, - { 0x1b8500, 1, RI_ALL_ONLINE }, - { 0x1b8540, 1, RI_ALL_ONLINE }, - { 0x1b8580, 1, RI_ALL_ONLINE }, - { 0x1b85c0, 19, RI_E2E3E3B0_ONLINE }, - { 0x1b8800, 1, RI_ALL_ONLINE }, - { 0x1b8840, 1, RI_ALL_ONLINE }, - { 0x1b8880, 1, RI_ALL_ONLINE }, - { 0x1b88c0, 1, RI_ALL_ONLINE }, - { 0x1b8900, 1, RI_ALL_ONLINE }, - { 0x1b8940, 1, RI_ALL_ONLINE }, - { 0x1b8980, 1, RI_ALL_ONLINE }, - { 0x1b89c0, 1, RI_ALL_ONLINE }, - { 0x1b8a00, 1, RI_ALL_ONLINE }, - { 0x1b8a40, 1, RI_ALL_ONLINE }, - { 0x1b8a80, 1, RI_ALL_ONLINE }, - { 0x1b8ac0, 1, RI_ALL_ONLINE }, - { 0x1b8b00, 1, RI_ALL_ONLINE }, - { 0x1b8b40, 1, RI_ALL_ONLINE }, - { 0x1b8b80, 1, RI_ALL_ONLINE }, - { 0x1b8bc0, 1, RI_ALL_ONLINE }, - { 0x1b8c00, 1, RI_ALL_ONLINE }, - { 0x1b8c40, 1, RI_ALL_ONLINE }, - { 0x1b8c80, 1, RI_ALL_ONLINE }, - { 0x1b8cc0, 1, RI_ALL_ONLINE }, - { 0x1b8cc4, 1, RI_E2E3E3B0_ONLINE }, - { 0x1b8d00, 1, RI_ALL_ONLINE }, - { 0x1b8d40, 1, RI_ALL_ONLINE }, - { 0x1b8d80, 1, RI_ALL_ONLINE }, - { 0x1b8dc0, 1, RI_ALL_ONLINE }, - { 0x1b8e00, 1, RI_ALL_ONLINE }, - { 0x1b8e40, 1, RI_ALL_ONLINE }, - { 0x1b8e80, 1, RI_ALL_ONLINE }, - { 0x1b8e84, 1, RI_E2E3E3B0_ONLINE }, - { 0x1b8ec0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b8f00, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b8f40, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b8f80, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b8fc0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b8fc4, 2, RI_E2E3E3B0_ONLINE }, - { 0x1b8fd0, 6, RI_E2E3E3B0_ONLINE }, - { 0x1b8fe8, 2, RI_E3E3B0_ONLINE }, - { 0x1b9000, 1, RI_E2E3E3B0_ONLINE }, - { 0x1b9040, 3, RI_E2E3E3B0_ONLINE }, - { 0x1b905c, 1, RI_E3E3B0_ONLINE }, - { 0x1b9064, 1, RI_E3B0_ONLINE }, - { 0x1b9080, 10, RI_E3B0_ONLINE }, - { 0x1b9400, 14, RI_E2E3E3B0_ONLINE }, - { 0x1b943c, 19, RI_E2E3E3B0_ONLINE }, - { 0x1b9490, 10, RI_E2E3E3B0_ONLINE }, - { 0x1c0000, 2, RI_ALL_ONLINE }, - { 0x200000, 65, RI_ALL_ONLINE }, - { 0x20014c, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x200200, 58, RI_ALL_ONLINE }, - { 0x200340, 4, RI_ALL_ONLINE }, - { 0x200380, 1, RI_E2E3E3B0_ONLINE }, - { 0x200388, 1, RI_E2E3E3B0_ONLINE }, - { 
0x200390, 1, RI_E2E3E3B0_ONLINE }, - { 0x200398, 1, RI_E2E3E3B0_ONLINE }, - { 0x2003a0, 1, RI_E2E3E3B0_ONLINE }, - { 0x2003a8, 2, RI_E2E3E3B0_ONLINE }, - { 0x200400, 1, RI_ALL_ONLINE }, - { 0x200404, 255, RI_E1E1H_OFFLINE }, - { 0x202000, 4, RI_ALL_ONLINE }, - { 0x202010, 2044, RI_ALL_OFFLINE }, - { 0x204000, 4, RI_E3E3B0_ONLINE }, - { 0x220000, 1, RI_ALL_ONLINE }, - { 0x220004, 5631, RI_ALL_OFFLINE }, - { 0x225800, 2560, RI_E1HE2E3E3B0_OFFLINE }, - { 0x228000, 1, RI_ALL_ONLINE }, - { 0x228004, 8191, RI_E1HE2E3E3B0_OFFLINE }, - { 0x230000, 1, RI_ALL_ONLINE }, - { 0x230004, 15, RI_E1H_OFFLINE }, - { 0x230040, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x230044, 239, RI_E1H_OFFLINE }, - { 0x230400, 1, RI_ALL_ONLINE }, - { 0x230404, 255, RI_E1H_OFFLINE }, - { 0x230800, 1, RI_ALL_ONLINE }, - { 0x230840, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x230c00, 1, RI_ALL_ONLINE }, - { 0x231000, 1, RI_ALL_ONLINE }, - { 0x231040, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x231400, 1, RI_ALL_ONLINE }, - { 0x231440, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x231480, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2314c0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x231800, 128, RI_ALL_OFFLINE }, - { 0x231c00, 128, RI_ALL_OFFLINE }, - { 0x232000, 1, RI_ALL_ONLINE }, - { 0x232400, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x232404, 5631, RI_E2E3E3B0_OFFLINE }, - { 0x238000, 1, RI_ALL_ONLINE }, - { 0x238040, 1, RI_ALL_ONLINE }, - { 0x238080, 1, RI_ALL_ONLINE }, - { 0x2380c0, 1, RI_ALL_ONLINE }, - { 0x238100, 1, RI_ALL_ONLINE }, - { 0x238140, 1, RI_ALL_ONLINE }, - { 0x238180, 1, RI_ALL_ONLINE }, - { 0x2381c0, 1, RI_ALL_ONLINE }, - { 0x238200, 1, RI_ALL_ONLINE }, - { 0x238240, 1, RI_ALL_ONLINE }, - { 0x238280, 1, RI_ALL_ONLINE }, - { 0x2382c0, 1, RI_ALL_ONLINE }, - { 0x238300, 1, RI_ALL_ONLINE }, - { 0x238340, 1, RI_ALL_ONLINE }, - { 0x238380, 1, RI_ALL_ONLINE }, - { 0x2383c0, 1, RI_ALL_ONLINE }, - { 0x238400, 1, RI_ALL_ONLINE }, - { 0x238440, 1, RI_ALL_ONLINE }, - { 0x238480, 1, RI_ALL_ONLINE }, - { 0x2384c0, 1, RI_ALL_ONLINE }, - { 0x238500, 1, RI_ALL_ONLINE }, - { 0x238540, 1, RI_ALL_ONLINE }, - { 0x238580, 1, RI_ALL_ONLINE }, - { 0x2385c0, 19, RI_E2E3E3B0_ONLINE }, - { 0x238800, 1, RI_ALL_ONLINE }, - { 0x238840, 1, RI_ALL_ONLINE }, - { 0x238880, 1, RI_ALL_ONLINE }, - { 0x2388c0, 1, RI_ALL_ONLINE }, - { 0x238900, 1, RI_ALL_ONLINE }, - { 0x238940, 1, RI_ALL_ONLINE }, - { 0x238980, 1, RI_ALL_ONLINE }, - { 0x2389c0, 1, RI_ALL_ONLINE }, - { 0x238a00, 1, RI_ALL_ONLINE }, - { 0x238a40, 1, RI_ALL_ONLINE }, - { 0x238a80, 1, RI_ALL_ONLINE }, - { 0x238ac0, 1, RI_ALL_ONLINE }, - { 0x238b00, 1, RI_ALL_ONLINE }, - { 0x238b40, 1, RI_ALL_ONLINE }, - { 0x238b80, 1, RI_ALL_ONLINE }, - { 0x238bc0, 1, RI_ALL_ONLINE }, - { 0x238c00, 1, RI_ALL_ONLINE }, - { 0x238c40, 1, RI_ALL_ONLINE }, - { 0x238c80, 1, RI_ALL_ONLINE }, - { 0x238cc0, 1, RI_ALL_ONLINE }, - { 0x238cc4, 1, RI_E2E3E3B0_ONLINE }, - { 0x238d00, 1, RI_ALL_ONLINE }, - { 0x238d40, 1, RI_ALL_ONLINE }, - { 0x238d80, 1, RI_ALL_ONLINE }, - { 0x238dc0, 1, RI_ALL_ONLINE }, - { 0x238e00, 1, RI_ALL_ONLINE }, - { 0x238e40, 1, RI_ALL_ONLINE }, - { 0x238e80, 1, RI_ALL_ONLINE }, - { 0x238e84, 1, RI_E2E3E3B0_ONLINE }, - { 0x238ec0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x238f00, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x238f40, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x238f80, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x238fc0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x238fc4, 2, RI_E2E3E3B0_ONLINE }, - { 0x238fd0, 6, RI_E2E3E3B0_ONLINE }, - { 0x238fe8, 2, RI_E3E3B0_ONLINE }, - { 0x239000, 1, RI_E2E3E3B0_ONLINE }, - { 0x239040, 3, RI_E2E3E3B0_ONLINE }, - { 0x23905c, 1, RI_E3E3B0_ONLINE }, - { 
0x239064, 1, RI_E3B0_ONLINE }, - { 0x239080, 10, RI_E3B0_ONLINE }, - { 0x240000, 2, RI_ALL_ONLINE }, - { 0x280000, 65, RI_ALL_ONLINE }, - { 0x28014c, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x280200, 58, RI_ALL_ONLINE }, - { 0x280340, 4, RI_ALL_ONLINE }, - { 0x280380, 1, RI_E2E3E3B0_ONLINE }, - { 0x280388, 1, RI_E2E3E3B0_ONLINE }, - { 0x280390, 1, RI_E2E3E3B0_ONLINE }, - { 0x280398, 1, RI_E2E3E3B0_ONLINE }, - { 0x2803a0, 1, RI_E2E3E3B0_ONLINE }, - { 0x2803a8, 2, RI_E2E3E3B0_ONLINE }, - { 0x280400, 1, RI_ALL_ONLINE }, - { 0x280404, 255, RI_E1E1H_OFFLINE }, - { 0x282000, 4, RI_ALL_ONLINE }, - { 0x282010, 2044, RI_ALL_OFFLINE }, - { 0x284000, 4, RI_E3E3B0_ONLINE }, - { 0x2a0000, 1, RI_ALL_ONLINE }, - { 0x2a0004, 5631, RI_ALL_OFFLINE }, - { 0x2a5800, 2560, RI_E1HE2E3E3B0_OFFLINE }, - { 0x2a8000, 1, RI_ALL_ONLINE }, - { 0x2a8004, 8191, RI_E1HE2E3E3B0_OFFLINE }, - { 0x2b0000, 1, RI_ALL_ONLINE }, - { 0x2b0004, 15, RI_E1H_OFFLINE }, - { 0x2b0040, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b0044, 239, RI_E1H_OFFLINE }, - { 0x2b0400, 1, RI_ALL_ONLINE }, - { 0x2b0404, 255, RI_E1H_OFFLINE }, - { 0x2b0800, 1, RI_ALL_ONLINE }, - { 0x2b0840, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b0c00, 1, RI_ALL_ONLINE }, - { 0x2b1000, 1, RI_ALL_ONLINE }, - { 0x2b1040, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b1400, 1, RI_ALL_ONLINE }, - { 0x2b1440, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b1480, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b14c0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b1800, 128, RI_ALL_OFFLINE }, - { 0x2b1c00, 128, RI_ALL_OFFLINE }, - { 0x2b2000, 1, RI_ALL_ONLINE }, - { 0x2b2400, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b2404, 5631, RI_E2E3E3B0_OFFLINE }, - { 0x2b8000, 1, RI_ALL_ONLINE }, - { 0x2b8040, 1, RI_ALL_ONLINE }, - { 0x2b8080, 1, RI_ALL_ONLINE }, - { 0x2b80c0, 1, RI_ALL_ONLINE }, - { 0x2b8100, 1, RI_ALL_ONLINE }, - { 0x2b8140, 1, RI_ALL_ONLINE }, - { 0x2b8180, 1, RI_ALL_ONLINE }, - { 0x2b81c0, 1, RI_ALL_ONLINE }, - { 0x2b8200, 1, RI_ALL_ONLINE }, - { 0x2b8240, 1, RI_ALL_ONLINE }, - { 0x2b8280, 1, RI_ALL_ONLINE }, - { 0x2b82c0, 1, RI_ALL_ONLINE }, - { 0x2b8300, 1, RI_ALL_ONLINE }, - { 0x2b8340, 1, RI_ALL_ONLINE }, - { 0x2b8380, 1, RI_ALL_ONLINE }, - { 0x2b83c0, 1, RI_ALL_ONLINE }, - { 0x2b8400, 1, RI_ALL_ONLINE }, - { 0x2b8440, 1, RI_ALL_ONLINE }, - { 0x2b8480, 1, RI_ALL_ONLINE }, - { 0x2b84c0, 1, RI_ALL_ONLINE }, - { 0x2b8500, 1, RI_ALL_ONLINE }, - { 0x2b8540, 1, RI_ALL_ONLINE }, - { 0x2b8580, 1, RI_ALL_ONLINE }, - { 0x2b85c0, 19, RI_E2E3E3B0_ONLINE }, - { 0x2b8800, 1, RI_ALL_ONLINE }, - { 0x2b8840, 1, RI_ALL_ONLINE }, - { 0x2b8880, 1, RI_ALL_ONLINE }, - { 0x2b88c0, 1, RI_ALL_ONLINE }, - { 0x2b8900, 1, RI_ALL_ONLINE }, - { 0x2b8940, 1, RI_ALL_ONLINE }, - { 0x2b8980, 1, RI_ALL_ONLINE }, - { 0x2b89c0, 1, RI_ALL_ONLINE }, - { 0x2b8a00, 1, RI_ALL_ONLINE }, - { 0x2b8a40, 1, RI_ALL_ONLINE }, - { 0x2b8a80, 1, RI_ALL_ONLINE }, - { 0x2b8ac0, 1, RI_ALL_ONLINE }, - { 0x2b8b00, 1, RI_ALL_ONLINE }, - { 0x2b8b40, 1, RI_ALL_ONLINE }, - { 0x2b8b80, 1, RI_ALL_ONLINE }, - { 0x2b8bc0, 1, RI_ALL_ONLINE }, - { 0x2b8c00, 1, RI_ALL_ONLINE }, - { 0x2b8c40, 1, RI_ALL_ONLINE }, - { 0x2b8c80, 1, RI_ALL_ONLINE }, - { 0x2b8cc0, 1, RI_ALL_ONLINE }, - { 0x2b8cc4, 1, RI_E2E3E3B0_ONLINE }, - { 0x2b8d00, 1, RI_ALL_ONLINE }, - { 0x2b8d40, 1, RI_ALL_ONLINE }, - { 0x2b8d80, 1, RI_ALL_ONLINE }, - { 0x2b8dc0, 1, RI_ALL_ONLINE }, - { 0x2b8e00, 1, RI_ALL_ONLINE }, - { 0x2b8e40, 1, RI_ALL_ONLINE }, - { 0x2b8e80, 1, RI_ALL_ONLINE }, - { 0x2b8e84, 1, RI_E2E3E3B0_ONLINE }, - { 0x2b8ec0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b8f00, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b8f40, 1, 
RI_E1HE2E3E3B0_ONLINE }, - { 0x2b8f80, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b8fc0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b8fc4, 2, RI_E2E3E3B0_ONLINE }, - { 0x2b8fd0, 6, RI_E2E3E3B0_ONLINE }, - { 0x2b8fe8, 2, RI_E3E3B0_ONLINE }, - { 0x2b9000, 1, RI_E2E3E3B0_ONLINE }, - { 0x2b9040, 3, RI_E2E3E3B0_ONLINE }, - { 0x2b905c, 1, RI_E3E3B0_ONLINE }, - { 0x2b9064, 1, RI_E3B0_ONLINE }, - { 0x2b9080, 10, RI_E3B0_ONLINE }, - { 0x2b9400, 14, RI_E2E3E3B0_ONLINE }, - { 0x2b943c, 19, RI_E2E3E3B0_ONLINE }, - { 0x2b9490, 10, RI_E2E3E3B0_ONLINE }, - { 0x2c0000, 2, RI_ALL_ONLINE }, - { 0x300000, 65, RI_ALL_ONLINE }, - { 0x30014c, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x300200, 58, RI_ALL_ONLINE }, - { 0x300340, 4, RI_ALL_ONLINE }, - { 0x300380, 1, RI_E2E3E3B0_ONLINE }, - { 0x300388, 1, RI_E2E3E3B0_ONLINE }, - { 0x300390, 1, RI_E2E3E3B0_ONLINE }, - { 0x300398, 1, RI_E2E3E3B0_ONLINE }, - { 0x3003a0, 1, RI_E2E3E3B0_ONLINE }, - { 0x3003a8, 2, RI_E2E3E3B0_ONLINE }, - { 0x300400, 1, RI_ALL_ONLINE }, - { 0x300404, 255, RI_E1E1H_OFFLINE }, - { 0x302000, 4, RI_ALL_ONLINE }, - { 0x302010, 2044, RI_ALL_OFFLINE }, - { 0x304000, 4, RI_E3E3B0_ONLINE }, - { 0x320000, 1, RI_ALL_ONLINE }, - { 0x320004, 5631, RI_ALL_OFFLINE }, - { 0x325800, 2560, RI_E1HE2E3E3B0_OFFLINE }, - { 0x328000, 1, RI_ALL_ONLINE }, - { 0x328004, 8191, RI_E1HE2E3E3B0_OFFLINE }, - { 0x330000, 1, RI_ALL_ONLINE }, - { 0x330004, 15, RI_E1H_OFFLINE }, - { 0x330040, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x330044, 239, RI_E1H_OFFLINE }, - { 0x330400, 1, RI_ALL_ONLINE }, - { 0x330404, 255, RI_E1H_OFFLINE }, - { 0x330800, 1, RI_ALL_ONLINE }, - { 0x330840, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x330c00, 1, RI_ALL_ONLINE }, - { 0x331000, 1, RI_ALL_ONLINE }, - { 0x331040, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x331400, 1, RI_ALL_ONLINE }, - { 0x331440, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x331480, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x3314c0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x331800, 128, RI_ALL_OFFLINE }, - { 0x331c00, 128, RI_ALL_OFFLINE }, - { 0x332000, 1, RI_ALL_ONLINE }, - { 0x332400, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x332404, 5631, RI_E2E3E3B0_OFFLINE }, - { 0x338000, 1, RI_ALL_ONLINE }, - { 0x338040, 1, RI_ALL_ONLINE }, - { 0x338080, 1, RI_ALL_ONLINE }, - { 0x3380c0, 1, RI_ALL_ONLINE }, - { 0x338100, 1, RI_ALL_ONLINE }, - { 0x338140, 1, RI_ALL_ONLINE }, - { 0x338180, 1, RI_ALL_ONLINE }, - { 0x3381c0, 1, RI_ALL_ONLINE }, - { 0x338200, 1, RI_ALL_ONLINE }, - { 0x338240, 1, RI_ALL_ONLINE }, - { 0x338280, 1, RI_ALL_ONLINE }, - { 0x3382c0, 1, RI_ALL_ONLINE }, - { 0x338300, 1, RI_ALL_ONLINE }, - { 0x338340, 1, RI_ALL_ONLINE }, - { 0x338380, 1, RI_ALL_ONLINE }, - { 0x3383c0, 1, RI_ALL_ONLINE }, - { 0x338400, 1, RI_ALL_ONLINE }, - { 0x338440, 1, RI_ALL_ONLINE }, - { 0x338480, 1, RI_ALL_ONLINE }, - { 0x3384c0, 1, RI_ALL_ONLINE }, - { 0x338500, 1, RI_ALL_ONLINE }, - { 0x338540, 1, RI_ALL_ONLINE }, - { 0x338580, 1, RI_ALL_ONLINE }, - { 0x3385c0, 19, RI_E2E3E3B0_ONLINE }, - { 0x338800, 1, RI_ALL_ONLINE }, - { 0x338840, 1, RI_ALL_ONLINE }, - { 0x338880, 1, RI_ALL_ONLINE }, - { 0x3388c0, 1, RI_ALL_ONLINE }, - { 0x338900, 1, RI_ALL_ONLINE }, - { 0x338940, 1, RI_ALL_ONLINE }, - { 0x338980, 1, RI_ALL_ONLINE }, - { 0x3389c0, 1, RI_ALL_ONLINE }, - { 0x338a00, 1, RI_ALL_ONLINE }, - { 0x338a40, 1, RI_ALL_ONLINE }, - { 0x338a80, 1, RI_ALL_ONLINE }, - { 0x338ac0, 1, RI_ALL_ONLINE }, - { 0x338b00, 1, RI_ALL_ONLINE }, - { 0x338b40, 1, RI_ALL_ONLINE }, - { 0x338b80, 1, RI_ALL_ONLINE }, - { 0x338bc0, 1, RI_ALL_ONLINE }, - { 0x338c00, 1, RI_ALL_ONLINE }, - { 0x338c40, 1, RI_ALL_ONLINE }, - { 0x338c80, 1, RI_ALL_ONLINE }, - { 
0x338cc0, 1, RI_ALL_ONLINE }, - { 0x338cc4, 1, RI_E2E3E3B0_ONLINE }, - { 0x338d00, 1, RI_ALL_ONLINE }, - { 0x338d40, 1, RI_ALL_ONLINE }, - { 0x338d80, 1, RI_ALL_ONLINE }, - { 0x338dc0, 1, RI_ALL_ONLINE }, - { 0x338e00, 1, RI_ALL_ONLINE }, - { 0x338e40, 1, RI_ALL_ONLINE }, - { 0x338e80, 1, RI_ALL_ONLINE }, - { 0x338e84, 1, RI_E2E3E3B0_ONLINE }, - { 0x338ec0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x338f00, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x338f40, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x338f80, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x338fc0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x338fc4, 2, RI_E2E3E3B0_ONLINE }, - { 0x338fd0, 6, RI_E2E3E3B0_ONLINE }, - { 0x338fe8, 2, RI_E3E3B0_ONLINE }, - { 0x339000, 1, RI_E2E3E3B0_ONLINE }, - { 0x339040, 3, RI_E2E3E3B0_ONLINE }, - { 0x33905c, 1, RI_E3E3B0_ONLINE }, - { 0x339064, 1, RI_E3B0_ONLINE }, - { 0x339080, 10, RI_E3B0_ONLINE }, - { 0x340000, 2, RI_ALL_ONLINE }, + { 0x2000, 1, 0x1f, 0xfff}, + { 0x2004, 1, 0x1f, 0x1fff}, + { 0x2008, 25, 0x1f, 0xfff}, + { 0x206c, 1, 0x1f, 0x1fff}, + { 0x2070, 313, 0x1f, 0xfff}, + { 0x2800, 103, 0x1f, 0xfff}, + { 0x3000, 287, 0x1f, 0xfff}, + { 0x3800, 331, 0x1f, 0xfff}, + { 0x8800, 6, 0x1f, 0x924}, + { 0x8818, 1, 0x1e, 0x924}, + { 0x9000, 4, 0x1c, 0x924}, + { 0x9010, 7, 0x1c, 0xfff}, + { 0x902c, 1, 0x1c, 0x924}, + { 0x9030, 1, 0x1c, 0xfff}, + { 0x9034, 13, 0x1c, 0x924}, + { 0x9068, 16, 0x1c, 0xfff}, + { 0x90a8, 98, 0x1c, 0x924}, + { 0x9230, 2, 0x1c, 0xfff}, + { 0x9238, 3, 0x1c, 0x924}, + { 0x9244, 1, 0x1c, 0xfff}, + { 0x9248, 1, 0x1c, 0x924}, + { 0x924c, 1, 0x4, 0x924}, + { 0x9250, 16, 0x1c, 0x924}, + { 0x92a8, 2, 0x1c, 0x1fff}, + { 0x92b4, 1, 0x1c, 0x1fff}, + { 0x9400, 33, 0x1c, 0x924}, + { 0x9484, 5, 0x18, 0x924}, + { 0xa000, 27, 0x1f, 0x924}, + { 0xa06c, 1, 0x3, 0x924}, + { 0xa070, 2, 0x1f, 0x924}, + { 0xa078, 1, 0x1f, 0x1fff}, + { 0xa07c, 31, 0x1f, 0x924}, + { 0xa0f8, 1, 0x1f, 0x1fff}, + { 0xa0fc, 3, 0x1f, 0x924}, + { 0xa108, 1, 0x1f, 0x1fff}, + { 0xa10c, 3, 0x1f, 0x924}, + { 0xa118, 1, 0x1f, 0x1fff}, + { 0xa11c, 28, 0x1f, 0x924}, + { 0xa18c, 4, 0x3, 0x924}, + { 0xa19c, 3, 0x1f, 0x924}, + { 0xa1a8, 1, 0x1f, 0x1fff}, + { 0xa1ac, 3, 0x1f, 0x924}, + { 0xa1b8, 1, 0x1f, 0x1fff}, + { 0xa1bc, 54, 0x1f, 0x924}, + { 0xa294, 2, 0x3, 0x924}, + { 0xa29c, 2, 0x1f, 0x924}, + { 0xa2a4, 2, 0x7, 0x924}, + { 0xa2ac, 2, 0x1f, 0x924}, + { 0xa2b4, 1, 0x1f, 0x1fff}, + { 0xa2b8, 49, 0x1f, 0x924}, + { 0xa38c, 2, 0x1f, 0x1fff}, + { 0xa398, 1, 0x1f, 0x1fff}, + { 0xa39c, 7, 0x1e, 0x924}, + { 0xa3b8, 2, 0x18, 0x924}, + { 0xa3c0, 1, 0x1e, 0x924}, + { 0xa3c4, 1, 0x1e, 0xfff}, + { 0xa3c8, 1, 0x1e, 0x924}, + { 0xa3d0, 1, 0x1e, 0x924}, + { 0xa3d8, 1, 0x1e, 0x924}, + { 0xa3e0, 1, 0x1e, 0x924}, + { 0xa3e8, 1, 0x1e, 0x924}, + { 0xa3f0, 1, 0x1e, 0x924}, + { 0xa3f8, 1, 0x1e, 0x924}, + { 0xa400, 1, 0x1f, 0x924}, + { 0xa404, 1, 0x1f, 0xfff}, + { 0xa408, 2, 0x1f, 0x1fff}, + { 0xa410, 7, 0x1f, 0x924}, + { 0xa42c, 12, 0x1f, 0xfff}, + { 0xa45c, 1, 0x1f, 0x924}, + { 0xa460, 1, 0x1f, 0x1924}, + { 0xa464, 15, 0x1f, 0x924}, + { 0xa4a0, 1, 0x7, 0x924}, + { 0xa4a4, 2, 0x1f, 0x924}, + { 0xa4ac, 2, 0x3, 0x924}, + { 0xa4b4, 1, 0x7, 0x924}, + { 0xa4b8, 2, 0x3, 0x924}, + { 0xa4c0, 3, 0x1f, 0x924}, + { 0xa4cc, 5, 0x3, 0x924}, + { 0xa4e0, 3, 0x1f, 0x924}, + { 0xa4fc, 2, 0x1f, 0x924}, + { 0xa504, 1, 0x3, 0x924}, + { 0xa508, 3, 0x1f, 0x924}, + { 0xa518, 1, 0x1f, 0x924}, + { 0xa520, 1, 0x1f, 0x924}, + { 0xa528, 1, 0x1f, 0x924}, + { 0xa530, 1, 0x1f, 0x924}, + { 0xa538, 1, 0x1f, 0x924}, + { 0xa540, 1, 0x1f, 0x924}, + { 0xa548, 1, 0x3, 0x924}, + { 0xa550, 1, 0x3, 0x924}, + { 0xa558, 1, 0x3, 0x924}, + 
{ 0xa560, 1, 0x3, 0x924}, + { 0xa568, 1, 0x3, 0x924}, + { 0xa570, 1, 0x1f, 0x924}, + { 0xa580, 1, 0x1f, 0x1fff}, + { 0xa590, 1, 0x1f, 0x1fff}, + { 0xa5a0, 1, 0x7, 0x924}, + { 0xa5c0, 1, 0x1f, 0x924}, + { 0xa5e0, 1, 0x1e, 0x924}, + { 0xa5e8, 1, 0x1e, 0x924}, + { 0xa5f0, 1, 0x1e, 0x924}, + { 0xa5f8, 1, 0x6, 0x924}, + { 0xa5fc, 1, 0x1e, 0x924}, + { 0xa600, 5, 0x1e, 0xfff}, + { 0xa614, 1, 0x1e, 0x924}, + { 0xa618, 1, 0x1e, 0xfff}, + { 0xa61c, 1, 0x1e, 0x924}, + { 0xa620, 6, 0x1c, 0x924}, + { 0xa638, 20, 0x4, 0x924}, + { 0xa688, 35, 0x1c, 0x924}, + { 0xa714, 1, 0x1c, 0xfff}, + { 0xa718, 2, 0x1c, 0x924}, + { 0xa720, 1, 0x1c, 0xfff}, + { 0xa724, 3, 0x1c, 0x924}, + { 0xa730, 1, 0x4, 0x924}, + { 0xa734, 2, 0x1c, 0x924}, + { 0xa73c, 4, 0x4, 0x924}, + { 0xa74c, 1, 0x1c, 0x924}, + { 0xa750, 1, 0x1c, 0xfff}, + { 0xa754, 3, 0x1c, 0x924}, + { 0xa760, 5, 0x4, 0x924}, + { 0xa774, 7, 0x1c, 0x924}, + { 0xa790, 15, 0x4, 0x924}, + { 0xa7cc, 4, 0x1c, 0x924}, + { 0xa7e0, 6, 0x18, 0x924}, + { 0xa800, 18, 0x4, 0x924}, + { 0xa848, 33, 0x1c, 0x924}, + { 0xa8cc, 2, 0x18, 0x924}, + { 0xa8d4, 4, 0x1c, 0x924}, + { 0xa8e4, 1, 0x18, 0x924}, + { 0xa8e8, 1, 0x1c, 0x924}, + { 0xa8f0, 1, 0x1c, 0x924}, + { 0xa8f8, 30, 0x18, 0x924}, + { 0xa974, 73, 0x18, 0x924}, + { 0xac30, 1, 0x18, 0x924}, + { 0xac40, 1, 0x18, 0x924}, + { 0xac50, 1, 0x18, 0x924}, + { 0xac60, 1, 0x10, 0x924}, + { 0x10000, 9, 0x1f, 0x924}, + { 0x10024, 1, 0x7, 0x924}, + { 0x10028, 5, 0x1f, 0x924}, + { 0x1003c, 6, 0x7, 0x924}, + { 0x10054, 20, 0x1f, 0x924}, + { 0x100a4, 4, 0x7, 0x924}, + { 0x100b4, 11, 0x1f, 0x924}, + { 0x100e0, 4, 0x7, 0x924}, + { 0x100f0, 8, 0x1f, 0x924}, + { 0x10110, 6, 0x7, 0x924}, + { 0x10128, 110, 0x1f, 0x924}, + { 0x102e0, 4, 0x7, 0x924}, + { 0x102f0, 18, 0x1f, 0x924}, + { 0x10338, 20, 0x7, 0x924}, + { 0x10388, 10, 0x1f, 0x924}, + { 0x103d0, 2, 0x3, 0x1fff}, + { 0x103dc, 1, 0x3, 0x1fff}, + { 0x10400, 6, 0x7, 0x924}, + { 0x10418, 1, 0x1f, 0xfff}, + { 0x1041c, 1, 0x1f, 0x924}, + { 0x10420, 1, 0x1f, 0xfff}, + { 0x10424, 1, 0x1f, 0x924}, + { 0x10428, 1, 0x1f, 0xfff}, + { 0x1042c, 1, 0x1f, 0x924}, + { 0x10430, 10, 0x7, 0x924}, + { 0x10458, 2, 0x1f, 0x924}, + { 0x10460, 1, 0x1f, 0xfff}, + { 0x10464, 4, 0x1f, 0x924}, + { 0x10474, 1, 0x1f, 0xfff}, + { 0x10478, 14, 0x1f, 0x924}, + { 0x104b0, 12, 0x7, 0x924}, + { 0x104e0, 1, 0x1f, 0xfff}, + { 0x104e8, 1, 0x1f, 0x924}, + { 0x104ec, 1, 0x1f, 0xfff}, + { 0x104f4, 1, 0x1f, 0x924}, + { 0x104f8, 1, 0x1f, 0xfff}, + { 0x10500, 2, 0x1f, 0x924}, + { 0x10508, 1, 0x1f, 0xfff}, + { 0x1050c, 9, 0x1f, 0x924}, + { 0x10530, 1, 0x1f, 0xfff}, + { 0x10534, 1, 0x1f, 0x924}, + { 0x10538, 1, 0x1f, 0xfff}, + { 0x1053c, 3, 0x1f, 0x924}, + { 0x10548, 1, 0x1f, 0xfff}, + { 0x1054c, 3, 0x1f, 0x924}, + { 0x10558, 1, 0x1f, 0xfff}, + { 0x1055c, 123, 0x1f, 0x924}, + { 0x10750, 2, 0x7, 0x924}, + { 0x10760, 2, 0x7, 0x924}, + { 0x10770, 2, 0x7, 0x924}, + { 0x10780, 2, 0x7, 0x924}, + { 0x10790, 2, 0x1f, 0x924}, + { 0x107a0, 2, 0x7, 0x924}, + { 0x107b0, 2, 0x7, 0x924}, + { 0x107c0, 2, 0x7, 0x924}, + { 0x107d0, 2, 0x7, 0x924}, + { 0x107e0, 2, 0x1f, 0x924}, + { 0x10880, 2, 0x1f, 0x924}, + { 0x10900, 2, 0x1f, 0x924}, + { 0x16000, 1, 0x6, 0x924}, + { 0x16004, 25, 0x1e, 0x924}, + { 0x16070, 8, 0x1e, 0x924}, + { 0x16090, 4, 0xe, 0x924}, + { 0x160a0, 6, 0x1e, 0x924}, + { 0x160c0, 7, 0x1e, 0x924}, + { 0x160dc, 2, 0x6, 0x924}, + { 0x160e4, 6, 0x1e, 0x924}, + { 0x160fc, 4, 0x1e, 0x1fff}, + { 0x1610c, 2, 0x6, 0x924}, + { 0x16114, 6, 0x1e, 0x924}, + { 0x16140, 48, 0x1e, 0x1fff}, + { 0x16204, 5, 0x1e, 0x924}, + { 0x18000, 1, 0x1e, 0x924}, + { 
0x18008, 1, 0x1e, 0x924}, + { 0x18010, 35, 0x1c, 0x924}, + { 0x180a4, 2, 0x1c, 0x924}, + { 0x180c0, 9, 0x1c, 0x924}, + { 0x180e4, 1, 0xc, 0x924}, + { 0x180e8, 2, 0x1c, 0x924}, + { 0x180f0, 1, 0xc, 0x924}, + { 0x180f4, 79, 0x1c, 0x924}, + { 0x18230, 1, 0xc, 0x924}, + { 0x18234, 2, 0x1c, 0x924}, + { 0x1823c, 1, 0xc, 0x924}, + { 0x18240, 13, 0x1c, 0x924}, + { 0x18274, 1, 0x4, 0x924}, + { 0x18278, 12, 0x1c, 0x924}, + { 0x182a8, 1, 0x1c, 0xfff}, + { 0x182ac, 3, 0x1c, 0x924}, + { 0x182b8, 1, 0x1c, 0xfff}, + { 0x182bc, 19, 0x1c, 0x924}, + { 0x18308, 1, 0x1c, 0xfff}, + { 0x1830c, 3, 0x1c, 0x924}, + { 0x18318, 1, 0x1c, 0xfff}, + { 0x1831c, 7, 0x1c, 0x924}, + { 0x18338, 1, 0x1c, 0xfff}, + { 0x1833c, 3, 0x1c, 0x924}, + { 0x18348, 1, 0x1c, 0xfff}, + { 0x1834c, 28, 0x1c, 0x924}, + { 0x183bc, 2, 0x1c, 0x1fff}, + { 0x183c8, 3, 0x1c, 0x1fff}, + { 0x183d8, 1, 0x1c, 0x1fff}, + { 0x18440, 48, 0x1c, 0x1fff}, + { 0x18500, 15, 0x1c, 0x924}, + { 0x18570, 1, 0x18, 0xfff}, + { 0x18574, 1, 0x18, 0x924}, + { 0x18578, 1, 0x18, 0xfff}, + { 0x1857c, 4, 0x18, 0x924}, + { 0x1858c, 1, 0x18, 0xfff}, + { 0x18590, 1, 0x18, 0x924}, + { 0x18594, 1, 0x18, 0xfff}, + { 0x18598, 32, 0x18, 0x924}, + { 0x18618, 5, 0x10, 0x924}, + { 0x1862c, 4, 0x10, 0xfff}, + { 0x1863c, 16, 0x10, 0x924}, + { 0x18680, 44, 0x10, 0x924}, + { 0x18748, 12, 0x10, 0x924}, + { 0x18788, 1, 0x10, 0x924}, + { 0x1879c, 6, 0x10, 0x924}, + { 0x187c4, 51, 0x10, 0x924}, + { 0x18a00, 48, 0x10, 0x924}, + { 0x20000, 24, 0x1f, 0x924}, + { 0x20060, 8, 0x1f, 0x9e4}, + { 0x20080, 94, 0x1f, 0x924}, + { 0x201f8, 1, 0x3, 0x924}, + { 0x201fc, 1, 0x1f, 0x924}, + { 0x20200, 1, 0x3, 0x924}, + { 0x20204, 1, 0x1f, 0x924}, + { 0x20208, 1, 0x3, 0x924}, + { 0x2020c, 4, 0x1f, 0x924}, + { 0x2021c, 11, 0x1f, 0xfff}, + { 0x20248, 24, 0x1f, 0x924}, + { 0x202b8, 2, 0x1f, 0x1fff}, + { 0x202c4, 1, 0x1f, 0x1fff}, + { 0x202c8, 1, 0x1c, 0x924}, + { 0x202d8, 4, 0x1c, 0x924}, + { 0x202f0, 1, 0x10, 0x924}, + { 0x20400, 1, 0x1f, 0x924}, + { 0x20404, 1, 0x1f, 0xfff}, + { 0x2040c, 2, 0x1f, 0xfff}, + { 0x20414, 2, 0x1f, 0x924}, + { 0x2041c, 2, 0x1f, 0xfff}, + { 0x20424, 2, 0x1f, 0x924}, + { 0x2042c, 18, 0x1e, 0x924}, + { 0x20480, 1, 0x1f, 0x924}, + { 0x20500, 1, 0x1f, 0x924}, + { 0x20600, 1, 0x1f, 0x924}, + { 0x28000, 1, 0x1f, 0x9e4}, + { 0x28004, 255, 0x1f, 0x180}, + { 0x28400, 1, 0x1f, 0x1c0}, + { 0x28404, 255, 0x1f, 0x180}, + { 0x28800, 1, 0x1f, 0x1c0}, + { 0x28804, 255, 0x1f, 0x180}, + { 0x28c00, 1, 0x1f, 0x1c0}, + { 0x28c04, 255, 0x1f, 0x180}, + { 0x29000, 1, 0x1f, 0x1c0}, + { 0x29004, 255, 0x1f, 0x180}, + { 0x29400, 1, 0x1f, 0x1c0}, + { 0x29404, 255, 0x1f, 0x180}, + { 0x29800, 1, 0x1f, 0x1c0}, + { 0x29804, 255, 0x1f, 0x180}, + { 0x29c00, 1, 0x1f, 0x1c0}, + { 0x29c04, 255, 0x1f, 0x180}, + { 0x2a000, 1, 0x1f, 0x1c0}, + { 0x2a004, 255, 0x1f, 0x180}, + { 0x2a400, 1, 0x1f, 0x1c0}, + { 0x2a404, 255, 0x1f, 0x180}, + { 0x2a800, 1, 0x1f, 0x1c0}, + { 0x2a804, 255, 0x1f, 0x180}, + { 0x2ac00, 1, 0x1f, 0x1c0}, + { 0x2ac04, 255, 0x1f, 0x180}, + { 0x2b000, 1, 0x1f, 0x1c0}, + { 0x2b004, 255, 0x1f, 0x180}, + { 0x2b400, 1, 0x1f, 0x1c0}, + { 0x2b404, 255, 0x1f, 0x180}, + { 0x2b800, 1, 0x1f, 0x1c0}, + { 0x2b804, 255, 0x1f, 0x180}, + { 0x2bc00, 1, 0x1f, 0x1c0}, + { 0x2bc04, 255, 0x1f, 0x180}, + { 0x2c000, 1, 0x1f, 0x1c0}, + { 0x2c004, 255, 0x1f, 0x180}, + { 0x2c400, 1, 0x1f, 0x1c0}, + { 0x2c404, 255, 0x1f, 0x180}, + { 0x2c800, 1, 0x1f, 0x1c0}, + { 0x2c804, 255, 0x1f, 0x180}, + { 0x2cc00, 1, 0x1f, 0x1c0}, + { 0x2cc04, 255, 0x1f, 0x180}, + { 0x2d000, 1, 0x1f, 0x1c0}, + { 0x2d004, 255, 0x1f, 0x180}, + { 0x2d400, 1, 
0x1f, 0x1c0}, + { 0x2d404, 255, 0x1f, 0x180}, + { 0x2d800, 1, 0x1f, 0x1c0}, + { 0x2d804, 255, 0x1f, 0x180}, + { 0x2dc00, 1, 0x1f, 0x1c0}, + { 0x2dc04, 255, 0x1f, 0x180}, + { 0x2e000, 1, 0x1f, 0x1c0}, + { 0x2e004, 255, 0x1f, 0x180}, + { 0x2e400, 1, 0x1f, 0x1c0}, + { 0x2e404, 255, 0x1f, 0x180}, + { 0x2e800, 1, 0x1f, 0x1c0}, + { 0x2e804, 255, 0x1f, 0x180}, + { 0x2ec00, 1, 0x1f, 0x1c0}, + { 0x2ec04, 255, 0x1f, 0x180}, + { 0x2f000, 1, 0x1f, 0x1c0}, + { 0x2f004, 255, 0x1f, 0x180}, + { 0x2f400, 1, 0x1f, 0x1c0}, + { 0x2f404, 255, 0x1f, 0x180}, + { 0x2f800, 1, 0x1f, 0x1c0}, + { 0x2f804, 255, 0x1f, 0x180}, + { 0x2fc00, 1, 0x1f, 0x1c0}, + { 0x2fc04, 255, 0x1f, 0x180}, + { 0x30000, 1, 0x1f, 0x9e4}, + { 0x30004, 255, 0x1f, 0x180}, + { 0x30400, 1, 0x1f, 0x1c0}, + { 0x30404, 255, 0x1f, 0x180}, + { 0x30800, 1, 0x1f, 0x1c0}, + { 0x30804, 255, 0x1f, 0x180}, + { 0x30c00, 1, 0x1f, 0x1c0}, + { 0x30c04, 255, 0x1f, 0x180}, + { 0x31000, 1, 0x1f, 0x1c0}, + { 0x31004, 255, 0x1f, 0x180}, + { 0x31400, 1, 0x1f, 0x1c0}, + { 0x31404, 255, 0x1f, 0x180}, + { 0x31800, 1, 0x1f, 0x1c0}, + { 0x31804, 255, 0x1f, 0x180}, + { 0x31c00, 1, 0x1f, 0x1c0}, + { 0x31c04, 255, 0x1f, 0x180}, + { 0x32000, 1, 0x1f, 0x1c0}, + { 0x32004, 255, 0x1f, 0x180}, + { 0x32400, 1, 0x1f, 0x1c0}, + { 0x32404, 255, 0x1f, 0x180}, + { 0x32800, 1, 0x1f, 0x1c0}, + { 0x32804, 255, 0x1f, 0x180}, + { 0x32c00, 1, 0x1f, 0x1c0}, + { 0x32c04, 255, 0x1f, 0x180}, + { 0x33000, 1, 0x1f, 0x1c0}, + { 0x33004, 255, 0x1f, 0x180}, + { 0x33400, 1, 0x1f, 0x1c0}, + { 0x33404, 255, 0x1f, 0x180}, + { 0x33800, 1, 0x1f, 0x1c0}, + { 0x33804, 255, 0x1f, 0x180}, + { 0x33c00, 1, 0x1f, 0x1c0}, + { 0x33c04, 255, 0x1f, 0x180}, + { 0x34000, 1, 0x1f, 0x1c0}, + { 0x34004, 255, 0x1f, 0x180}, + { 0x34400, 1, 0x1f, 0x1c0}, + { 0x34404, 255, 0x1f, 0x180}, + { 0x34800, 1, 0x1f, 0x1c0}, + { 0x34804, 255, 0x1f, 0x180}, + { 0x34c00, 1, 0x1f, 0x1c0}, + { 0x34c04, 255, 0x1f, 0x180}, + { 0x35000, 1, 0x1f, 0x1c0}, + { 0x35004, 255, 0x1f, 0x180}, + { 0x35400, 1, 0x1f, 0x1c0}, + { 0x35404, 255, 0x1f, 0x180}, + { 0x35800, 1, 0x1f, 0x1c0}, + { 0x35804, 255, 0x1f, 0x180}, + { 0x35c00, 1, 0x1f, 0x1c0}, + { 0x35c04, 255, 0x1f, 0x180}, + { 0x36000, 1, 0x1f, 0x1c0}, + { 0x36004, 255, 0x1f, 0x180}, + { 0x36400, 1, 0x1f, 0x1c0}, + { 0x36404, 255, 0x1f, 0x180}, + { 0x36800, 1, 0x1f, 0x1c0}, + { 0x36804, 255, 0x1f, 0x180}, + { 0x36c00, 1, 0x1f, 0x1c0}, + { 0x36c04, 255, 0x1f, 0x180}, + { 0x37000, 1, 0x1f, 0x1c0}, + { 0x37004, 255, 0x1f, 0x180}, + { 0x37400, 1, 0x1f, 0x1c0}, + { 0x37404, 255, 0x1f, 0x180}, + { 0x37800, 1, 0x1f, 0x1c0}, + { 0x37804, 255, 0x1f, 0x180}, + { 0x37c00, 1, 0x1f, 0x1c0}, + { 0x37c04, 255, 0x1f, 0x180}, + { 0x38000, 1, 0x1f, 0x1c0}, + { 0x38004, 255, 0x1f, 0x180}, + { 0x38400, 1, 0x1f, 0x1c0}, + { 0x38404, 255, 0x1f, 0x180}, + { 0x38800, 1, 0x1f, 0x1c0}, + { 0x38804, 255, 0x1f, 0x180}, + { 0x38c00, 1, 0x1f, 0x1c0}, + { 0x38c04, 255, 0x1f, 0x180}, + { 0x39000, 1, 0x1f, 0x1c0}, + { 0x39004, 255, 0x1f, 0x180}, + { 0x39400, 1, 0x1f, 0x1c0}, + { 0x39404, 255, 0x1f, 0x180}, + { 0x39800, 1, 0x1f, 0x1c0}, + { 0x39804, 255, 0x1f, 0x180}, + { 0x39c00, 1, 0x1f, 0x1c0}, + { 0x39c04, 255, 0x1f, 0x180}, + { 0x3a000, 1, 0x1f, 0x1c0}, + { 0x3a004, 255, 0x1f, 0x180}, + { 0x3a400, 1, 0x1f, 0x1c0}, + { 0x3a404, 255, 0x1f, 0x180}, + { 0x3a800, 1, 0x1f, 0x1c0}, + { 0x3a804, 255, 0x1f, 0x180}, + { 0x3ac00, 1, 0x1f, 0x1c0}, + { 0x3ac04, 255, 0x1f, 0x180}, + { 0x3b000, 1, 0x1f, 0x1c0}, + { 0x3b004, 255, 0x1f, 0x180}, + { 0x3b400, 1, 0x1f, 0x1c0}, + { 0x3b404, 255, 0x1f, 0x180}, + { 0x3b800, 1, 0x1f, 0x1c0}, + { 
0x3b804, 255, 0x1f, 0x180}, + { 0x3bc00, 1, 0x1f, 0x1c0}, + { 0x3bc04, 255, 0x1f, 0x180}, + { 0x3c000, 1, 0x1f, 0x1c0}, + { 0x3c004, 255, 0x1f, 0x180}, + { 0x3c400, 1, 0x1f, 0x1c0}, + { 0x3c404, 255, 0x1f, 0x180}, + { 0x3c800, 1, 0x1f, 0x1c0}, + { 0x3c804, 255, 0x1f, 0x180}, + { 0x3cc00, 1, 0x1f, 0x1c0}, + { 0x3cc04, 255, 0x1f, 0x180}, + { 0x3d000, 1, 0x1f, 0x1c0}, + { 0x3d004, 255, 0x1f, 0x180}, + { 0x3d400, 1, 0x1f, 0x1c0}, + { 0x3d404, 255, 0x1f, 0x180}, + { 0x3d800, 1, 0x1f, 0x1c0}, + { 0x3d804, 255, 0x1f, 0x180}, + { 0x3dc00, 1, 0x1f, 0x1c0}, + { 0x3dc04, 255, 0x1f, 0x180}, + { 0x3e000, 1, 0x1f, 0x1c0}, + { 0x3e004, 255, 0x1f, 0x180}, + { 0x3e400, 1, 0x1f, 0x1c0}, + { 0x3e404, 255, 0x1f, 0x180}, + { 0x3e800, 1, 0x1f, 0x1c0}, + { 0x3e804, 255, 0x1f, 0x180}, + { 0x3ec00, 1, 0x1f, 0x1c0}, + { 0x3ec04, 255, 0x1f, 0x180}, + { 0x3f000, 1, 0x1f, 0x1c0}, + { 0x3f004, 255, 0x1f, 0x180}, + { 0x3f400, 1, 0x1f, 0x1c0}, + { 0x3f404, 255, 0x1f, 0x180}, + { 0x3f800, 1, 0x1f, 0x1c0}, + { 0x3f804, 255, 0x1f, 0x180}, + { 0x3fc00, 1, 0x1f, 0x1c0}, + { 0x3fc04, 255, 0x1f, 0x180}, + { 0x40000, 85, 0x1f, 0x924}, + { 0x40154, 13, 0x1f, 0xfff}, + { 0x40198, 2, 0x1f, 0x1fff}, + { 0x401a4, 1, 0x1f, 0x1fff}, + { 0x401a8, 8, 0x1e, 0x924}, + { 0x401c8, 1, 0x2, 0x924}, + { 0x401cc, 2, 0x1e, 0x924}, + { 0x401d4, 2, 0x1c, 0x924}, + { 0x40200, 4, 0x1f, 0x924}, + { 0x40220, 6, 0x1c, 0x924}, + { 0x40238, 8, 0xc, 0x924}, + { 0x40258, 4, 0x1c, 0x924}, + { 0x40268, 2, 0x18, 0x924}, + { 0x40270, 17, 0x10, 0x924}, + { 0x40400, 43, 0x1f, 0x924}, + { 0x404bc, 2, 0x1f, 0x1fff}, + { 0x404c8, 1, 0x1f, 0x1fff}, + { 0x404cc, 3, 0x1e, 0x924}, + { 0x404e0, 1, 0x1c, 0x924}, + { 0x40500, 2, 0x1f, 0x924}, + { 0x40510, 2, 0x1f, 0x924}, + { 0x40520, 2, 0x1f, 0x924}, + { 0x40530, 2, 0x1f, 0x924}, + { 0x40540, 2, 0x1f, 0x924}, + { 0x40550, 10, 0x1c, 0x924}, + { 0x40610, 2, 0x1c, 0x924}, + { 0x42000, 164, 0x1f, 0x924}, + { 0x422b0, 2, 0x1f, 0x1fff}, + { 0x422bc, 1, 0x1f, 0x1fff}, + { 0x422c0, 4, 0x1c, 0x924}, + { 0x422d4, 5, 0x1e, 0x924}, + { 0x422e8, 1, 0x1c, 0x924}, + { 0x42400, 49, 0x1f, 0x924}, + { 0x424c8, 32, 0x1f, 0x924}, + { 0x42548, 1, 0x1f, 0xfff}, + { 0x4254c, 1, 0x1f, 0x924}, + { 0x42550, 1, 0x1f, 0xfff}, + { 0x42554, 1, 0x1f, 0x924}, + { 0x42558, 1, 0x1f, 0xfff}, + { 0x4255c, 1, 0x1f, 0x924}, + { 0x42568, 2, 0x1f, 0x924}, + { 0x42640, 5, 0x1c, 0x924}, + { 0x42800, 1, 0x1f, 0x924}, + { 0x50000, 1, 0x1f, 0x1fff}, + { 0x50004, 19, 0x1f, 0x924}, + { 0x50050, 8, 0x1f, 0x93c}, + { 0x50070, 60, 0x1f, 0x924}, + { 0x50160, 8, 0x1f, 0xfff}, + { 0x50180, 20, 0x1f, 0x924}, + { 0x501e0, 2, 0x1f, 0x1fff}, + { 0x501ec, 1, 0x1f, 0x1fff}, + { 0x501f0, 4, 0x1e, 0x924}, + { 0x50200, 1, 0x1f, 0x924}, + { 0x50204, 1, 0x1f, 0xfff}, + { 0x5020c, 2, 0x1f, 0xfff}, + { 0x50214, 2, 0x1f, 0x924}, + { 0x5021c, 1, 0x1f, 0xfff}, + { 0x50220, 2, 0x1f, 0x924}, + { 0x50228, 6, 0x1e, 0x924}, + { 0x50240, 1, 0x1f, 0x924}, + { 0x50280, 1, 0x1f, 0x924}, + { 0x50300, 1, 0x1c, 0x924}, + { 0x5030c, 1, 0x1c, 0x924}, + { 0x50318, 1, 0x1c, 0x934}, + { 0x5031c, 1, 0x1c, 0x924}, + { 0x50320, 2, 0x1c, 0x934}, + { 0x50330, 1, 0x10, 0x924}, + { 0x52000, 1, 0x1f, 0x924}, + { 0x54000, 1, 0x1f, 0x93c}, + { 0x54004, 255, 0x1f, 0x30}, + { 0x54400, 1, 0x1f, 0x38}, + { 0x54404, 255, 0x1f, 0x30}, + { 0x54800, 1, 0x1f, 0x38}, + { 0x54804, 255, 0x1f, 0x30}, + { 0x54c00, 1, 0x1f, 0x38}, + { 0x54c04, 255, 0x1f, 0x30}, + { 0x55000, 1, 0x1f, 0x38}, + { 0x55004, 255, 0x1f, 0x30}, + { 0x55400, 1, 0x1f, 0x38}, + { 0x55404, 255, 0x1f, 0x30}, + { 0x55800, 1, 0x1f, 0x38}, + { 0x55804, 255, 0x1f, 
0x30}, + { 0x55c00, 1, 0x1f, 0x38}, + { 0x55c04, 255, 0x1f, 0x30}, + { 0x56000, 1, 0x1f, 0x38}, + { 0x56004, 255, 0x1f, 0x30}, + { 0x56400, 1, 0x1f, 0x38}, + { 0x56404, 255, 0x1f, 0x30}, + { 0x56800, 1, 0x1f, 0x38}, + { 0x56804, 255, 0x1f, 0x30}, + { 0x56c00, 1, 0x1f, 0x38}, + { 0x56c04, 255, 0x1f, 0x30}, + { 0x57000, 1, 0x1f, 0x38}, + { 0x57004, 255, 0x1f, 0x30}, + { 0x58000, 1, 0x1f, 0x934}, + { 0x58004, 8191, 0x3, 0x30}, + { 0x60000, 26, 0x1f, 0x924}, + { 0x60068, 8, 0x3, 0x924}, + { 0x60088, 2, 0x1f, 0x924}, + { 0x60090, 1, 0x1f, 0xfff}, + { 0x60094, 9, 0x1f, 0x924}, + { 0x600b8, 9, 0x3, 0x924}, + { 0x600dc, 1, 0x1f, 0x924}, + { 0x600e0, 5, 0x3, 0x924}, + { 0x600f4, 1, 0x7, 0x924}, + { 0x600f8, 1, 0x3, 0x924}, + { 0x600fc, 8, 0x1f, 0x924}, + { 0x6012c, 2, 0x1f, 0x1fff}, + { 0x60138, 1, 0x1f, 0x1fff}, + { 0x6013c, 24, 0x2, 0x924}, + { 0x6019c, 2, 0x1c, 0x924}, + { 0x601ac, 18, 0x1c, 0x924}, + { 0x60200, 1, 0x1f, 0xb6d}, + { 0x60204, 2, 0x1f, 0x249}, + { 0x60210, 13, 0x1c, 0x924}, + { 0x60244, 16, 0x10, 0x924}, + { 0x61000, 1, 0x1f, 0xb6d}, + { 0x61004, 511, 0x1f, 0x249}, + { 0x61800, 512, 0x18, 0x249}, + { 0x70000, 8, 0x1f, 0xb6d}, + { 0x70020, 8184, 0x1f, 0x249}, + { 0x78000, 8192, 0x18, 0x249}, + { 0x85000, 3, 0x1f, 0x1000}, + { 0x8501c, 7, 0x1f, 0x1000}, + { 0x85048, 1, 0x1f, 0x1000}, + { 0x85200, 32, 0x1f, 0x1000}, + { 0xa0000, 16384, 0x3, 0x1000}, + { 0xb0000, 16384, 0x2, 0x1000}, + { 0xc1000, 7, 0x1f, 0x924}, + { 0xc102c, 2, 0x1f, 0x1fff}, + { 0xc1038, 1, 0x1f, 0x1fff}, + { 0xc103c, 2, 0x1c, 0x924}, + { 0xc1800, 2, 0x1f, 0x924}, + { 0xc2000, 164, 0x1f, 0x924}, + { 0xc22b0, 2, 0x1f, 0x1fff}, + { 0xc22bc, 1, 0x1f, 0x1fff}, + { 0xc22c0, 5, 0x1c, 0x924}, + { 0xc22d8, 4, 0x1c, 0x924}, + { 0xc2400, 49, 0x1f, 0x924}, + { 0xc24c8, 32, 0x1f, 0x924}, + { 0xc2548, 1, 0x1f, 0xfff}, + { 0xc254c, 1, 0x1f, 0x924}, + { 0xc2550, 1, 0x1f, 0xfff}, + { 0xc2554, 1, 0x1f, 0x924}, + { 0xc2558, 1, 0x1f, 0xfff}, + { 0xc255c, 1, 0x1f, 0x924}, + { 0xc2568, 2, 0x1f, 0x924}, + { 0xc2600, 1, 0x1f, 0x924}, + { 0xc4000, 165, 0x1f, 0x924}, + { 0xc42b4, 2, 0x1f, 0x1fff}, + { 0xc42c0, 1, 0x1f, 0x1fff}, + { 0xc42d8, 2, 0x1c, 0x924}, + { 0xc42e0, 7, 0x1e, 0x924}, + { 0xc42fc, 1, 0x1c, 0x924}, + { 0xc4400, 51, 0x1f, 0x924}, + { 0xc44d0, 32, 0x1f, 0x924}, + { 0xc4550, 1, 0x1f, 0xfff}, + { 0xc4554, 1, 0x1f, 0x924}, + { 0xc4558, 1, 0x1f, 0xfff}, + { 0xc455c, 1, 0x1f, 0x924}, + { 0xc4560, 1, 0x1f, 0xfff}, + { 0xc4564, 1, 0x1f, 0x924}, + { 0xc4570, 2, 0x1f, 0x924}, + { 0xc4578, 5, 0x1c, 0x924}, + { 0xc4600, 1, 0x1f, 0x924}, + { 0xd0000, 19, 0x1f, 0x924}, + { 0xd004c, 8, 0x1f, 0x1927}, + { 0xd006c, 64, 0x1f, 0x924}, + { 0xd016c, 8, 0x1f, 0xfff}, + { 0xd018c, 19, 0x1f, 0x924}, + { 0xd01e8, 2, 0x1f, 0x1fff}, + { 0xd01f4, 1, 0x1f, 0x1fff}, + { 0xd01fc, 1, 0x1c, 0x924}, + { 0xd0200, 1, 0x1f, 0x924}, + { 0xd0204, 1, 0x1f, 0xfff}, + { 0xd020c, 3, 0x1f, 0xfff}, + { 0xd0218, 4, 0x1f, 0x924}, + { 0xd0228, 18, 0x1e, 0x924}, + { 0xd0280, 1, 0x1f, 0x924}, + { 0xd0300, 1, 0x1f, 0x924}, + { 0xd0400, 1, 0x1f, 0x924}, + { 0xd0818, 1, 0x10, 0x924}, + { 0xd4000, 1, 0x1f, 0x1927}, + { 0xd4004, 255, 0x1f, 0x6}, + { 0xd4400, 1, 0x1f, 0x1007}, + { 0xd4404, 255, 0x1f, 0x6}, + { 0xd4800, 1, 0x1f, 0x1007}, + { 0xd4804, 255, 0x1f, 0x6}, + { 0xd4c00, 1, 0x1f, 0x1007}, + { 0xd4c04, 255, 0x1f, 0x6}, + { 0xd5000, 1, 0x1f, 0x1007}, + { 0xd5004, 255, 0x1f, 0x6}, + { 0xd5400, 1, 0x1f, 0x1007}, + { 0xd5404, 255, 0x1f, 0x6}, + { 0xd5800, 1, 0x1f, 0x1007}, + { 0xd5804, 255, 0x1f, 0x6}, + { 0xd5c00, 1, 0x1f, 0x1007}, + { 0xd5c04, 255, 0x1f, 0x6}, + { 0xd6000, 
1, 0x1f, 0x1007}, + { 0xd6004, 255, 0x1f, 0x6}, + { 0xd6400, 1, 0x1f, 0x1007}, + { 0xd6404, 255, 0x1f, 0x6}, + { 0xd8000, 1, 0x1f, 0x1927}, + { 0xd8004, 255, 0x1f, 0x6}, + { 0xd8400, 1, 0x1f, 0x1007}, + { 0xd8404, 255, 0x1f, 0x6}, + { 0xd8800, 1, 0x1f, 0x1007}, + { 0xd8804, 255, 0x1f, 0x6}, + { 0xd8c00, 1, 0x1f, 0x1007}, + { 0xd8c04, 255, 0x1f, 0x6}, + { 0xd9000, 1, 0x1f, 0x1007}, + { 0xd9004, 255, 0x1f, 0x6}, + { 0xd9400, 1, 0x1f, 0x1007}, + { 0xd9404, 255, 0x1f, 0x6}, + { 0xd9800, 1, 0x1f, 0x1007}, + { 0xd9804, 255, 0x1f, 0x6}, + { 0xd9c00, 1, 0x1f, 0x1007}, + { 0xd9c04, 255, 0x1f, 0x6}, + { 0xda000, 1, 0x1f, 0x1007}, + { 0xda004, 255, 0x1f, 0x6}, + { 0xda400, 1, 0x1f, 0x1007}, + { 0xda404, 255, 0x1f, 0x6}, + { 0xda800, 1, 0x1f, 0x1007}, + { 0xda804, 255, 0x1f, 0x6}, + { 0xdac00, 1, 0x1f, 0x1007}, + { 0xdac04, 255, 0x1f, 0x6}, + { 0xdb000, 1, 0x1f, 0x1007}, + { 0xdb004, 255, 0x1f, 0x6}, + { 0xdb400, 1, 0x1f, 0x1007}, + { 0xdb404, 255, 0x1f, 0x6}, + { 0xdb800, 1, 0x1f, 0x1007}, + { 0xdb804, 255, 0x1f, 0x6}, + { 0xdbc00, 1, 0x1f, 0x1007}, + { 0xdbc04, 255, 0x1f, 0x6}, + { 0xdc000, 1, 0x1f, 0x1007}, + { 0xdc004, 255, 0x1f, 0x6}, + { 0xdc400, 1, 0x1f, 0x1007}, + { 0xdc404, 255, 0x1f, 0x6}, + { 0xdc800, 1, 0x1f, 0x1007}, + { 0xdc804, 255, 0x1f, 0x6}, + { 0xdcc00, 1, 0x1f, 0x1007}, + { 0xdcc04, 255, 0x1f, 0x6}, + { 0xdd000, 1, 0x1f, 0x1007}, + { 0xdd004, 255, 0x1f, 0x6}, + { 0xdd400, 1, 0x1f, 0x1007}, + { 0xdd404, 255, 0x1f, 0x6}, + { 0xdd800, 1, 0x1f, 0x1007}, + { 0xdd804, 255, 0x1f, 0x6}, + { 0xddc00, 1, 0x1f, 0x1007}, + { 0xddc04, 255, 0x1f, 0x6}, + { 0xde000, 1, 0x1f, 0x1007}, + { 0xde004, 255, 0x1f, 0x6}, + { 0xde400, 1, 0x1f, 0x1007}, + { 0xde404, 255, 0x1f, 0x6}, + { 0xde800, 1, 0x1f, 0x1007}, + { 0xde804, 255, 0x1f, 0x6}, + { 0xdec00, 1, 0x1f, 0x1007}, + { 0xdec04, 255, 0x1f, 0x6}, + { 0xdf000, 1, 0x1f, 0x1007}, + { 0xdf004, 255, 0x1f, 0x6}, + { 0xdf400, 1, 0x1f, 0x1007}, + { 0xdf404, 255, 0x1f, 0x6}, + { 0xdf800, 1, 0x1f, 0x1007}, + { 0xdf804, 255, 0x1f, 0x6}, + { 0xdfc00, 1, 0x1f, 0x1007}, + { 0xdfc04, 255, 0x1f, 0x6}, + { 0xe0000, 21, 0x1f, 0x924}, + { 0xe0054, 8, 0x1f, 0xf24}, + { 0xe0074, 49, 0x1f, 0x924}, + { 0xe0138, 1, 0x3, 0x924}, + { 0xe013c, 6, 0x1f, 0x924}, + { 0xe0154, 8, 0x1f, 0xfff}, + { 0xe0174, 21, 0x1f, 0x924}, + { 0xe01d8, 2, 0x1f, 0x1fff}, + { 0xe01e4, 1, 0x1f, 0x1fff}, + { 0xe01f4, 1, 0x4, 0x924}, + { 0xe01f8, 1, 0x1c, 0x924}, + { 0xe0200, 1, 0x1f, 0x924}, + { 0xe0204, 1, 0x1f, 0xfff}, + { 0xe020c, 2, 0x1f, 0xfff}, + { 0xe0214, 2, 0x1f, 0x924}, + { 0xe021c, 2, 0x1f, 0xfff}, + { 0xe0224, 2, 0x1f, 0x924}, + { 0xe022c, 18, 0x1e, 0x924}, + { 0xe0280, 1, 0x1f, 0x924}, + { 0xe0300, 1, 0x1f, 0x924}, + { 0xe0400, 1, 0x10, 0x924}, + { 0xe1000, 1, 0x1f, 0x924}, + { 0xe2000, 1, 0x1f, 0xf24}, + { 0xe2004, 255, 0x1f, 0xc00}, + { 0xe2400, 1, 0x1f, 0xe00}, + { 0xe2404, 255, 0x1f, 0xc00}, + { 0xe2800, 1, 0x1f, 0xe00}, + { 0xe2804, 255, 0x1f, 0xc00}, + { 0xe2c00, 1, 0x1f, 0xe00}, + { 0xe2c04, 255, 0x1f, 0xc00}, + { 0xe3000, 1, 0x1f, 0xe00}, + { 0xe3004, 255, 0x1f, 0xc00}, + { 0xe3400, 1, 0x1f, 0xe00}, + { 0xe3404, 255, 0x1f, 0xc00}, + { 0xe3800, 1, 0x1f, 0xe00}, + { 0xe3804, 255, 0x1f, 0xc00}, + { 0xe3c00, 1, 0x1f, 0xe00}, + { 0xe3c04, 255, 0x1f, 0xc00}, + { 0xf0000, 1, 0x1f, 0xf24}, + { 0xf0004, 255, 0x1f, 0xc00}, + { 0xf0400, 1, 0x1f, 0xe00}, + { 0xf0404, 255, 0x1f, 0xc00}, + { 0xf0800, 1, 0x1f, 0xe00}, + { 0xf0804, 255, 0x1f, 0xc00}, + { 0xf0c00, 1, 0x1f, 0xe00}, + { 0xf0c04, 255, 0x1f, 0xc00}, + { 0xf1000, 1, 0x1f, 0xe00}, + { 0xf1004, 255, 0x1f, 0xc00}, + { 0xf1400, 1, 0x1f, 
0xe00}, + { 0xf1404, 255, 0x1f, 0xc00}, + { 0xf1800, 1, 0x1f, 0xe00}, + { 0xf1804, 255, 0x1f, 0xc00}, + { 0xf1c00, 1, 0x1f, 0xe00}, + { 0xf1c04, 255, 0x1f, 0xc00}, + { 0xf2000, 1, 0x1f, 0xe00}, + { 0xf2004, 255, 0x1f, 0xc00}, + { 0xf2400, 1, 0x1f, 0xe00}, + { 0xf2404, 255, 0x1f, 0xc00}, + { 0xf2800, 1, 0x1f, 0xe00}, + { 0xf2804, 255, 0x1f, 0xc00}, + { 0xf2c00, 1, 0x1f, 0xe00}, + { 0xf2c04, 255, 0x1f, 0xc00}, + { 0xf3000, 1, 0x1f, 0xe00}, + { 0xf3004, 255, 0x1f, 0xc00}, + { 0xf3400, 1, 0x1f, 0xe00}, + { 0xf3404, 255, 0x1f, 0xc00}, + { 0xf3800, 1, 0x1f, 0xe00}, + { 0xf3804, 255, 0x1f, 0xc00}, + { 0xf3c00, 1, 0x1f, 0xe00}, + { 0xf3c04, 255, 0x1f, 0xc00}, + { 0xf4000, 1, 0x1f, 0xe00}, + { 0xf4004, 255, 0x1f, 0xc00}, + { 0xf4400, 1, 0x1f, 0xe00}, + { 0xf4404, 255, 0x1f, 0xc00}, + { 0xf4800, 1, 0x1f, 0xe00}, + { 0xf4804, 255, 0x1f, 0xc00}, + { 0xf4c00, 1, 0x1f, 0xe00}, + { 0xf4c04, 255, 0x1f, 0xc00}, + { 0xf5000, 1, 0x1f, 0xe00}, + { 0xf5004, 255, 0x1f, 0xc00}, + { 0xf5400, 1, 0x1f, 0xe00}, + { 0xf5404, 255, 0x1f, 0xc00}, + { 0xf5800, 1, 0x1f, 0xe00}, + { 0xf5804, 255, 0x1f, 0xc00}, + { 0xf5c00, 1, 0x1f, 0xe00}, + { 0xf5c04, 255, 0x1f, 0xc00}, + { 0xf6000, 1, 0x1f, 0xe00}, + { 0xf6004, 255, 0x1f, 0xc00}, + { 0xf6400, 1, 0x1f, 0xe00}, + { 0xf6404, 255, 0x1f, 0xc00}, + { 0xf6800, 1, 0x1f, 0xe00}, + { 0xf6804, 255, 0x1f, 0xc00}, + { 0xf6c00, 1, 0x1f, 0xe00}, + { 0xf6c04, 255, 0x1f, 0xc00}, + { 0xf7000, 1, 0x1f, 0xe00}, + { 0xf7004, 255, 0x1f, 0xc00}, + { 0xf7400, 1, 0x1f, 0xe00}, + { 0xf7404, 255, 0x1f, 0xc00}, + { 0xf7800, 1, 0x1f, 0xe00}, + { 0xf7804, 255, 0x1f, 0xc00}, + { 0xf7c00, 1, 0x1f, 0xe00}, + { 0xf7c04, 255, 0x1f, 0xc00}, + { 0xf8000, 1, 0x1f, 0xe00}, + { 0xf8004, 255, 0x1f, 0xc00}, + { 0xf8400, 1, 0x1f, 0xe00}, + { 0xf8404, 255, 0x1f, 0xc00}, + { 0xf8800, 1, 0x1f, 0xe00}, + { 0xf8804, 255, 0x1f, 0xc00}, + { 0xf8c00, 1, 0x1f, 0xe00}, + { 0xf8c04, 255, 0x1f, 0xc00}, + { 0xf9000, 1, 0x1f, 0xe00}, + { 0xf9004, 255, 0x1f, 0xc00}, + { 0xf9400, 1, 0x1f, 0xe00}, + { 0xf9404, 255, 0x1f, 0xc00}, + { 0xf9800, 1, 0x1f, 0xe00}, + { 0xf9804, 255, 0x1f, 0xc00}, + { 0xf9c00, 1, 0x1f, 0xe00}, + { 0xf9c04, 255, 0x1f, 0xc00}, + { 0xfa000, 1, 0x1f, 0xe00}, + { 0xfa004, 255, 0x1f, 0xc00}, + { 0xfa400, 1, 0x1f, 0xe00}, + { 0xfa404, 255, 0x1f, 0xc00}, + { 0xfa800, 1, 0x1f, 0xe00}, + { 0xfa804, 255, 0x1f, 0xc00}, + { 0xfac00, 1, 0x1f, 0xe00}, + { 0xfac04, 255, 0x1f, 0xc00}, + { 0xfb000, 1, 0x1f, 0xe00}, + { 0xfb004, 255, 0x1f, 0xc00}, + { 0xfb400, 1, 0x1f, 0xe00}, + { 0xfb404, 255, 0x1f, 0xc00}, + { 0xfb800, 1, 0x1f, 0xe00}, + { 0xfb804, 255, 0x1f, 0xc00}, + { 0xfbc00, 1, 0x1f, 0xe00}, + { 0xfbc04, 255, 0x1f, 0xc00}, + { 0xfc000, 1, 0x1f, 0xe00}, + { 0xfc004, 255, 0x1f, 0xc00}, + { 0xfc400, 1, 0x1f, 0xe00}, + { 0xfc404, 255, 0x1f, 0xc00}, + { 0xfc800, 1, 0x1f, 0xe00}, + { 0xfc804, 255, 0x1f, 0xc00}, + { 0xfcc00, 1, 0x1f, 0xe00}, + { 0xfcc04, 255, 0x1f, 0xc00}, + { 0xfd000, 1, 0x1f, 0xe00}, + { 0xfd004, 255, 0x1f, 0xc00}, + { 0xfd400, 1, 0x1f, 0xe00}, + { 0xfd404, 255, 0x1f, 0xc00}, + { 0xfd800, 1, 0x1f, 0xe00}, + { 0xfd804, 255, 0x1f, 0xc00}, + { 0xfdc00, 1, 0x1f, 0xe00}, + { 0xfdc04, 255, 0x1f, 0xc00}, + { 0xfe000, 1, 0x1f, 0xe00}, + { 0xfe004, 255, 0x1f, 0xc00}, + { 0xfe400, 1, 0x1f, 0xe00}, + { 0xfe404, 255, 0x1f, 0xc00}, + { 0xfe800, 1, 0x1f, 0xe00}, + { 0xfe804, 255, 0x1f, 0xc00}, + { 0xfec00, 1, 0x1f, 0xe00}, + { 0xfec04, 255, 0x1f, 0xc00}, + { 0xff000, 1, 0x1f, 0xe00}, + { 0xff004, 255, 0x1f, 0xc00}, + { 0xff400, 1, 0x1f, 0xe00}, + { 0xff404, 255, 0x1f, 0xc00}, + { 0xff800, 1, 0x1f, 0xe00}, + { 0xff804, 
255, 0x1f, 0xc00}, + { 0xffc00, 1, 0x1f, 0xe00}, + { 0xffc04, 255, 0x1f, 0xc00}, + { 0x101000, 5, 0x1f, 0x924}, + { 0x101014, 1, 0x1f, 0xfff}, + { 0x101018, 6, 0x1f, 0x924}, + { 0x101040, 2, 0x1f, 0x1fff}, + { 0x10104c, 1, 0x1f, 0x1fff}, + { 0x101050, 1, 0x1e, 0x924}, + { 0x101054, 3, 0x1c, 0x924}, + { 0x101100, 1, 0x1f, 0x924}, + { 0x101800, 8, 0x1f, 0x924}, + { 0x102000, 18, 0x1f, 0x924}, + { 0x102058, 2, 0x1f, 0x1fff}, + { 0x102064, 1, 0x1f, 0x1fff}, + { 0x102068, 6, 0x1c, 0x924}, + { 0x102080, 16, 0x1f, 0xfff}, + { 0x1020c0, 1, 0x1f, 0x924}, + { 0x1020c8, 8, 0x2, 0x924}, + { 0x1020e8, 9, 0x1c, 0x924}, + { 0x102400, 1, 0x1f, 0x924}, + { 0x103000, 1, 0x1f, 0x924}, + { 0x103004, 2, 0x1f, 0xfff}, + { 0x10300c, 23, 0x1f, 0x924}, + { 0x103088, 2, 0x1f, 0x1fff}, + { 0x103094, 1, 0x1f, 0x1fff}, + { 0x103098, 1, 0x1e, 0x924}, + { 0x10309c, 2, 0x1e, 0xfff}, + { 0x1030a4, 2, 0x1e, 0x924}, + { 0x1030ac, 2, 0x1c, 0x924}, + { 0x1030b4, 1, 0x4, 0x924}, + { 0x1030b8, 2, 0x1c, 0xfff}, + { 0x1030c0, 3, 0x1c, 0x924}, + { 0x1030cc, 1, 0x1c, 0xfff}, + { 0x1030d0, 1, 0x1c, 0x924}, + { 0x1030d8, 2, 0x1c, 0x924}, + { 0x1030e0, 1, 0x1c, 0xfff}, + { 0x1030e4, 5, 0x1c, 0x924}, + { 0x103400, 136, 0x1c, 0x1fff}, + { 0x103800, 8, 0x1f, 0x924}, + { 0x104000, 1, 0x1f, 0x924}, + { 0x104004, 1, 0x1f, 0xfff}, + { 0x104008, 4, 0x1f, 0x924}, + { 0x104018, 1, 0x1f, 0xfff}, + { 0x10401c, 1, 0x1f, 0x924}, + { 0x104020, 1, 0x1f, 0xfff}, + { 0x104024, 6, 0x1f, 0x924}, + { 0x10403c, 1, 0x1f, 0xfff}, + { 0x104040, 47, 0x1f, 0x924}, + { 0x10410c, 2, 0x1f, 0x1fff}, + { 0x104118, 1, 0x1f, 0x1fff}, + { 0x10411c, 16, 0x1c, 0x924}, + { 0x104200, 17, 0x1f, 0x924}, + { 0x104400, 1, 0x1f, 0x1fff}, + { 0x104404, 63, 0x1f, 0xfff}, + { 0x104500, 192, 0x1f, 0xdb6}, + { 0x104800, 1, 0x1f, 0x1fff}, + { 0x104804, 63, 0x1f, 0xfff}, + { 0x104900, 192, 0x1f, 0xdb6}, + { 0x105000, 4, 0x1f, 0x1fff}, + { 0x105010, 252, 0x1f, 0xfff}, + { 0x105400, 768, 0x1f, 0xdb6}, + { 0x107000, 7, 0x1c, 0x924}, + { 0x10701c, 1, 0x18, 0x924}, + { 0x108000, 33, 0x3, 0x924}, + { 0x1080ac, 5, 0x2, 0x924}, + { 0x108100, 5, 0x3, 0x924}, + { 0x108120, 5, 0x3, 0x924}, + { 0x108200, 74, 0x3, 0x924}, + { 0x108400, 74, 0x3, 0x924}, + { 0x108800, 152, 0x3, 0x924}, + { 0x110000, 111, 0x1c, 0x924}, + { 0x1101cc, 2, 0x1c, 0x1fff}, + { 0x1101d8, 1, 0x1c, 0x1fff}, + { 0x1101dc, 1, 0x18, 0x924}, + { 0x110200, 4, 0x1c, 0x924}, + { 0x120000, 92, 0x1f, 0x924}, + { 0x120170, 2, 0x3, 0x924}, + { 0x120178, 14, 0x1f, 0x924}, + { 0x1201b0, 2, 0x1f, 0xfff}, + { 0x1201b8, 93, 0x1f, 0x924}, + { 0x12032c, 1, 0x1f, 0xfff}, + { 0x120330, 15, 0x1f, 0x924}, + { 0x12036c, 3, 0x1f, 0xfff}, + { 0x120378, 36, 0x1f, 0x924}, + { 0x120408, 2, 0x1f, 0xfff}, + { 0x120410, 1, 0x1f, 0x924}, + { 0x120414, 15, 0x1f, 0xfff}, + { 0x120450, 10, 0x1f, 0x924}, + { 0x120478, 2, 0x1f, 0xfff}, + { 0x120480, 43, 0x1f, 0x924}, + { 0x12052c, 1, 0x1f, 0xfff}, + { 0x120530, 5, 0x1f, 0x924}, + { 0x120544, 4, 0x3, 0x924}, + { 0x120554, 4, 0x1f, 0x924}, + { 0x120564, 2, 0x1f, 0xfff}, + { 0x12057c, 2, 0x1f, 0x1fff}, + { 0x120588, 3, 0x1f, 0x1fff}, + { 0x120598, 1, 0x1f, 0x1fff}, + { 0x12059c, 22, 0x1e, 0x924}, + { 0x1205f4, 1, 0x6, 0x924}, + { 0x1205f8, 4, 0x1c, 0x924}, + { 0x120618, 1, 0x1c, 0x924}, + { 0x12061c, 31, 0x1e, 0x924}, + { 0x120698, 3, 0x1c, 0x924}, + { 0x1206a4, 1, 0x4, 0x924}, + { 0x1206a8, 1, 0x1c, 0x924}, + { 0x1206b0, 38, 0x1c, 0x924}, + { 0x120748, 1, 0x1c, 0xfff}, + { 0x12074c, 11, 0x1c, 0x924}, + { 0x120778, 2, 0x1c, 0xfff}, + { 0x120780, 23, 0x1c, 0x924}, + { 0x1207dc, 1, 0x4, 0x924}, + { 0x1207fc, 1, 0x1c, 
0x924}, + { 0x12080c, 2, 0x1f, 0xfff}, + { 0x120814, 1, 0x1f, 0x924}, + { 0x120818, 1, 0x1f, 0xfff}, + { 0x12081c, 1, 0x1f, 0x924}, + { 0x120820, 1, 0x1f, 0xfff}, + { 0x120824, 1, 0x1f, 0x924}, + { 0x120828, 1, 0x1f, 0xfff}, + { 0x12082c, 1, 0x1f, 0x924}, + { 0x120830, 1, 0x1f, 0xfff}, + { 0x120834, 1, 0x1f, 0x924}, + { 0x120838, 1, 0x1f, 0xfff}, + { 0x12083c, 1, 0x1f, 0x924}, + { 0x120840, 1, 0x1f, 0xfff}, + { 0x120844, 1, 0x1f, 0x924}, + { 0x120848, 1, 0x1f, 0xfff}, + { 0x12084c, 1, 0x1f, 0x924}, + { 0x120850, 1, 0x1f, 0xfff}, + { 0x120854, 1, 0x1f, 0x924}, + { 0x120858, 1, 0x1f, 0xfff}, + { 0x12085c, 1, 0x1f, 0x924}, + { 0x120860, 1, 0x1f, 0xfff}, + { 0x120864, 1, 0x1f, 0x924}, + { 0x120868, 1, 0x1f, 0xfff}, + { 0x12086c, 1, 0x1f, 0x924}, + { 0x120870, 1, 0x1f, 0xfff}, + { 0x120874, 1, 0x1f, 0x924}, + { 0x120878, 1, 0x1f, 0xfff}, + { 0x12087c, 1, 0x1f, 0x924}, + { 0x120880, 1, 0x1f, 0xfff}, + { 0x120884, 1, 0x1f, 0x924}, + { 0x120888, 1, 0x1f, 0xfff}, + { 0x12088c, 1, 0x1f, 0x924}, + { 0x120890, 1, 0x1f, 0xfff}, + { 0x120894, 1, 0x1f, 0x924}, + { 0x120898, 1, 0x1f, 0xfff}, + { 0x12089c, 1, 0x1f, 0x924}, + { 0x1208a0, 1, 0x1f, 0xfff}, + { 0x1208a4, 1, 0x1f, 0x924}, + { 0x1208a8, 1, 0x1f, 0xfff}, + { 0x1208ac, 1, 0x1f, 0x924}, + { 0x1208b0, 1, 0x1f, 0xfff}, + { 0x1208b4, 1, 0x1f, 0x924}, + { 0x1208b8, 1, 0x1f, 0xfff}, + { 0x1208bc, 1, 0x1f, 0x924}, + { 0x1208c0, 1, 0x1f, 0xfff}, + { 0x1208c4, 1, 0x1f, 0x924}, + { 0x1208c8, 1, 0x1f, 0xfff}, + { 0x1208cc, 1, 0x1f, 0x924}, + { 0x1208d0, 1, 0x1f, 0xfff}, + { 0x1208d4, 1, 0x1f, 0x924}, + { 0x1208d8, 1, 0x1f, 0xfff}, + { 0x1208dc, 1, 0x1f, 0x924}, + { 0x1208e0, 1, 0x1f, 0xfff}, + { 0x1208e4, 1, 0x1f, 0x924}, + { 0x1208e8, 1, 0x1f, 0xfff}, + { 0x1208ec, 1, 0x1f, 0x924}, + { 0x1208f0, 1, 0x1f, 0xfff}, + { 0x1208f4, 1, 0x1f, 0x924}, + { 0x1208f8, 1, 0x1f, 0xfff}, + { 0x1208fc, 1, 0x1f, 0x924}, + { 0x120900, 1, 0x1f, 0xfff}, + { 0x120904, 1, 0x1f, 0x924}, + { 0x120908, 1, 0x1f, 0xfff}, + { 0x12090c, 1, 0x1f, 0x924}, + { 0x120910, 7, 0x1c, 0x924}, + { 0x120930, 9, 0x1c, 0x924}, + { 0x12095c, 37, 0x18, 0x924}, + { 0x120a00, 2, 0x7, 0x924}, + { 0x120b00, 1, 0x18, 0x924}, + { 0x122000, 2, 0x1f, 0x924}, + { 0x122008, 2046, 0x1, 0x924}, + { 0x128000, 6144, 0x1e, 0x924}, + { 0x130000, 1, 0x1c, 0x1fff}, + { 0x130004, 11, 0x1c, 0x924}, + { 0x130030, 1, 0x1c, 0xfff}, + { 0x130034, 6, 0x1c, 0x924}, + { 0x13004c, 3, 0x1c, 0xfff}, + { 0x130058, 3, 0x1c, 0x924}, + { 0x130064, 2, 0x1c, 0xfff}, + { 0x13006c, 8, 0x1c, 0x924}, + { 0x13009c, 2, 0x1c, 0x1fff}, + { 0x1300a8, 1, 0x1c, 0x1fff}, + { 0x130100, 12, 0x1c, 0x924}, + { 0x130130, 1, 0x1c, 0xfff}, + { 0x130134, 14, 0x1c, 0x924}, + { 0x13016c, 1, 0x1c, 0xfff}, + { 0x130170, 1, 0x1c, 0x924}, + { 0x130180, 1, 0x1c, 0x924}, + { 0x130200, 1, 0x1c, 0x924}, + { 0x130280, 1, 0x1c, 0x924}, + { 0x130300, 1, 0x1c, 0xfff}, + { 0x130304, 4, 0x1c, 0x924}, + { 0x130380, 1, 0x1c, 0x924}, + { 0x130400, 1, 0x1c, 0x924}, + { 0x130480, 1, 0x1c, 0xfff}, + { 0x130484, 4, 0x1c, 0x924}, + { 0x130800, 72, 0x1c, 0x924}, + { 0x131000, 136, 0x1c, 0x924}, + { 0x132000, 148, 0x1c, 0x924}, + { 0x134000, 544, 0x1c, 0x924}, + { 0x140000, 1, 0x1f, 0x924}, + { 0x140004, 9, 0xf, 0x924}, + { 0x140028, 8, 0x1f, 0x924}, + { 0x140048, 5, 0xf, 0x924}, + { 0x14005c, 2, 0xf, 0xfff}, + { 0x140064, 3, 0xf, 0x924}, + { 0x140070, 1, 0x1f, 0x924}, + { 0x140074, 10, 0xf, 0x924}, + { 0x14009c, 1, 0x1f, 0x924}, + { 0x1400a0, 5, 0xf, 0x924}, + { 0x1400b4, 7, 0x1f, 0x924}, + { 0x1400d0, 2, 0xf, 0xfff}, + { 0x1400d8, 2, 0xf, 0x924}, + { 0x1400e0, 1, 0xf, 0xfff}, + 
{ 0x1400e4, 5, 0xf, 0x924}, + { 0x1400f8, 2, 0x1f, 0x924}, + { 0x140100, 5, 0x3, 0x924}, + { 0x140114, 5, 0xf, 0x924}, + { 0x140128, 7, 0x1f, 0x924}, + { 0x140144, 9, 0xf, 0x924}, + { 0x140168, 8, 0x1f, 0x924}, + { 0x140188, 3, 0xf, 0x924}, + { 0x140194, 13, 0x1f, 0x924}, + { 0x1401d8, 2, 0x1f, 0x1fff}, + { 0x1401e4, 1, 0x1f, 0x1fff}, + { 0x140200, 6, 0xf, 0xfff}, + { 0x1402e0, 2, 0xc, 0x924}, + { 0x1402e8, 2, 0x1c, 0x924}, + { 0x1402f0, 9, 0xc, 0x924}, + { 0x140314, 9, 0x10, 0x924}, + { 0x140338, 7, 0x10, 0xfff}, + { 0x140354, 7, 0x10, 0x924}, + { 0x140370, 7, 0x10, 0xfff}, + { 0x14038c, 14, 0x10, 0x924}, + { 0x1404b0, 14, 0x10, 0x924}, + { 0x15c000, 2, 0x1e, 0x924}, + { 0x15c008, 5, 0x2, 0x924}, + { 0x15c020, 8, 0x1c, 0x924}, + { 0x15c040, 1, 0xc, 0x924}, + { 0x15c044, 2, 0x1c, 0x924}, + { 0x15c04c, 8, 0xc, 0x924}, + { 0x15c06c, 8, 0x1c, 0x924}, + { 0x15c090, 13, 0x1c, 0x924}, + { 0x15c0c8, 24, 0x1c, 0x924}, + { 0x15c128, 2, 0xc, 0x924}, + { 0x15c130, 1, 0x1c, 0x924}, + { 0x15c138, 6, 0x1c, 0x924}, + { 0x15c150, 2, 0x18, 0x924}, + { 0x15c158, 2, 0x8, 0x924}, + { 0x15c160, 23, 0x10, 0x924}, + { 0x15c1bc, 6, 0x10, 0xfff}, + { 0x15c1d4, 23, 0x10, 0x924}, + { 0x15c230, 7, 0x10, 0xfff}, + { 0x15c24c, 90, 0x10, 0x924}, + { 0x160004, 6, 0x18, 0x924}, + { 0x16003c, 1, 0x10, 0x924}, + { 0x160040, 6, 0x18, 0x924}, + { 0x16005c, 6, 0x18, 0x924}, + { 0x160074, 1, 0x10, 0x924}, + { 0x160078, 2, 0x18, 0x924}, + { 0x160300, 8, 0x18, 0x924}, + { 0x160330, 6, 0x18, 0x924}, + { 0x160404, 6, 0x18, 0x924}, + { 0x16043c, 1, 0x10, 0x924}, + { 0x160440, 6, 0x18, 0x924}, + { 0x16045c, 6, 0x18, 0x924}, + { 0x160474, 1, 0x10, 0x924}, + { 0x160478, 2, 0x18, 0x924}, + { 0x160700, 8, 0x18, 0x924}, + { 0x160730, 6, 0x18, 0x924}, + { 0x161000, 7, 0x1f, 0x924}, + { 0x16102c, 2, 0x1f, 0x1fff}, + { 0x161038, 1, 0x1f, 0x1fff}, + { 0x16103c, 2, 0x1c, 0x924}, + { 0x161800, 2, 0x1f, 0x924}, + { 0x162000, 54, 0x18, 0x924}, + { 0x162200, 60, 0x18, 0x924}, + { 0x162400, 54, 0x18, 0x924}, + { 0x162600, 60, 0x18, 0x924}, + { 0x162800, 54, 0x18, 0x924}, + { 0x162a00, 60, 0x18, 0x924}, + { 0x162c00, 54, 0x18, 0x924}, + { 0x162e00, 60, 0x18, 0x924}, + { 0x163000, 1, 0x18, 0x924}, + { 0x163008, 1, 0x18, 0x924}, + { 0x163010, 1, 0x18, 0x924}, + { 0x163018, 1, 0x18, 0x924}, + { 0x163020, 5, 0x18, 0x924}, + { 0x163038, 3, 0x18, 0x924}, + { 0x163048, 3, 0x18, 0x924}, + { 0x163058, 1, 0x18, 0x924}, + { 0x163060, 1, 0x18, 0x924}, + { 0x163068, 1, 0x18, 0x924}, + { 0x163070, 3, 0x18, 0x924}, + { 0x163080, 1, 0x18, 0x924}, + { 0x163088, 3, 0x18, 0x924}, + { 0x163098, 1, 0x18, 0x924}, + { 0x1630a0, 1, 0x18, 0x924}, + { 0x1630a8, 1, 0x18, 0x924}, + { 0x1630b0, 2, 0x10, 0x924}, + { 0x1630c0, 1, 0x18, 0x924}, + { 0x1630c8, 1, 0x18, 0x924}, + { 0x1630d0, 1, 0x18, 0x924}, + { 0x1630d8, 1, 0x18, 0x924}, + { 0x1630e0, 2, 0x18, 0x924}, + { 0x163110, 1, 0x18, 0x924}, + { 0x163120, 2, 0x18, 0x924}, + { 0x163420, 4, 0x18, 0x924}, + { 0x163438, 2, 0x18, 0x924}, + { 0x163488, 2, 0x18, 0x924}, + { 0x163520, 2, 0x18, 0x924}, + { 0x163800, 1, 0x18, 0x924}, + { 0x163808, 1, 0x18, 0x924}, + { 0x163810, 1, 0x18, 0x924}, + { 0x163818, 1, 0x18, 0x924}, + { 0x163820, 5, 0x18, 0x924}, + { 0x163838, 3, 0x18, 0x924}, + { 0x163848, 3, 0x18, 0x924}, + { 0x163858, 1, 0x18, 0x924}, + { 0x163860, 1, 0x18, 0x924}, + { 0x163868, 1, 0x18, 0x924}, + { 0x163870, 3, 0x18, 0x924}, + { 0x163880, 1, 0x18, 0x924}, + { 0x163888, 3, 0x18, 0x924}, + { 0x163898, 1, 0x18, 0x924}, + { 0x1638a0, 1, 0x18, 0x924}, + { 0x1638a8, 1, 0x18, 0x924}, + { 0x1638b0, 2, 0x10, 0x924}, + { 0x1638c0, 
1, 0x18, 0x924}, + { 0x1638c8, 1, 0x18, 0x924}, + { 0x1638d0, 1, 0x18, 0x924}, + { 0x1638d8, 1, 0x18, 0x924}, + { 0x1638e0, 2, 0x18, 0x924}, + { 0x163910, 1, 0x18, 0x924}, + { 0x163920, 2, 0x18, 0x924}, + { 0x163c20, 4, 0x18, 0x924}, + { 0x163c38, 2, 0x18, 0x924}, + { 0x163c88, 2, 0x18, 0x924}, + { 0x163d20, 2, 0x18, 0x924}, + { 0x164000, 5, 0x1f, 0x924}, + { 0x164014, 2, 0x1f, 0xfff}, + { 0x16401c, 53, 0x1f, 0x924}, + { 0x164100, 2, 0x1f, 0x1fff}, + { 0x16410c, 1, 0x1f, 0x1fff}, + { 0x164110, 2, 0x1e, 0x924}, + { 0x164118, 15, 0x1c, 0x924}, + { 0x164200, 1, 0x1f, 0x924}, + { 0x164208, 1, 0x1f, 0x924}, + { 0x164210, 1, 0x1f, 0x924}, + { 0x164218, 1, 0x1f, 0x924}, + { 0x164220, 1, 0x1f, 0x924}, + { 0x164228, 1, 0x1f, 0x924}, + { 0x164230, 1, 0x1f, 0x924}, + { 0x164238, 1, 0x1f, 0x924}, + { 0x164240, 1, 0x1f, 0x924}, + { 0x164248, 1, 0x1f, 0x924}, + { 0x164250, 1, 0x1f, 0x924}, + { 0x164258, 1, 0x1f, 0x924}, + { 0x164260, 1, 0x1f, 0x924}, + { 0x164270, 2, 0x1f, 0x924}, + { 0x164280, 2, 0x1f, 0x924}, + { 0x164800, 2, 0x1f, 0x924}, + { 0x165000, 2, 0x1f, 0x924}, + { 0x166000, 164, 0x1f, 0x924}, + { 0x1662b0, 2, 0x1f, 0x1fff}, + { 0x1662bc, 1, 0x1f, 0x1fff}, + { 0x1662cc, 7, 0x1c, 0x924}, + { 0x166400, 49, 0x1f, 0x924}, + { 0x1664c8, 32, 0x1f, 0x924}, + { 0x166548, 1, 0x1f, 0xfff}, + { 0x16654c, 1, 0x1f, 0x924}, + { 0x166550, 1, 0x1f, 0xfff}, + { 0x166554, 1, 0x1f, 0x924}, + { 0x166558, 1, 0x1f, 0xfff}, + { 0x16655c, 1, 0x1f, 0x924}, + { 0x166568, 2, 0x1f, 0x924}, + { 0x166570, 5, 0x1c, 0x924}, + { 0x166800, 1, 0x1f, 0x924}, + { 0x168000, 1, 0x1f, 0xfff}, + { 0x168004, 1, 0x1f, 0x924}, + { 0x168008, 1, 0x1f, 0xfff}, + { 0x16800c, 1, 0x1f, 0x924}, + { 0x168010, 1, 0x1f, 0xfff}, + { 0x168014, 1, 0x1f, 0x924}, + { 0x168018, 1, 0x1f, 0xfff}, + { 0x16801c, 3, 0x1f, 0x924}, + { 0x168028, 2, 0x1f, 0xfff}, + { 0x168030, 10, 0x1f, 0x924}, + { 0x168058, 9, 0x1f, 0xfff}, + { 0x16807c, 106, 0x1f, 0x924}, + { 0x168224, 2, 0x3, 0x924}, + { 0x16822c, 3, 0x1f, 0x924}, + { 0x168238, 1, 0x1f, 0xfff}, + { 0x16823c, 25, 0x1f, 0x924}, + { 0x1682a0, 12, 0x3, 0x924}, + { 0x1682d0, 7, 0x1f, 0xfff}, + { 0x1682ec, 5, 0x1f, 0x924}, + { 0x168300, 2, 0x3, 0xfff}, + { 0x168308, 65, 0x1f, 0xfff}, + { 0x16840c, 1, 0x1f, 0x924}, + { 0x168410, 2, 0x1f, 0xfff}, + { 0x168418, 2, 0x3, 0x924}, + { 0x168420, 6, 0x1f, 0x924}, + { 0x168448, 2, 0x1f, 0x1fff}, + { 0x168454, 1, 0x1f, 0x1fff}, + { 0x168800, 19, 0x1f, 0x924}, + { 0x168900, 1, 0x1f, 0x924}, + { 0x168a00, 128, 0x1f, 0xfff}, + { 0x16a000, 1536, 0x1f, 0x924}, + { 0x16c000, 1536, 0x1f, 0x924}, + { 0x16e000, 16, 0x2, 0x924}, + { 0x16e040, 8, 0x1c, 0x924}, + { 0x16e100, 1, 0x2, 0x924}, + { 0x16e200, 2, 0x2, 0xfff}, + { 0x16e400, 1, 0x2, 0x924}, + { 0x16e404, 2, 0x2, 0xfff}, + { 0x16e40c, 94, 0x2, 0x924}, + { 0x16e584, 64, 0x2, 0xfff}, + { 0x16e684, 2, 0x1e, 0xfff}, + { 0x16e68c, 4, 0x2, 0xfff}, + { 0x16e69c, 8, 0x2, 0x924}, + { 0x16e6bc, 4, 0x1e, 0x924}, + { 0x16e6cc, 4, 0x2, 0x924}, + { 0x16e6e0, 2, 0x1c, 0x924}, + { 0x16e6e8, 5, 0xc, 0x924}, + { 0x16e6fc, 4, 0x1c, 0xfff}, + { 0x16e70c, 1, 0x1c, 0x924}, + { 0x16e768, 17, 0x1c, 0x924}, + { 0x16e7ac, 12, 0x10, 0xfff}, + { 0x170000, 24, 0x1f, 0x924}, + { 0x170060, 4, 0x3, 0x924}, + { 0x170070, 13, 0x1f, 0x924}, + { 0x1700a4, 1, 0x1f, 0xfff}, + { 0x1700a8, 1, 0x1f, 0x924}, + { 0x1700ac, 2, 0x1f, 0xfff}, + { 0x1700b4, 3, 0x1f, 0x924}, + { 0x1700c0, 1, 0x1f, 0xfff}, + { 0x1700c4, 44, 0x1f, 0x924}, + { 0x170184, 2, 0x1f, 0x1fff}, + { 0x170190, 1, 0x1f, 0x1fff}, + { 0x170194, 11, 0x1c, 0x924}, + { 0x1701c4, 1, 0x1c, 0x924}, + { 
0x1701cc, 7, 0x1c, 0x924}, + { 0x1701e8, 1, 0x18, 0x924}, + { 0x1701ec, 1, 0x1c, 0x924}, + { 0x1701f4, 1, 0x1c, 0x924}, + { 0x170200, 4, 0x1f, 0x924}, + { 0x170214, 1, 0x1f, 0x924}, + { 0x170218, 77, 0x1c, 0x924}, + { 0x170400, 64, 0x1c, 0x924}, + { 0x178000, 1, 0x1f, 0x924}, + { 0x180000, 61, 0x1f, 0x924}, + { 0x180114, 2, 0x1f, 0x1fff}, + { 0x180120, 3, 0x1f, 0x1fff}, + { 0x180130, 1, 0x1f, 0x1fff}, + { 0x18013c, 2, 0x1e, 0x924}, + { 0x180200, 27, 0x1f, 0x924}, + { 0x18026c, 1, 0x1f, 0xfff}, + { 0x180270, 12, 0x1f, 0x924}, + { 0x1802a0, 1, 0x1f, 0xfff}, + { 0x1802a4, 17, 0x1f, 0x924}, + { 0x180340, 4, 0x1f, 0x924}, + { 0x180380, 1, 0x1c, 0x924}, + { 0x180388, 1, 0x1c, 0x924}, + { 0x180390, 1, 0x1c, 0x924}, + { 0x180398, 1, 0x1c, 0x924}, + { 0x1803a0, 5, 0x1c, 0x924}, + { 0x1803b4, 2, 0x18, 0x924}, + { 0x180400, 256, 0x3, 0xfff}, + { 0x181000, 4, 0x1f, 0x93c}, + { 0x181010, 1020, 0x1f, 0x38}, + { 0x182000, 4, 0x18, 0x924}, + { 0x1a0000, 1, 0x1f, 0x92c}, + { 0x1a0004, 5631, 0x1f, 0x8}, + { 0x1a5800, 2560, 0x1e, 0x8}, + { 0x1a8000, 1, 0x1f, 0x92c}, + { 0x1a8004, 8191, 0x1e, 0x8}, + { 0x1b0000, 1, 0x1f, 0x92c}, + { 0x1b0004, 15, 0x2, 0x8}, + { 0x1b0040, 1, 0x1e, 0x92c}, + { 0x1b0044, 239, 0x2, 0x8}, + { 0x1b0400, 1, 0x1f, 0x92c}, + { 0x1b0404, 255, 0x2, 0x8}, + { 0x1b0800, 1, 0x1f, 0x924}, + { 0x1b0840, 1, 0x1e, 0x924}, + { 0x1b0c00, 1, 0x1f, 0x1fff}, + { 0x1b1000, 1, 0x1f, 0x1fff}, + { 0x1b1040, 1, 0x1e, 0x1fff}, + { 0x1b1400, 1, 0x1f, 0x924}, + { 0x1b1440, 1, 0x1e, 0x924}, + { 0x1b1480, 1, 0x1e, 0x924}, + { 0x1b14c0, 1, 0x1e, 0x924}, + { 0x1b1800, 128, 0x1f, 0x10}, + { 0x1b1c00, 128, 0x1f, 0x10}, + { 0x1b2000, 1, 0x1f, 0xdb6}, + { 0x1b2400, 1, 0x1e, 0x92c}, + { 0x1b2404, 5631, 0x1c, 0x8}, + { 0x1b8000, 1, 0x1f, 0xfff}, + { 0x1b8040, 1, 0x1f, 0xfff}, + { 0x1b8080, 1, 0x1f, 0xfff}, + { 0x1b80c0, 1, 0x1f, 0xfff}, + { 0x1b8100, 1, 0x1f, 0x924}, + { 0x1b8140, 1, 0x1f, 0x924}, + { 0x1b8180, 1, 0x1f, 0x924}, + { 0x1b81c0, 1, 0x1f, 0x924}, + { 0x1b8200, 1, 0x1f, 0x924}, + { 0x1b8240, 1, 0x1f, 0x924}, + { 0x1b8280, 1, 0x1f, 0x924}, + { 0x1b82c0, 1, 0x1f, 0x924}, + { 0x1b8300, 1, 0x1f, 0x924}, + { 0x1b8340, 1, 0x1f, 0x924}, + { 0x1b8380, 1, 0x1f, 0x924}, + { 0x1b83c0, 1, 0x1f, 0x924}, + { 0x1b8400, 1, 0x1f, 0x924}, + { 0x1b8440, 1, 0x1f, 0x924}, + { 0x1b8480, 1, 0x1f, 0x924}, + { 0x1b84c0, 1, 0x1f, 0x924}, + { 0x1b8500, 1, 0x1f, 0x924}, + { 0x1b8540, 1, 0x1f, 0x924}, + { 0x1b8580, 1, 0x1f, 0x924}, + { 0x1b85c0, 19, 0x1c, 0x924}, + { 0x1b8800, 1, 0x1f, 0x924}, + { 0x1b8840, 1, 0x1f, 0x924}, + { 0x1b8880, 1, 0x1f, 0x924}, + { 0x1b88c0, 1, 0x1f, 0x924}, + { 0x1b8900, 1, 0x1f, 0x924}, + { 0x1b8940, 1, 0x1f, 0x924}, + { 0x1b8980, 1, 0x1f, 0x924}, + { 0x1b89c0, 1, 0x1f, 0x924}, + { 0x1b8a00, 1, 0x1f, 0x934}, + { 0x1b8a40, 1, 0x1f, 0x924}, + { 0x1b8a80, 1, 0x1f, 0x492}, + { 0x1b8ac0, 1, 0x1f, 0x924}, + { 0x1b8b00, 1, 0x1f, 0x924}, + { 0x1b8b40, 1, 0x1f, 0x924}, + { 0x1b8b80, 1, 0x1f, 0x924}, + { 0x1b8bc0, 1, 0x1f, 0x924}, + { 0x1b8c00, 1, 0x1f, 0x924}, + { 0x1b8c40, 1, 0x1f, 0x924}, + { 0x1b8c80, 1, 0x1f, 0x924}, + { 0x1b8cc0, 1, 0x1f, 0x924}, + { 0x1b8cc4, 1, 0x1c, 0x924}, + { 0x1b8d00, 1, 0x1f, 0x924}, + { 0x1b8d40, 1, 0x1f, 0x924}, + { 0x1b8d80, 1, 0x1f, 0x924}, + { 0x1b8dc0, 1, 0x1f, 0x924}, + { 0x1b8e00, 1, 0x1f, 0x924}, + { 0x1b8e40, 1, 0x1f, 0x924}, + { 0x1b8e80, 1, 0x1f, 0x924}, + { 0x1b8e84, 1, 0x1c, 0x924}, + { 0x1b8ec0, 1, 0x1e, 0x924}, + { 0x1b8f00, 1, 0x1e, 0x924}, + { 0x1b8f40, 1, 0x1e, 0x924}, + { 0x1b8f80, 1, 0x1e, 0x924}, + { 0x1b8fc0, 1, 0x1e, 0x924}, + { 0x1b8fd4, 5, 0x1c, 0x924}, + { 
0x1b8fe8, 2, 0x18, 0x924}, + { 0x1b9000, 1, 0x1c, 0x924}, + { 0x1b9040, 3, 0x1c, 0x924}, + { 0x1b905c, 1, 0x18, 0x924}, + { 0x1b9064, 1, 0x10, 0x924}, + { 0x1b9080, 10, 0x10, 0x924}, + { 0x1c0000, 2, 0x1f, 0x924}, + { 0x200000, 65, 0x1f, 0x924}, + { 0x200124, 2, 0x1f, 0x1fff}, + { 0x200130, 3, 0x1f, 0x1fff}, + { 0x200140, 1, 0x1f, 0x1fff}, + { 0x20014c, 2, 0x1e, 0x924}, + { 0x200200, 27, 0x1f, 0x924}, + { 0x20026c, 1, 0x1f, 0xfff}, + { 0x200270, 12, 0x1f, 0x924}, + { 0x2002a0, 1, 0x1f, 0xfff}, + { 0x2002a4, 17, 0x1f, 0x924}, + { 0x200340, 4, 0x1f, 0x924}, + { 0x200380, 1, 0x1c, 0x924}, + { 0x200388, 1, 0x1c, 0x924}, + { 0x200390, 1, 0x1c, 0x924}, + { 0x200398, 1, 0x1c, 0x924}, + { 0x2003a0, 1, 0x1c, 0x924}, + { 0x2003a8, 2, 0x1c, 0x924}, + { 0x200400, 256, 0x3, 0xfff}, + { 0x202000, 4, 0x1f, 0x1927}, + { 0x202010, 2044, 0x1f, 0x1007}, + { 0x204000, 4, 0x18, 0x924}, + { 0x220000, 1, 0x1f, 0x925}, + { 0x220004, 5631, 0x1f, 0x1}, + { 0x225800, 2560, 0x1e, 0x1}, + { 0x228000, 1, 0x1f, 0x925}, + { 0x228004, 8191, 0x1e, 0x1}, + { 0x230000, 1, 0x1f, 0x925}, + { 0x230004, 15, 0x2, 0x1}, + { 0x230040, 1, 0x1e, 0x925}, + { 0x230044, 239, 0x2, 0x1}, + { 0x230400, 1, 0x1f, 0x925}, + { 0x230404, 255, 0x2, 0x1}, + { 0x230800, 1, 0x1f, 0x924}, + { 0x230840, 1, 0x1e, 0x924}, + { 0x230c00, 1, 0x1f, 0x924}, + { 0x231000, 1, 0x1f, 0x924}, + { 0x231040, 1, 0x1e, 0x924}, + { 0x231400, 1, 0x1f, 0x924}, + { 0x231440, 1, 0x1e, 0x924}, + { 0x231480, 1, 0x1e, 0x924}, + { 0x2314c0, 1, 0x1e, 0x924}, + { 0x231800, 128, 0x1f, 0x2}, + { 0x231c00, 128, 0x1f, 0x2}, + { 0x232000, 1, 0x1f, 0xdb6}, + { 0x232400, 1, 0x1e, 0x925}, + { 0x232404, 5631, 0x1c, 0x1}, + { 0x238000, 1, 0x1f, 0xfff}, + { 0x238040, 1, 0x1f, 0xfff}, + { 0x238080, 1, 0x1f, 0xfff}, + { 0x2380c0, 1, 0x1f, 0xfff}, + { 0x238100, 1, 0x1f, 0x924}, + { 0x238140, 1, 0x1f, 0x924}, + { 0x238180, 1, 0x1f, 0x924}, + { 0x2381c0, 1, 0x1f, 0x924}, + { 0x238200, 1, 0x1f, 0x924}, + { 0x238240, 1, 0x1f, 0x924}, + { 0x238280, 1, 0x1f, 0x924}, + { 0x2382c0, 1, 0x1f, 0x924}, + { 0x238300, 1, 0x1f, 0x924}, + { 0x238340, 1, 0x1f, 0x924}, + { 0x238380, 1, 0x1f, 0x924}, + { 0x2383c0, 1, 0x1f, 0x924}, + { 0x238400, 1, 0x1f, 0x924}, + { 0x238440, 1, 0x1f, 0x924}, + { 0x238480, 1, 0x1f, 0x924}, + { 0x2384c0, 1, 0x1f, 0x924}, + { 0x238500, 1, 0x1f, 0x924}, + { 0x238540, 1, 0x1f, 0x924}, + { 0x238580, 1, 0x1f, 0x924}, + { 0x2385c0, 19, 0x1c, 0x924}, + { 0x238800, 1, 0x1f, 0x924}, + { 0x238840, 1, 0x1f, 0x924}, + { 0x238880, 1, 0x1f, 0x924}, + { 0x2388c0, 1, 0x1f, 0x924}, + { 0x238900, 1, 0x1f, 0x924}, + { 0x238940, 1, 0x1f, 0x924}, + { 0x238980, 1, 0x1f, 0x924}, + { 0x2389c0, 1, 0x1f, 0x924}, + { 0x238a00, 1, 0x1f, 0x926}, + { 0x238a40, 1, 0x1f, 0x924}, + { 0x238a80, 1, 0x1f, 0x492}, + { 0x238ac0, 1, 0x1f, 0x924}, + { 0x238b00, 1, 0x1f, 0x924}, + { 0x238b40, 1, 0x1f, 0x924}, + { 0x238b80, 1, 0x1f, 0x924}, + { 0x238bc0, 1, 0x1f, 0x924}, + { 0x238c00, 1, 0x1f, 0x924}, + { 0x238c40, 1, 0x1f, 0x924}, + { 0x238c80, 1, 0x1f, 0x924}, + { 0x238cc0, 1, 0x1f, 0x924}, + { 0x238cc4, 1, 0x1c, 0x924}, + { 0x238d00, 1, 0x1f, 0x924}, + { 0x238d40, 1, 0x1f, 0x924}, + { 0x238d80, 1, 0x1f, 0x924}, + { 0x238dc0, 1, 0x1f, 0x924}, + { 0x238e00, 1, 0x1f, 0x924}, + { 0x238e40, 1, 0x1f, 0x924}, + { 0x238e80, 1, 0x1f, 0x924}, + { 0x238e84, 1, 0x1c, 0x924}, + { 0x238ec0, 1, 0x1e, 0x924}, + { 0x238f00, 1, 0x1e, 0x924}, + { 0x238f40, 1, 0x1e, 0x924}, + { 0x238f80, 1, 0x1e, 0x924}, + { 0x238fc0, 1, 0x1e, 0x924}, + { 0x238fd4, 5, 0x1c, 0x924}, + { 0x238fe8, 2, 0x18, 0x924}, + { 0x239000, 1, 0x1c, 0x924}, + { 
0x239040, 3, 0x1c, 0x924}, + { 0x23905c, 1, 0x18, 0x924}, + { 0x239064, 1, 0x10, 0x924}, + { 0x239080, 10, 0x10, 0x924}, + { 0x240000, 2, 0x1f, 0x924}, + { 0x280000, 65, 0x1f, 0x924}, + { 0x280124, 2, 0x1f, 0x1fff}, + { 0x280130, 3, 0x1f, 0x1fff}, + { 0x280140, 1, 0x1f, 0x1fff}, + { 0x28014c, 2, 0x1e, 0x924}, + { 0x280200, 27, 0x1f, 0x924}, + { 0x28026c, 1, 0x1f, 0xfff}, + { 0x280270, 12, 0x1f, 0x924}, + { 0x2802a0, 1, 0x1f, 0xfff}, + { 0x2802a4, 17, 0x1f, 0x924}, + { 0x280340, 4, 0x1f, 0x924}, + { 0x280380, 1, 0x1c, 0x924}, + { 0x280388, 1, 0x1c, 0x924}, + { 0x280390, 1, 0x1c, 0x924}, + { 0x280398, 1, 0x1c, 0x924}, + { 0x2803a0, 1, 0x1c, 0x924}, + { 0x2803a8, 2, 0x1c, 0x924}, + { 0x280400, 256, 0x3, 0xfff}, + { 0x282000, 4, 0x1f, 0x9e4}, + { 0x282010, 2044, 0x1f, 0x1c0}, + { 0x284000, 4, 0x18, 0x924}, + { 0x2a0000, 1, 0x1f, 0x964}, + { 0x2a0004, 5631, 0x1f, 0x40}, + { 0x2a5800, 2560, 0x1e, 0x40}, + { 0x2a8000, 1, 0x1f, 0x964}, + { 0x2a8004, 8191, 0x1e, 0x40}, + { 0x2b0000, 1, 0x1f, 0x964}, + { 0x2b0004, 15, 0x2, 0x40}, + { 0x2b0040, 1, 0x1e, 0x964}, + { 0x2b0044, 239, 0x2, 0x40}, + { 0x2b0400, 1, 0x1f, 0x964}, + { 0x2b0404, 255, 0x2, 0x40}, + { 0x2b0800, 1, 0x1f, 0x924}, + { 0x2b0840, 1, 0x1e, 0x924}, + { 0x2b0c00, 1, 0x1f, 0x924}, + { 0x2b1000, 1, 0x1f, 0x924}, + { 0x2b1040, 1, 0x1e, 0x924}, + { 0x2b1400, 1, 0x1f, 0x924}, + { 0x2b1440, 1, 0x1e, 0x924}, + { 0x2b1480, 1, 0x1e, 0x924}, + { 0x2b14c0, 1, 0x1e, 0x924}, + { 0x2b1800, 128, 0x1f, 0x80}, + { 0x2b1c00, 128, 0x1f, 0x80}, + { 0x2b2000, 1, 0x1f, 0xdb6}, + { 0x2b2400, 1, 0x1e, 0x964}, + { 0x2b2404, 5631, 0x1c, 0x40}, + { 0x2b8000, 1, 0x1f, 0xfff}, + { 0x2b8040, 1, 0x1f, 0xfff}, + { 0x2b8080, 1, 0x1f, 0xfff}, + { 0x2b80c0, 1, 0x1f, 0x924}, + { 0x2b8100, 1, 0x1f, 0x924}, + { 0x2b8140, 1, 0x1f, 0x924}, + { 0x2b8180, 1, 0x1f, 0x924}, + { 0x2b81c0, 1, 0x1f, 0x924}, + { 0x2b8200, 1, 0x1f, 0x924}, + { 0x2b8240, 1, 0x1f, 0x924}, + { 0x2b8280, 1, 0x1f, 0x924}, + { 0x2b82c0, 1, 0x1f, 0x924}, + { 0x2b8300, 1, 0x1f, 0x924}, + { 0x2b8340, 1, 0x1f, 0x924}, + { 0x2b8380, 1, 0x1f, 0x924}, + { 0x2b83c0, 1, 0x1f, 0x924}, + { 0x2b8400, 1, 0x1f, 0x924}, + { 0x2b8440, 1, 0x1f, 0x924}, + { 0x2b8480, 1, 0x1f, 0x924}, + { 0x2b84c0, 1, 0x1f, 0x924}, + { 0x2b8500, 1, 0x1f, 0x924}, + { 0x2b8540, 1, 0x1f, 0x924}, + { 0x2b8580, 1, 0x1f, 0x924}, + { 0x2b85c0, 19, 0x1c, 0x924}, + { 0x2b8800, 1, 0x1f, 0x924}, + { 0x2b8840, 1, 0x1f, 0x924}, + { 0x2b8880, 1, 0x1f, 0x924}, + { 0x2b88c0, 1, 0x1f, 0x924}, + { 0x2b8900, 1, 0x1f, 0x924}, + { 0x2b8940, 1, 0x1f, 0x924}, + { 0x2b8980, 1, 0x1f, 0x924}, + { 0x2b89c0, 1, 0x1f, 0x924}, + { 0x2b8a00, 1, 0x1f, 0x9a4}, + { 0x2b8a40, 1, 0x1f, 0x924}, + { 0x2b8a80, 1, 0x1f, 0x492}, + { 0x2b8ac0, 1, 0x1f, 0x924}, + { 0x2b8b00, 1, 0x1f, 0x924}, + { 0x2b8b40, 1, 0x1f, 0x924}, + { 0x2b8b80, 1, 0x1f, 0x924}, + { 0x2b8bc0, 1, 0x1f, 0x924}, + { 0x2b8c00, 1, 0x1f, 0x924}, + { 0x2b8c40, 1, 0x1f, 0x924}, + { 0x2b8c80, 1, 0x1f, 0x924}, + { 0x2b8cc0, 1, 0x1f, 0x924}, + { 0x2b8cc4, 1, 0x1c, 0x924}, + { 0x2b8d00, 1, 0x1f, 0x924}, + { 0x2b8d40, 1, 0x1f, 0x924}, + { 0x2b8d80, 1, 0x1f, 0x924}, + { 0x2b8dc0, 1, 0x1f, 0x924}, + { 0x2b8e00, 1, 0x1f, 0x924}, + { 0x2b8e40, 1, 0x1f, 0x924}, + { 0x2b8e80, 1, 0x1f, 0x924}, + { 0x2b8e84, 1, 0x1c, 0x924}, + { 0x2b8ec0, 1, 0x1e, 0x924}, + { 0x2b8f00, 1, 0x1e, 0x924}, + { 0x2b8f40, 1, 0x1e, 0x924}, + { 0x2b8f80, 1, 0x1e, 0x924}, + { 0x2b8fc0, 1, 0x1e, 0x924}, + { 0x2b8fd4, 5, 0x1c, 0x924}, + { 0x2b8fe8, 2, 0x18, 0x924}, + { 0x2b9000, 1, 0x1c, 0x924}, + { 0x2b9040, 3, 0x1c, 0x924}, + { 0x2b905c, 1, 0x18, 0x924}, + 
{ 0x2b9064, 1, 0x10, 0x924}, + { 0x2b9080, 10, 0x10, 0x924}, + { 0x2c0000, 2, 0x1f, 0x1fff}, + { 0x300000, 65, 0x1f, 0x924}, + { 0x300124, 2, 0x1f, 0x1fff}, + { 0x300130, 3, 0x1f, 0x1fff}, + { 0x300140, 1, 0x1f, 0x1fff}, + { 0x30014c, 2, 0x1e, 0x924}, + { 0x300200, 27, 0x1f, 0x924}, + { 0x30026c, 1, 0x1f, 0xfff}, + { 0x300270, 12, 0x1f, 0x924}, + { 0x3002a0, 1, 0x1f, 0xfff}, + { 0x3002a4, 17, 0x1f, 0x924}, + { 0x300340, 4, 0x1f, 0x924}, + { 0x300380, 1, 0x1c, 0x924}, + { 0x300388, 1, 0x1c, 0x924}, + { 0x300390, 1, 0x1c, 0x924}, + { 0x300398, 1, 0x1c, 0x924}, + { 0x3003a0, 1, 0x1c, 0x924}, + { 0x3003a8, 2, 0x1c, 0x924}, + { 0x300400, 256, 0x3, 0xfff}, + { 0x302000, 4, 0x1f, 0xf24}, + { 0x302010, 2044, 0x1f, 0xe00}, + { 0x304000, 4, 0x18, 0x924}, + { 0x320000, 1, 0x1f, 0xb24}, + { 0x320004, 5631, 0x1f, 0x200}, + { 0x325800, 2560, 0x1e, 0x200}, + { 0x328000, 1, 0x1f, 0xb24}, + { 0x328004, 8191, 0x1e, 0x200}, + { 0x330000, 1, 0x1f, 0xb24}, + { 0x330004, 15, 0x2, 0x200}, + { 0x330040, 1, 0x1e, 0xb24}, + { 0x330044, 239, 0x2, 0x200}, + { 0x330400, 1, 0x1f, 0xb24}, + { 0x330404, 255, 0x2, 0x200}, + { 0x330800, 1, 0x1f, 0x924}, + { 0x330840, 1, 0x1e, 0x924}, + { 0x330c00, 1, 0x1f, 0x924}, + { 0x331000, 1, 0x1f, 0x924}, + { 0x331040, 1, 0x1e, 0x924}, + { 0x331400, 1, 0x1f, 0x924}, + { 0x331440, 1, 0x1e, 0x924}, + { 0x331480, 1, 0x1e, 0x924}, + { 0x3314c0, 1, 0x1e, 0x924}, + { 0x331800, 128, 0x1f, 0x400}, + { 0x331c00, 128, 0x1f, 0x400}, + { 0x332000, 1, 0x1f, 0xdb6}, + { 0x332400, 1, 0x1e, 0xb24}, + { 0x332404, 5631, 0x1c, 0x200}, + { 0x338000, 1, 0x1f, 0xfff}, + { 0x338040, 1, 0x1f, 0xfff}, + { 0x338080, 1, 0x1f, 0xfff}, + { 0x3380c0, 1, 0x1f, 0xfff}, + { 0x338100, 1, 0x1f, 0x924}, + { 0x338140, 1, 0x1f, 0x924}, + { 0x338180, 1, 0x1f, 0x924}, + { 0x3381c0, 1, 0x1f, 0x924}, + { 0x338200, 1, 0x1f, 0x924}, + { 0x338240, 1, 0x1f, 0x924}, + { 0x338280, 1, 0x1f, 0x924}, + { 0x3382c0, 1, 0x1f, 0x924}, + { 0x338300, 1, 0x1f, 0x924}, + { 0x338340, 1, 0x1f, 0x924}, + { 0x338380, 1, 0x1f, 0x924}, + { 0x3383c0, 1, 0x1f, 0x924}, + { 0x338400, 1, 0x1f, 0x924}, + { 0x338440, 1, 0x1f, 0x924}, + { 0x338480, 1, 0x1f, 0x924}, + { 0x3384c0, 1, 0x1f, 0x924}, + { 0x338500, 1, 0x1f, 0x924}, + { 0x338540, 1, 0x1f, 0x924}, + { 0x338580, 1, 0x1f, 0x924}, + { 0x3385c0, 19, 0x1c, 0x924}, + { 0x338800, 1, 0x1f, 0x924}, + { 0x338840, 1, 0x1f, 0x924}, + { 0x338880, 1, 0x1f, 0x924}, + { 0x3388c0, 1, 0x1f, 0x924}, + { 0x338900, 1, 0x1f, 0x924}, + { 0x338940, 1, 0x1f, 0x924}, + { 0x338980, 1, 0x1f, 0x924}, + { 0x3389c0, 1, 0x1f, 0x924}, + { 0x338a00, 1, 0x1f, 0xd24}, + { 0x338a40, 1, 0x1f, 0x924}, + { 0x338a80, 1, 0x1f, 0x492}, + { 0x338ac0, 1, 0x1f, 0x924}, + { 0x338b00, 1, 0x1f, 0x924}, + { 0x338b40, 1, 0x1f, 0x924}, + { 0x338b80, 1, 0x1f, 0x924}, + { 0x338bc0, 1, 0x1f, 0x924}, + { 0x338c00, 1, 0x1f, 0x924}, + { 0x338c40, 1, 0x1f, 0x924}, + { 0x338c80, 1, 0x1f, 0x924}, + { 0x338cc0, 1, 0x1f, 0x924}, + { 0x338cc4, 1, 0x1c, 0x924}, + { 0x338d00, 1, 0x1f, 0x924}, + { 0x338d40, 1, 0x1f, 0x924}, + { 0x338d80, 1, 0x1f, 0x924}, + { 0x338dc0, 1, 0x1f, 0x924}, + { 0x338e00, 1, 0x1f, 0x924}, + { 0x338e40, 1, 0x1f, 0x924}, + { 0x338e80, 1, 0x1f, 0x924}, + { 0x338e84, 1, 0x1c, 0x924}, + { 0x338ec0, 1, 0x1e, 0x924}, + { 0x338f00, 1, 0x1e, 0x924}, + { 0x338f40, 1, 0x1e, 0x924}, + { 0x338f80, 1, 0x1e, 0x924}, + { 0x338fc0, 1, 0x1e, 0x924}, + { 0x338fd4, 5, 0x1c, 0x924}, + { 0x338fe8, 2, 0x18, 0x924}, + { 0x339000, 1, 0x1c, 0x924}, + { 0x339040, 3, 0x1c, 0x924}, + { 0x33905c, 1, 0x18, 0x924}, + { 0x339064, 1, 0x10, 0x924}, + { 0x339080, 10, 
0x10, 0x924}, + { 0x340000, 2, 0x1f, 0x924}, + { 0x3a0000, 40960, 0x1c, 0x1000} }; -#define REGS_COUNT ARRAY_SIZE(reg_addrs) -static const struct dump_sign dump_sign_all = { 0x4e23fde1, 0x70017, 0x3a }; +#define REGS_COUNT ARRAY_SIZE(reg_addrs) + +static const struct reg_addr idle_reg_addrs[] = { + { 0x2104, 1, 0x1f, 0xfff}, + { 0x2110, 2, 0x1f, 0xfff}, + { 0x211c, 8, 0x1f, 0xfff}, + { 0x2814, 1, 0x1f, 0xfff}, + { 0x281c, 2, 0x1f, 0xfff}, + { 0x2854, 1, 0x1f, 0xfff}, + { 0x285c, 1, 0x1f, 0xfff}, + { 0x3040, 1, 0x1f, 0xfff}, + { 0x9010, 7, 0x1c, 0xfff}, + { 0x9030, 1, 0x1c, 0xfff}, + { 0x9068, 16, 0x1c, 0xfff}, + { 0x9230, 2, 0x1c, 0xfff}, + { 0x9244, 1, 0x1c, 0xfff}, + { 0x9298, 1, 0x1c, 0xfff}, + { 0x92a8, 1, 0x1c, 0x1fff}, + { 0xa38c, 1, 0x1f, 0x1fff}, + { 0xa3c4, 1, 0x1e, 0xfff}, + { 0xa404, 1, 0x1f, 0xfff}, + { 0xa408, 2, 0x1f, 0x1fff}, + { 0xa42c, 12, 0x1f, 0xfff}, + { 0xa580, 1, 0x1f, 0x1fff}, + { 0xa590, 1, 0x1f, 0x1fff}, + { 0xa600, 5, 0x1e, 0xfff}, + { 0xa618, 1, 0x1e, 0xfff}, + { 0xa714, 1, 0x1c, 0xfff}, + { 0xa720, 1, 0x1c, 0xfff}, + { 0xa750, 1, 0x1c, 0xfff}, + { 0xc09c, 1, 0x3, 0xfff}, + { 0x103b0, 1, 0x1f, 0xfff}, + { 0x103c0, 1, 0x1f, 0xfff}, + { 0x103d0, 1, 0x3, 0x1fff}, + { 0x10418, 1, 0x1f, 0xfff}, + { 0x10420, 1, 0x1f, 0xfff}, + { 0x10428, 1, 0x1f, 0xfff}, + { 0x10460, 1, 0x1f, 0xfff}, + { 0x10474, 1, 0x1f, 0xfff}, + { 0x104e0, 1, 0x1f, 0xfff}, + { 0x104ec, 1, 0x1f, 0xfff}, + { 0x104f8, 1, 0x1f, 0xfff}, + { 0x10508, 1, 0x1f, 0xfff}, + { 0x10530, 1, 0x1f, 0xfff}, + { 0x10538, 1, 0x1f, 0xfff}, + { 0x10548, 1, 0x1f, 0xfff}, + { 0x10558, 1, 0x1f, 0xfff}, + { 0x182a8, 1, 0x1c, 0xfff}, + { 0x182b8, 1, 0x1c, 0xfff}, + { 0x18308, 1, 0x1c, 0xfff}, + { 0x18318, 1, 0x1c, 0xfff}, + { 0x18338, 1, 0x1c, 0xfff}, + { 0x18348, 1, 0x1c, 0xfff}, + { 0x183bc, 1, 0x1c, 0x1fff}, + { 0x183cc, 1, 0x1c, 0x1fff}, + { 0x18570, 1, 0x18, 0xfff}, + { 0x18578, 1, 0x18, 0xfff}, + { 0x1858c, 1, 0x18, 0xfff}, + { 0x18594, 1, 0x18, 0xfff}, + { 0x1862c, 4, 0x10, 0xfff}, + { 0x2021c, 11, 0x1f, 0xfff}, + { 0x202a8, 1, 0x1f, 0xfff}, + { 0x202b8, 1, 0x1f, 0x1fff}, + { 0x20404, 1, 0x1f, 0xfff}, + { 0x2040c, 2, 0x1f, 0xfff}, + { 0x2041c, 2, 0x1f, 0xfff}, + { 0x40154, 14, 0x1f, 0xfff}, + { 0x40198, 1, 0x1f, 0x1fff}, + { 0x404ac, 1, 0x1f, 0xfff}, + { 0x404bc, 1, 0x1f, 0x1fff}, + { 0x42290, 1, 0x1f, 0xfff}, + { 0x422a0, 1, 0x1f, 0xfff}, + { 0x422b0, 1, 0x1f, 0x1fff}, + { 0x42548, 1, 0x1f, 0xfff}, + { 0x42550, 1, 0x1f, 0xfff}, + { 0x42558, 1, 0x1f, 0xfff}, + { 0x50160, 8, 0x1f, 0xfff}, + { 0x501d0, 1, 0x1f, 0xfff}, + { 0x501e0, 1, 0x1f, 0x1fff}, + { 0x50204, 1, 0x1f, 0xfff}, + { 0x5020c, 2, 0x1f, 0xfff}, + { 0x5021c, 1, 0x1f, 0xfff}, + { 0x60090, 1, 0x1f, 0xfff}, + { 0x6011c, 1, 0x1f, 0xfff}, + { 0x6012c, 1, 0x1f, 0x1fff}, + { 0xc101c, 1, 0x1f, 0xfff}, + { 0xc102c, 1, 0x1f, 0x1fff}, + { 0xc2290, 1, 0x1f, 0xfff}, + { 0xc22a0, 1, 0x1f, 0xfff}, + { 0xc22b0, 1, 0x1f, 0x1fff}, + { 0xc2548, 1, 0x1f, 0xfff}, + { 0xc2550, 1, 0x1f, 0xfff}, + { 0xc2558, 1, 0x1f, 0xfff}, + { 0xc4294, 1, 0x1f, 0xfff}, + { 0xc42a4, 1, 0x1f, 0xfff}, + { 0xc42b4, 1, 0x1f, 0x1fff}, + { 0xc4550, 1, 0x1f, 0xfff}, + { 0xc4558, 1, 0x1f, 0xfff}, + { 0xc4560, 1, 0x1f, 0xfff}, + { 0xd016c, 8, 0x1f, 0xfff}, + { 0xd01d8, 1, 0x1f, 0xfff}, + { 0xd01e8, 1, 0x1f, 0x1fff}, + { 0xd0204, 1, 0x1f, 0xfff}, + { 0xd020c, 3, 0x1f, 0xfff}, + { 0xe0154, 8, 0x1f, 0xfff}, + { 0xe01c8, 1, 0x1f, 0xfff}, + { 0xe01d8, 1, 0x1f, 0x1fff}, + { 0xe0204, 1, 0x1f, 0xfff}, + { 0xe020c, 2, 0x1f, 0xfff}, + { 0xe021c, 2, 0x1f, 0xfff}, + { 0x101014, 1, 0x1f, 0xfff}, + { 0x101030, 1, 0x1f, 
0xfff}, + { 0x101040, 1, 0x1f, 0x1fff}, + { 0x102058, 1, 0x1f, 0x1fff}, + { 0x102080, 16, 0x1f, 0xfff}, + { 0x103004, 2, 0x1f, 0xfff}, + { 0x103068, 1, 0x1f, 0xfff}, + { 0x103078, 1, 0x1f, 0xfff}, + { 0x103088, 1, 0x1f, 0x1fff}, + { 0x10309c, 2, 0x1e, 0xfff}, + { 0x1030b8, 2, 0x1c, 0xfff}, + { 0x1030cc, 1, 0x1c, 0xfff}, + { 0x1030e0, 1, 0x1c, 0xfff}, + { 0x104004, 1, 0x1f, 0xfff}, + { 0x104018, 1, 0x1f, 0xfff}, + { 0x104020, 1, 0x1f, 0xfff}, + { 0x10403c, 1, 0x1f, 0xfff}, + { 0x1040fc, 1, 0x1f, 0xfff}, + { 0x10410c, 1, 0x1f, 0x1fff}, + { 0x104400, 1, 0x1f, 0x1fff}, + { 0x104404, 63, 0x1f, 0xfff}, + { 0x104800, 1, 0x1f, 0x1fff}, + { 0x104804, 63, 0x1f, 0xfff}, + { 0x105000, 4, 0x1f, 0x1fff}, + { 0x105010, 252, 0x1f, 0xfff}, + { 0x108094, 1, 0x3, 0xfff}, + { 0x1201b0, 2, 0x1f, 0xfff}, + { 0x12032c, 1, 0x1f, 0xfff}, + { 0x12036c, 3, 0x1f, 0xfff}, + { 0x120408, 2, 0x1f, 0xfff}, + { 0x120414, 15, 0x1f, 0xfff}, + { 0x120478, 2, 0x1f, 0xfff}, + { 0x12052c, 1, 0x1f, 0xfff}, + { 0x120564, 3, 0x1f, 0xfff}, + { 0x12057c, 1, 0x1f, 0x1fff}, + { 0x12058c, 1, 0x1f, 0x1fff}, + { 0x120608, 1, 0x1e, 0xfff}, + { 0x120748, 1, 0x1c, 0xfff}, + { 0x120778, 2, 0x1c, 0xfff}, + { 0x120808, 3, 0x1f, 0xfff}, + { 0x120818, 1, 0x1f, 0xfff}, + { 0x120820, 1, 0x1f, 0xfff}, + { 0x120828, 1, 0x1f, 0xfff}, + { 0x120830, 1, 0x1f, 0xfff}, + { 0x120838, 1, 0x1f, 0xfff}, + { 0x120840, 1, 0x1f, 0xfff}, + { 0x120848, 1, 0x1f, 0xfff}, + { 0x120850, 1, 0x1f, 0xfff}, + { 0x120858, 1, 0x1f, 0xfff}, + { 0x120860, 1, 0x1f, 0xfff}, + { 0x120868, 1, 0x1f, 0xfff}, + { 0x120870, 1, 0x1f, 0xfff}, + { 0x120878, 1, 0x1f, 0xfff}, + { 0x120880, 1, 0x1f, 0xfff}, + { 0x120888, 1, 0x1f, 0xfff}, + { 0x120890, 1, 0x1f, 0xfff}, + { 0x120898, 1, 0x1f, 0xfff}, + { 0x1208a0, 1, 0x1f, 0xfff}, + { 0x1208a8, 1, 0x1f, 0xfff}, + { 0x1208b0, 1, 0x1f, 0xfff}, + { 0x1208b8, 1, 0x1f, 0xfff}, + { 0x1208c0, 1, 0x1f, 0xfff}, + { 0x1208c8, 1, 0x1f, 0xfff}, + { 0x1208d0, 1, 0x1f, 0xfff}, + { 0x1208d8, 1, 0x1f, 0xfff}, + { 0x1208e0, 1, 0x1f, 0xfff}, + { 0x1208e8, 1, 0x1f, 0xfff}, + { 0x1208f0, 1, 0x1f, 0xfff}, + { 0x1208f8, 1, 0x1f, 0xfff}, + { 0x120900, 1, 0x1f, 0xfff}, + { 0x120908, 1, 0x1f, 0xfff}, + { 0x130030, 1, 0x1c, 0xfff}, + { 0x13004c, 3, 0x1c, 0xfff}, + { 0x130064, 2, 0x1c, 0xfff}, + { 0x13009c, 1, 0x1c, 0x1fff}, + { 0x130130, 1, 0x1c, 0xfff}, + { 0x13016c, 1, 0x1c, 0xfff}, + { 0x130300, 1, 0x1c, 0xfff}, + { 0x130480, 1, 0x1c, 0xfff}, + { 0x14005c, 2, 0xf, 0xfff}, + { 0x1400d0, 2, 0xf, 0xfff}, + { 0x1400e0, 1, 0xf, 0xfff}, + { 0x1401c8, 1, 0xf, 0xfff}, + { 0x140200, 6, 0xf, 0xfff}, + { 0x140338, 7, 0x10, 0xfff}, + { 0x140370, 7, 0x10, 0xfff}, + { 0x15c1bc, 6, 0x10, 0xfff}, + { 0x15c230, 7, 0x10, 0xfff}, + { 0x16101c, 1, 0x1f, 0xfff}, + { 0x16102c, 1, 0x1f, 0x1fff}, + { 0x164014, 2, 0x1f, 0xfff}, + { 0x1640f0, 1, 0x1f, 0xfff}, + { 0x166290, 1, 0x1f, 0xfff}, + { 0x1662a0, 1, 0x1f, 0xfff}, + { 0x1662b0, 1, 0x1f, 0x1fff}, + { 0x166548, 1, 0x1f, 0xfff}, + { 0x166550, 1, 0x1f, 0xfff}, + { 0x166558, 1, 0x1f, 0xfff}, + { 0x168000, 1, 0x1f, 0xfff}, + { 0x168008, 1, 0x1f, 0xfff}, + { 0x168010, 1, 0x1f, 0xfff}, + { 0x168018, 1, 0x1f, 0xfff}, + { 0x168028, 2, 0x1f, 0xfff}, + { 0x168058, 9, 0x1f, 0xfff}, + { 0x168238, 1, 0x1f, 0xfff}, + { 0x1682d0, 7, 0x1f, 0xfff}, + { 0x168300, 2, 0x3, 0xfff}, + { 0x168308, 65, 0x1f, 0xfff}, + { 0x168410, 2, 0x1f, 0xfff}, + { 0x168438, 1, 0x1f, 0xfff}, + { 0x168448, 1, 0x1f, 0x1fff}, + { 0x168a00, 128, 0x1f, 0xfff}, + { 0x16e200, 128, 0x2, 0xfff}, + { 0x16e404, 2, 0x2, 0xfff}, + { 0x16e584, 64, 0x2, 0xfff}, + { 0x16e684, 2, 0x1e, 
0xfff},
+ { 0x16e68c, 4, 0x2, 0xfff},
+ { 0x16e6fc, 4, 0x1c, 0xfff},
+ { 0x16e7ac, 12, 0x10, 0xfff},
+ { 0x1700a4, 1, 0x1f, 0xfff},
+ { 0x1700ac, 2, 0x1f, 0xfff},
+ { 0x1700c0, 1, 0x1f, 0xfff},
+ { 0x170174, 1, 0x1f, 0xfff},
+ { 0x170184, 1, 0x1f, 0x1fff},
+ { 0x1800f4, 1, 0x1f, 0xfff},
+ { 0x180104, 1, 0x1f, 0xfff},
+ { 0x180114, 1, 0x1f, 0x1fff},
+ { 0x180124, 1, 0x1f, 0x1fff},
+ { 0x18026c, 1, 0x1f, 0xfff},
+ { 0x1802a0, 1, 0x1f, 0xfff},
+ { 0x1b8000, 1, 0x1f, 0xfff},
+ { 0x1b8040, 1, 0x1f, 0xfff},
+ { 0x1b8080, 1, 0x1f, 0xfff},
+ { 0x1b80c0, 1, 0x1f, 0xfff},
+ { 0x200104, 1, 0x1f, 0xfff},
+ { 0x200114, 1, 0x1f, 0xfff},
+ { 0x200124, 1, 0x1f, 0x1fff},
+ { 0x200134, 1, 0x1f, 0x1fff},
+ { 0x20026c, 1, 0x1f, 0xfff},
+ { 0x2002a0, 1, 0x1f, 0xfff},
+ { 0x238000, 1, 0x1f, 0xfff},
+ { 0x238040, 1, 0x1f, 0xfff},
+ { 0x238080, 1, 0x1f, 0xfff},
+ { 0x2380c0, 1, 0x1f, 0xfff},
+ { 0x280104, 1, 0x1f, 0xfff},
+ { 0x280114, 1, 0x1f, 0xfff},
+ { 0x280124, 1, 0x1f, 0x1fff},
+ { 0x280134, 1, 0x1f, 0x1fff},
+ { 0x28026c, 1, 0x1f, 0xfff},
+ { 0x2802a0, 1, 0x1f, 0xfff},
+ { 0x2b8000, 1, 0x1f, 0xfff},
+ { 0x2b8040, 1, 0x1f, 0xfff},
+ { 0x2b8080, 1, 0x1f, 0xfff},
+ { 0x300104, 1, 0x1f, 0xfff},
+ { 0x300114, 1, 0x1f, 0xfff},
+ { 0x300124, 1, 0x1f, 0x1fff},
+ { 0x300134, 1, 0x1f, 0x1fff},
+ { 0x30026c, 1, 0x1f, 0xfff},
+ { 0x3002a0, 1, 0x1f, 0xfff},
+ { 0x338000, 1, 0x1f, 0xfff},
+ { 0x338040, 1, 0x1f, 0xfff},
+ { 0x338080, 1, 0x1f, 0xfff},
+ { 0x3380c0, 1, 0x1f, 0xfff}
+};
-static const u32 page_vals_e2[] = { 0, 128 };
-#define PAGE_MODE_VALUES_E2 ARRAY_SIZE(page_vals_e2)
+#define IDLE_REGS_COUNT ARRAY_SIZE(idle_reg_addrs)
-static const u32 page_write_regs_e2[] = { 328476 };
-#define PAGE_WRITE_REGS_E2 ARRAY_SIZE(page_write_regs_e2)
+static const u32 read_reg_e1[] = {
+ 0x1b1000};
-static const struct reg_addr page_read_regs_e2[] = {
- { 0x58000, 4608, RI_E2_ONLINE } };
-#define PAGE_READ_REGS_E2 ARRAY_SIZE(page_read_regs_e2)
+static const struct wreg_addr wreg_addr_e1 = {
+ 0x1b0c00, 192, 1, read_reg_e1, 0x1f, 0x1fff};
-static const u32 page_vals_e3[] = { 0, 128 };
-#define PAGE_MODE_VALUES_E3 ARRAY_SIZE(page_vals_e3)
+static const u32 read_reg_e1h[] = {
+ 0x1b1040, 0x1b1000};
-static const u32 page_write_regs_e3[] = { 328476 };
-#define PAGE_WRITE_REGS_E3 ARRAY_SIZE(page_write_regs_e3)
+static const struct wreg_addr wreg_addr_e1h = {
+ 0x1b0c00, 256, 2, read_reg_e1h, 0x1f, 0x1fff};
-static const struct reg_addr page_read_regs_e3[] = {
- { 0x58000, 4608, RI_E3E3B0_ONLINE } };
-#define PAGE_READ_REGS_E3 ARRAY_SIZE(page_read_regs_e3)
+static const u32 read_reg_e2[] = {
+ 0x1b1040, 0x1b1000};
-#endif /* BNX2X_DUMP_H */
+static const struct wreg_addr wreg_addr_e2 = {
+ 0x1b0c00, 128, 2, read_reg_e2, 0x1f, 0x1fff};
+
+static const u32 read_reg_e3[] = {
+ 0x1b1040, 0x1b1000};
+
+static const struct wreg_addr wreg_addr_e3 = {
+ 0x1b0c00, 128, 2, read_reg_e3, 0x1f, 0x1fff};
+
+static const u32 read_reg_e3b0[] = {
+ 0x1b1040, 0x1b1000};
+
+static const struct wreg_addr wreg_addr_e3b0 = {
+ 0x1b0c00, 128, 2, read_reg_e3b0, 0x1f, 0x1fff};
+
+static const unsigned int dump_num_registers[NUM_CHIPS][NUM_PRESETS] = {
+ {20782, 18567, 27975, 19729, 18311, 27719, 20836, 32391, 41799, 20812,
+ 26247, 35655, 19074},
+ {32774, 19297, 33277, 31721, 19041, 33021, 32828, 33121, 47101, 32804,
+ 26977, 40957, 35895},
+ {36527, 17928, 33697, 35474, 18700, 34466, 36581, 31752, 47521, 36557,
+ 25608, 41377, 43903},
+ {45239, 17936, 34387, 44186, 18708, 35156, 45293, 31760, 48211, 45269,
+ 25616, 42067, 43903},
+ {45302, 17999, 34802,
44249, 18771, 35571, 45356, 31823, 48626, 45332,
+ 25679, 42482, 43903}
+};
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 31a8b38ab15..25eddd90f48 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1,12 +1,12 @@
 /* bnx2x_ethtool.c: Broadcom Everest network driver.
 *
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
@@ -22,13 +22,10 @@
 #include <linux/types.h>
 #include <linux/sched.h>
 #include <linux/crc32.h>
-
-
 #include "bnx2x.h"
 #include "bnx2x_cmn.h"
 #include "bnx2x_dump.h"
 #include "bnx2x_init.h"
-#include "bnx2x_sp.h"
 /* Note: in the format strings below %s is replaced by the queue-name which is
 * either its index or 'fcoe' for the fcoe queue. Make sure the format string
@@ -65,7 +62,9 @@ static const struct {
 8, "[%s]: tpa_aggregations" },
 { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
 8, "[%s]: tpa_aggregated_frames"},
- { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"}
+ { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"},
+ { Q_STATS_OFFSET32(driver_filtered_tx_pkt),
+ 4, "[%s]: driver_filtered_tx_pkt" }
 };
 #define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
@@ -175,16 +174,26 @@ static const struct {
 { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
 { STATS_OFFSET32(total_tpa_bytes_hi),
- 8, STATS_FLAGS_FUNC, "tpa_bytes"}
+ 8, STATS_FLAGS_FUNC, "tpa_bytes"},
+ { STATS_OFFSET32(recoverable_error),
+ 4, STATS_FLAGS_FUNC, "recoverable_errors" },
+ { STATS_OFFSET32(unrecoverable_error),
+ 4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
+ { STATS_OFFSET32(driver_filtered_tx_pkt),
+ 4, STATS_FLAGS_FUNC, "driver_filtered_tx_pkt" },
+ { STATS_OFFSET32(eee_tx_lpi),
+ 4, STATS_FLAGS_PORT, "Tx LPI entry count"}
 };
 #define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
+
 static int bnx2x_get_port_type(struct bnx2x *bp)
 {
 int port_type;
 u32 phy_idx = bnx2x_get_cur_phy_idx(bp);
 switch (bp->link_params.phy[phy_idx].media_type) {
- case ETH_PHY_SFP_FIBER:
+ case ETH_PHY_SFPP_10G_FIBER:
+ case ETH_PHY_SFP_1G_FIBER:
 case ETH_PHY_XFP_FIBER:
 case ETH_PHY_KR:
 case ETH_PHY_CX4:
@@ -217,21 +226,25 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 (bp->port.supported[cfg_idx ^ 1] &
 (SUPPORTED_TP | SUPPORTED_FIBRE));
 cmd->advertising = bp->port.advertising[cfg_idx];
+ if (bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type ==
+ ETH_PHY_SFP_1G_FIBER) {
+ cmd->supported &= ~(SUPPORTED_10000baseT_Full);
+ cmd->advertising &= ~(ADVERTISED_10000baseT_Full);
+ }
- if ((bp->state == BNX2X_STATE_OPEN) &&
- !(bp->flags & MF_FUNC_DIS) &&
- (bp->link_vars.link_up)) {
- ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
+ if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up &&
+ !(bp->flags & MF_FUNC_DIS)) {
 cmd->duplex = bp->link_vars.duplex;
+
+ if (IS_MF(bp) && !BP_NOMCP(bp))
+ ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
+ else
+ ethtool_cmd_speed_set(cmd,
bp->link_vars.line_speed); } else { - ethtool_cmd_speed_set( - cmd, bp->link_params.req_line_speed[cfg_idx]); - cmd->duplex = bp->link_params.req_duplex[cfg_idx]; + cmd->duplex = DUPLEX_UNKNOWN; + ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); } - if (IS_MF(bp)) - ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp)); - cmd->port = bnx2x_get_port_type(bp); cmd->phy_address = bp->mdio.prtad; @@ -242,10 +255,40 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) else cmd->autoneg = AUTONEG_DISABLE; + /* Publish LP advertised speeds and FC */ + if (bp->link_vars.link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { + u32 status = bp->link_vars.link_status; + + cmd->lp_advertising |= ADVERTISED_Autoneg; + if (status & LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE) + cmd->lp_advertising |= ADVERTISED_Pause; + if (status & LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE) + cmd->lp_advertising |= ADVERTISED_Asym_Pause; + + if (status & LINK_STATUS_LINK_PARTNER_10THD_CAPABLE) + cmd->lp_advertising |= ADVERTISED_10baseT_Half; + if (status & LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE) + cmd->lp_advertising |= ADVERTISED_10baseT_Full; + if (status & LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE) + cmd->lp_advertising |= ADVERTISED_100baseT_Half; + if (status & LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE) + cmd->lp_advertising |= ADVERTISED_100baseT_Full; + if (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) + cmd->lp_advertising |= ADVERTISED_1000baseT_Half; + if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) + cmd->lp_advertising |= ADVERTISED_1000baseT_Full; + if (status & LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE) + cmd->lp_advertising |= ADVERTISED_2500baseX_Full; + if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE) + cmd->lp_advertising |= ADVERTISED_10000baseT_Full; + if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE) + cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full; + } + cmd->maxtxpkt = 0; cmd->maxrxpkt = 0; - DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" + DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n" " supported 0x%x advertising 0x%x speed %u\n" " duplex %d port %d phy_address %d transceiver %d\n" " autoneg %d maxtxpkt %d maxrxpkt %d\n", @@ -261,12 +304,12 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct bnx2x *bp = netdev_priv(dev); u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config; - u32 speed; + u32 speed, phy_idx; if (IS_MF_SD(bp)) return 0; - DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" + DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n" " supported 0x%x advertising 0x%x speed %u\n" " duplex %d port %d phy_address %d transceiver %d\n" " autoneg %d maxtxpkt %d maxrxpkt %d\n", @@ -277,6 +320,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) speed = ethtool_cmd_speed(cmd); + /* If received a request for an unknown duplex, assume full*/ + if (cmd->duplex == DUPLEX_UNKNOWN) + cmd->duplex = DUPLEX_FULL; + if (IS_MF_SI(bp)) { u32 part; u32 line_speed = bp->link_vars.line_speed; @@ -286,18 +333,17 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) line_speed = 10000; if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) { - BNX2X_DEV_INFO("To set speed BC %X or higher " - "is required, please upgrade BC\n", - REQ_BC_VER_4_SET_MF_BW); + DP(BNX2X_MSG_ETHTOOL, + "To set speed BC %X or higher is required, please upgrade BC\n", + REQ_BC_VER_4_SET_MF_BW); return -EINVAL; } part = (speed * 100) / line_speed; if (line_speed < speed || !part) { - BNX2X_DEV_INFO("Speed setting 
should be in a range " - "from 1%% to 100%% " - "of actual line speed\n"); + DP(BNX2X_MSG_ETHTOOL, + "Speed setting should be in a range from 1%% to 100%% of actual line speed\n"); return -EINVAL; } @@ -312,57 +358,56 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) cfg_idx = bnx2x_get_link_cfg_idx(bp); old_multi_phy_config = bp->link_params.multi_phy_config; - switch (cmd->port) { - case PORT_TP: - if (bp->port.supported[cfg_idx] & SUPPORTED_TP) - break; /* no port change */ - - if (!(bp->port.supported[0] & SUPPORTED_TP || - bp->port.supported[1] & SUPPORTED_TP)) { - DP(NETIF_MSG_LINK, "Unsupported port type\n"); - return -EINVAL; - } - bp->link_params.multi_phy_config &= - ~PORT_HW_CFG_PHY_SELECTION_MASK; - if (bp->link_params.multi_phy_config & - PORT_HW_CFG_PHY_SWAPPED_ENABLED) - bp->link_params.multi_phy_config |= - PORT_HW_CFG_PHY_SELECTION_SECOND_PHY; - else - bp->link_params.multi_phy_config |= - PORT_HW_CFG_PHY_SELECTION_FIRST_PHY; - break; - case PORT_FIBRE: - case PORT_DA: - if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE) - break; /* no port change */ - - if (!(bp->port.supported[0] & SUPPORTED_FIBRE || - bp->port.supported[1] & SUPPORTED_FIBRE)) { - DP(NETIF_MSG_LINK, "Unsupported port type\n"); + if (cmd->port != bnx2x_get_port_type(bp)) { + switch (cmd->port) { + case PORT_TP: + if (!(bp->port.supported[0] & SUPPORTED_TP || + bp->port.supported[1] & SUPPORTED_TP)) { + DP(BNX2X_MSG_ETHTOOL, + "Unsupported port type\n"); + return -EINVAL; + } + bp->link_params.multi_phy_config &= + ~PORT_HW_CFG_PHY_SELECTION_MASK; + if (bp->link_params.multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED) + bp->link_params.multi_phy_config |= + PORT_HW_CFG_PHY_SELECTION_SECOND_PHY; + else + bp->link_params.multi_phy_config |= + PORT_HW_CFG_PHY_SELECTION_FIRST_PHY; + break; + case PORT_FIBRE: + case PORT_DA: + case PORT_NONE: + if (!(bp->port.supported[0] & SUPPORTED_FIBRE || + bp->port.supported[1] & SUPPORTED_FIBRE)) { + DP(BNX2X_MSG_ETHTOOL, + "Unsupported port type\n"); + return -EINVAL; + } + bp->link_params.multi_phy_config &= + ~PORT_HW_CFG_PHY_SELECTION_MASK; + if (bp->link_params.multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED) + bp->link_params.multi_phy_config |= + PORT_HW_CFG_PHY_SELECTION_FIRST_PHY; + else + bp->link_params.multi_phy_config |= + PORT_HW_CFG_PHY_SELECTION_SECOND_PHY; + break; + default: + DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n"); return -EINVAL; } - bp->link_params.multi_phy_config &= - ~PORT_HW_CFG_PHY_SELECTION_MASK; - if (bp->link_params.multi_phy_config & - PORT_HW_CFG_PHY_SWAPPED_ENABLED) - bp->link_params.multi_phy_config |= - PORT_HW_CFG_PHY_SELECTION_FIRST_PHY; - else - bp->link_params.multi_phy_config |= - PORT_HW_CFG_PHY_SELECTION_SECOND_PHY; - break; - default: - DP(NETIF_MSG_LINK, "Unsupported port type\n"); - return -EINVAL; } - /* Save new config in case command complete successully */ + /* Save new config in case command complete successfully */ new_multi_phy_config = bp->link_params.multi_phy_config; /* Get the new cfg_idx */ cfg_idx = bnx2x_get_link_cfg_idx(bp); /* Restore old config in case command failed */ bp->link_params.multi_phy_config = old_multi_phy_config; - DP(NETIF_MSG_LINK, "cfg_idx = %x\n", cfg_idx); + DP(BNX2X_MSG_ETHTOOL, "cfg_idx = %x\n", cfg_idx); if (cmd->autoneg == AUTONEG_ENABLE) { u32 an_supported_speed = bp->port.supported[cfg_idx]; @@ -371,14 +416,14 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) an_supported_speed |= (SUPPORTED_100baseT_Half | 
SUPPORTED_100baseT_Full); if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) { - DP(NETIF_MSG_LINK, "Autoneg not supported\n"); + DP(BNX2X_MSG_ETHTOOL, "Autoneg not supported\n"); return -EINVAL; } /* advertise the requested speed and duplex if supported */ if (cmd->advertising & ~an_supported_speed) { - DP(NETIF_MSG_LINK, "Advertisement parameters " - "are not supported\n"); + DP(BNX2X_MSG_ETHTOOL, + "Advertisement parameters are not supported\n"); return -EINVAL; } @@ -419,6 +464,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) ADVERTISED_10000baseKR_Full)) bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_10G; + + if (cmd->advertising & ADVERTISED_20000baseKR2_Full) + bp->link_params.speed_cap_mask[cfg_idx] |= + PORT_HW_CFG_SPEED_CAPABILITY_D0_20G; } } else { /* forced speed */ /* advertise the requested speed and duplex if supported */ @@ -427,7 +476,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) if (cmd->duplex == DUPLEX_FULL) { if (!(bp->port.supported[cfg_idx] & SUPPORTED_10baseT_Full)) { - DP(NETIF_MSG_LINK, + DP(BNX2X_MSG_ETHTOOL, "10M full not supported\n"); return -EINVAL; } @@ -437,7 +486,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) } else { if (!(bp->port.supported[cfg_idx] & SUPPORTED_10baseT_Half)) { - DP(NETIF_MSG_LINK, + DP(BNX2X_MSG_ETHTOOL, "10M half not supported\n"); return -EINVAL; } @@ -451,7 +500,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) if (cmd->duplex == DUPLEX_FULL) { if (!(bp->port.supported[cfg_idx] & SUPPORTED_100baseT_Full)) { - DP(NETIF_MSG_LINK, + DP(BNX2X_MSG_ETHTOOL, "100M full not supported\n"); return -EINVAL; } @@ -461,7 +510,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) } else { if (!(bp->port.supported[cfg_idx] & SUPPORTED_100baseT_Half)) { - DP(NETIF_MSG_LINK, + DP(BNX2X_MSG_ETHTOOL, "100M half not supported\n"); return -EINVAL; } @@ -473,13 +522,15 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) case SPEED_1000: if (cmd->duplex != DUPLEX_FULL) { - DP(NETIF_MSG_LINK, "1G half not supported\n"); + DP(BNX2X_MSG_ETHTOOL, + "1G half not supported\n"); return -EINVAL; } if (!(bp->port.supported[cfg_idx] & SUPPORTED_1000baseT_Full)) { - DP(NETIF_MSG_LINK, "1G full not supported\n"); + DP(BNX2X_MSG_ETHTOOL, + "1G full not supported\n"); return -EINVAL; } @@ -489,14 +540,14 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) case SPEED_2500: if (cmd->duplex != DUPLEX_FULL) { - DP(NETIF_MSG_LINK, + DP(BNX2X_MSG_ETHTOOL, "2.5G half not supported\n"); return -EINVAL; } if (!(bp->port.supported[cfg_idx] & SUPPORTED_2500baseX_Full)) { - DP(NETIF_MSG_LINK, + DP(BNX2X_MSG_ETHTOOL, "2.5G full not supported\n"); return -EINVAL; } @@ -507,13 +558,17 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) case SPEED_10000: if (cmd->duplex != DUPLEX_FULL) { - DP(NETIF_MSG_LINK, "10G half not supported\n"); + DP(BNX2X_MSG_ETHTOOL, + "10G half not supported\n"); return -EINVAL; } - + phy_idx = bnx2x_get_cur_phy_idx(bp); if (!(bp->port.supported[cfg_idx] - & SUPPORTED_10000baseT_Full)) { - DP(NETIF_MSG_LINK, "10G full not supported\n"); + & SUPPORTED_10000baseT_Full) || + (bp->link_params.phy[phy_idx].media_type == + ETH_PHY_SFP_1G_FIBER)) { + DP(BNX2X_MSG_ETHTOOL, + "10G full not supported\n"); return -EINVAL; } @@ -522,7 +577,7 @@ static int 
bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) break; default: - DP(NETIF_MSG_LINK, "Unsupported speed %u\n", speed); + DP(BNX2X_MSG_ETHTOOL, "Unsupported speed %u\n", speed); return -EINVAL; } @@ -531,7 +586,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) bp->port.advertising[cfg_idx] = advertising; } - DP(NETIF_MSG_LINK, "req_line_speed %d\n" + DP(BNX2X_MSG_ETHTOOL, "req_line_speed %d\n" " req_duplex %d advertising 0x%x\n", bp->link_params.req_line_speed[cfg_idx], bp->link_params.req_duplex[cfg_idx], @@ -547,31 +602,63 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) return 0; } -#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE) -#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE) -#define IS_E2_ONLINE(info) (((info) & RI_E2_ONLINE) == RI_E2_ONLINE) -#define IS_E3_ONLINE(info) (((info) & RI_E3_ONLINE) == RI_E3_ONLINE) -#define IS_E3B0_ONLINE(info) (((info) & RI_E3B0_ONLINE) == RI_E3B0_ONLINE) +#define DUMP_ALL_PRESETS 0x1FFF +#define DUMP_MAX_PRESETS 13 -static inline bool bnx2x_is_reg_online(struct bnx2x *bp, - const struct reg_addr *reg_info) +static int __bnx2x_get_preset_regs_len(struct bnx2x *bp, u32 preset) { if (CHIP_IS_E1(bp)) - return IS_E1_ONLINE(reg_info->info); + return dump_num_registers[0][preset-1]; else if (CHIP_IS_E1H(bp)) - return IS_E1H_ONLINE(reg_info->info); + return dump_num_registers[1][preset-1]; else if (CHIP_IS_E2(bp)) - return IS_E2_ONLINE(reg_info->info); + return dump_num_registers[2][preset-1]; else if (CHIP_IS_E3A0(bp)) - return IS_E3_ONLINE(reg_info->info); + return dump_num_registers[3][preset-1]; else if (CHIP_IS_E3B0(bp)) - return IS_E3B0_ONLINE(reg_info->info); + return dump_num_registers[4][preset-1]; else - return false; + return 0; } +static int __bnx2x_get_regs_len(struct bnx2x *bp) +{ + u32 preset_idx; + int regdump_len = 0; + + /* Calculate the total preset regs length */ + for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) + regdump_len += __bnx2x_get_preset_regs_len(bp, preset_idx); + + return regdump_len; +} + +static int bnx2x_get_regs_len(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + int regdump_len = 0; + + if (IS_VF(bp)) + return 0; + + regdump_len = __bnx2x_get_regs_len(bp); + regdump_len *= 4; + regdump_len += sizeof(struct dump_header); + + return regdump_len; +} + +#define IS_E1_REG(chips) ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1) +#define IS_E1H_REG(chips) ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H) +#define IS_E2_REG(chips) ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2) +#define IS_E3A0_REG(chips) ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0) +#define IS_E3B0_REG(chips) ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0) + +#define IS_REG_IN_PRESET(presets, idx) \ + ((presets & (1 << (idx-1))) == (1 << (idx-1))) + /******* Paged registers info selectors ********/ -static inline const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp) +static const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp) { if (CHIP_IS_E2(bp)) return page_vals_e2; @@ -581,7 +668,7 @@ static inline const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp) return NULL; } -static inline u32 __bnx2x_get_page_reg_num(struct bnx2x *bp) +static u32 __bnx2x_get_page_reg_num(struct bnx2x *bp) { if (CHIP_IS_E2(bp)) return PAGE_MODE_VALUES_E2; @@ -591,7 +678,7 @@ static inline u32 __bnx2x_get_page_reg_num(struct bnx2x *bp) return 0; } -static inline const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp) +static const u32 
*__bnx2x_get_page_write_ar(struct bnx2x *bp) { if (CHIP_IS_E2(bp)) return page_write_regs_e2; @@ -601,7 +688,7 @@ static inline const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp) return NULL; } -static inline u32 __bnx2x_get_page_write_num(struct bnx2x *bp) +static u32 __bnx2x_get_page_write_num(struct bnx2x *bp) { if (CHIP_IS_E2(bp)) return PAGE_WRITE_REGS_E2; @@ -611,7 +698,7 @@ static inline u32 __bnx2x_get_page_write_num(struct bnx2x *bp) return 0; } -static inline const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp) +static const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp) { if (CHIP_IS_E2(bp)) return page_read_regs_e2; @@ -621,7 +708,7 @@ static inline const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp) return NULL; } -static inline u32 __bnx2x_get_page_read_num(struct bnx2x *bp) +static u32 __bnx2x_get_page_read_num(struct bnx2x *bp) { if (CHIP_IS_E2(bp)) return PAGE_READ_REGS_E2; @@ -631,38 +718,38 @@ static inline u32 __bnx2x_get_page_read_num(struct bnx2x *bp) return 0; } -static inline int __bnx2x_get_regs_len(struct bnx2x *bp) +static bool bnx2x_is_reg_in_chip(struct bnx2x *bp, + const struct reg_addr *reg_info) { - int num_pages = __bnx2x_get_page_reg_num(bp); - int page_write_num = __bnx2x_get_page_write_num(bp); - const struct reg_addr *page_read_addr = __bnx2x_get_page_read_ar(bp); - int page_read_num = __bnx2x_get_page_read_num(bp); - int regdump_len = 0; - int i, j, k; - - for (i = 0; i < REGS_COUNT; i++) - if (bnx2x_is_reg_online(bp, ®_addrs[i])) - regdump_len += reg_addrs[i].size; - - for (i = 0; i < num_pages; i++) - for (j = 0; j < page_write_num; j++) - for (k = 0; k < page_read_num; k++) - if (bnx2x_is_reg_online(bp, &page_read_addr[k])) - regdump_len += page_read_addr[k].size; - - return regdump_len; + if (CHIP_IS_E1(bp)) + return IS_E1_REG(reg_info->chips); + else if (CHIP_IS_E1H(bp)) + return IS_E1H_REG(reg_info->chips); + else if (CHIP_IS_E2(bp)) + return IS_E2_REG(reg_info->chips); + else if (CHIP_IS_E3A0(bp)) + return IS_E3A0_REG(reg_info->chips); + else if (CHIP_IS_E3B0(bp)) + return IS_E3B0_REG(reg_info->chips); + else + return false; } -static int bnx2x_get_regs_len(struct net_device *dev) +static bool bnx2x_is_wreg_in_chip(struct bnx2x *bp, + const struct wreg_addr *wreg_info) { - struct bnx2x *bp = netdev_priv(dev); - int regdump_len = 0; - - regdump_len = __bnx2x_get_regs_len(bp); - regdump_len *= 4; - regdump_len += sizeof(struct dump_hdr); - - return regdump_len; + if (CHIP_IS_E1(bp)) + return IS_E1_REG(wreg_info->chips); + else if (CHIP_IS_E1H(bp)) + return IS_E1H_REG(wreg_info->chips); + else if (CHIP_IS_E2(bp)) + return IS_E2_REG(wreg_info->chips); + else if (CHIP_IS_E3A0(bp)) + return IS_E3A0_REG(wreg_info->chips); + else if (CHIP_IS_E3B0(bp)) + return IS_E3B0_REG(wreg_info->chips); + else + return false; } /** @@ -676,9 +763,10 @@ static int bnx2x_get_regs_len(struct net_device *dev) * ("read address"). There may be more than one write address per "page" and * more than one read address per write address. 
*/ -static inline void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p) +static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p, u32 preset) { u32 i, j, k, n; + /* addresses of the paged registers */ const u32 *page_addr = __bnx2x_get_page_addr_ar(bp); /* number of paged registers */ @@ -691,32 +779,100 @@ static inline void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p) const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp); /* number of read addresses */ int read_num = __bnx2x_get_page_read_num(bp); + u32 addr, size; for (i = 0; i < num_pages; i++) { for (j = 0; j < write_num; j++) { REG_WR(bp, write_addr[j], page_addr[i]); - for (k = 0; k < read_num; k++) - if (bnx2x_is_reg_online(bp, &read_addr[k])) - for (n = 0; n < - read_addr[k].size; n++) - *p++ = REG_RD(bp, - read_addr[k].addr + n*4); + + for (k = 0; k < read_num; k++) { + if (IS_REG_IN_PRESET(read_addr[k].presets, + preset)) { + size = read_addr[k].size; + for (n = 0; n < size; n++) { + addr = read_addr[k].addr + n*4; + *p++ = REG_RD(bp, addr); + } + } + } } } } -static inline void __bnx2x_get_regs(struct bnx2x *bp, u32 *p) +static int __bnx2x_get_preset_regs(struct bnx2x *bp, u32 *p, u32 preset) { - u32 i, j; + u32 i, j, addr; + const struct wreg_addr *wreg_addr_p = NULL; + + if (CHIP_IS_E1(bp)) + wreg_addr_p = &wreg_addr_e1; + else if (CHIP_IS_E1H(bp)) + wreg_addr_p = &wreg_addr_e1h; + else if (CHIP_IS_E2(bp)) + wreg_addr_p = &wreg_addr_e2; + else if (CHIP_IS_E3A0(bp)) + wreg_addr_p = &wreg_addr_e3; + else if (CHIP_IS_E3B0(bp)) + wreg_addr_p = &wreg_addr_e3b0; + + /* Read the idle_chk registers */ + for (i = 0; i < IDLE_REGS_COUNT; i++) { + if (bnx2x_is_reg_in_chip(bp, &idle_reg_addrs[i]) && + IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) { + for (j = 0; j < idle_reg_addrs[i].size; j++) + *p++ = REG_RD(bp, idle_reg_addrs[i].addr + j*4); + } + } /* Read the regular registers */ - for (i = 0; i < REGS_COUNT; i++) - if (bnx2x_is_reg_online(bp, ®_addrs[i])) + for (i = 0; i < REGS_COUNT; i++) { + if (bnx2x_is_reg_in_chip(bp, ®_addrs[i]) && + IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) { for (j = 0; j < reg_addrs[i].size; j++) *p++ = REG_RD(bp, reg_addrs[i].addr + j*4); + } + } + + /* Read the CAM registers */ + if (bnx2x_is_wreg_in_chip(bp, wreg_addr_p) && + IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) { + for (i = 0; i < wreg_addr_p->size; i++) { + *p++ = REG_RD(bp, wreg_addr_p->addr + i*4); + + /* In case of wreg_addr register, read additional + registers from read_regs array + */ + for (j = 0; j < wreg_addr_p->read_regs_count; j++) { + addr = *(wreg_addr_p->read_regs); + *p++ = REG_RD(bp, addr + j*4); + } + } + } - /* Read "paged" registes */ - bnx2x_read_pages_regs(bp, p); + /* Paged registers are supported in E2 & E3 only */ + if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) { + /* Read "paged" registers */ + bnx2x_read_pages_regs(bp, p, preset); + } + + return 0; +} + +static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p) +{ + u32 preset_idx; + + /* Read all registers, by reading all preset registers */ + for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) { + /* Skip presets with IOR */ + if ((preset_idx == 2) || + (preset_idx == 5) || + (preset_idx == 8) || + (preset_idx == 11)) + continue; + __bnx2x_get_preset_regs(bp, p, preset_idx); + p += __bnx2x_get_preset_regs_len(bp, preset_idx); + } } static void bnx2x_get_regs(struct net_device *dev, @@ -724,9 +880,9 @@ static void bnx2x_get_regs(struct net_device *dev, { u32 *p = _p; struct bnx2x *bp = netdev_priv(dev); - struct dump_hdr dump_hdr = 
{0}; + struct dump_header dump_hdr = {0}; - regs->version = 0; + regs->version = 2; memset(p, 0, regs->len); if (!netif_running(bp->dev)) @@ -736,25 +892,31 @@ static void bnx2x_get_regs(struct net_device *dev, * cause false alarms by reading never written registers. We * will re-enable parity attentions right after the dump. */ - bnx2x_disable_blocks_parity(bp); - dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1; - dump_hdr.dump_sign = dump_sign_all; - dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR); - dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR); - dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR); - dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR); + bnx2x_disable_blocks_parity(bp); - if (CHIP_IS_E1(bp)) - dump_hdr.info = RI_E1_ONLINE; - else if (CHIP_IS_E1H(bp)) - dump_hdr.info = RI_E1H_ONLINE; - else if (!CHIP_IS_E1x(bp)) - dump_hdr.info = RI_E2_ONLINE | - (BP_PATH(bp) ? RI_PATH1_DUMP : RI_PATH0_DUMP); + dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1; + dump_hdr.preset = DUMP_ALL_PRESETS; + dump_hdr.version = BNX2X_DUMP_VERSION; + + /* dump_meta_data presents OR of CHIP and PATH. */ + if (CHIP_IS_E1(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E1; + } else if (CHIP_IS_E1H(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E1H; + } else if (CHIP_IS_E2(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E2 | + (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); + } else if (CHIP_IS_E3A0(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 | + (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); + } else if (CHIP_IS_E3B0(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 | + (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); + } - memcpy(p, &dump_hdr, sizeof(struct dump_hdr)); - p += dump_hdr.hdr_size + 1; + memcpy(p, &dump_hdr, sizeof(struct dump_header)); + p += dump_hdr.header_size + 1; /* Actually read the registers */ __bnx2x_get_regs(bp, p); @@ -764,34 +926,107 @@ static void bnx2x_get_regs(struct net_device *dev, bnx2x_enable_blocks_parity(bp); } +static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset) +{ + struct bnx2x *bp = netdev_priv(dev); + int regdump_len = 0; + + regdump_len = __bnx2x_get_preset_regs_len(bp, preset); + regdump_len *= 4; + regdump_len += sizeof(struct dump_header); + + return regdump_len; +} + +static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val) +{ + struct bnx2x *bp = netdev_priv(dev); + + /* Use the ethtool_dump "flag" field as the dump preset index */ + if (val->flag < 1 || val->flag > DUMP_MAX_PRESETS) + return -EINVAL; + + bp->dump_preset_idx = val->flag; + return 0; +} + +static int bnx2x_get_dump_flag(struct net_device *dev, + struct ethtool_dump *dump) +{ + struct bnx2x *bp = netdev_priv(dev); + + dump->version = BNX2X_DUMP_VERSION; + dump->flag = bp->dump_preset_idx; + /* Calculate the requested preset idx length */ + dump->len = bnx2x_get_preset_regs_len(dev, bp->dump_preset_idx); + DP(BNX2X_MSG_ETHTOOL, "Get dump preset %d length=%d\n", + bp->dump_preset_idx, dump->len); + return 0; +} + +static int bnx2x_get_dump_data(struct net_device *dev, + struct ethtool_dump *dump, + void *buffer) +{ + u32 *p = buffer; + struct bnx2x *bp = netdev_priv(dev); + struct dump_header dump_hdr = {0}; + + /* Disable parity attentions as long as following dump may + * cause false alarms by reading never written registers. We + * will re-enable parity attentions right after the dump. 
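The .set_dump/.get_dump_flag/.get_dump_data callbacks added here plug into the generic ethtool dump ioctls, so the preset index stored in bp->dump_preset_idx is exactly what user space selects. A minimal user-space sketch of the three-step sequence (the "eth0" name and preset 1 are assumptions; error handling is trimmed):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ifreq ifr = {0};
		struct ethtool_dump sel = { .cmd = ETHTOOL_SET_DUMP, .flag = 1 };
		struct ethtool_dump hdr = { .cmd = ETHTOOL_GET_DUMP_FLAG };
		struct ethtool_dump *data;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

		ifr.ifr_data = (void *)&sel;		/* pick preset 1 */
		ioctl(fd, SIOCETHTOOL, &ifr);

		ifr.ifr_data = (void *)&hdr;		/* ask how long that preset's dump is */
		ioctl(fd, SIOCETHTOOL, &ifr);

		data = calloc(1, sizeof(*data) + hdr.len);
		data->cmd = ETHTOOL_GET_DUMP_DATA;	/* fetch dump header + registers */
		data->len = hdr.len;
		ifr.ifr_data = (void *)data;
		ioctl(fd, SIOCETHTOOL, &ifr);

		printf("preset %u: %u bytes\n", hdr.flag, data->len);
		free(data);
		return 0;
	}

The ethtool CLI drives the same path with "ethtool -W <dev> <preset>" followed by "ethtool -w <dev> data <file>".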
+ */ + + bnx2x_disable_blocks_parity(bp); + + dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1; + dump_hdr.preset = bp->dump_preset_idx; + dump_hdr.version = BNX2X_DUMP_VERSION; + + DP(BNX2X_MSG_ETHTOOL, "Get dump data of preset %d\n", dump_hdr.preset); + + /* dump_meta_data presents OR of CHIP and PATH. */ + if (CHIP_IS_E1(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E1; + } else if (CHIP_IS_E1H(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E1H; + } else if (CHIP_IS_E2(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E2 | + (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); + } else if (CHIP_IS_E3A0(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 | + (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); + } else if (CHIP_IS_E3B0(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 | + (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); + } + + memcpy(p, &dump_hdr, sizeof(struct dump_header)); + p += dump_hdr.header_size + 1; + + /* Actually read the registers */ + __bnx2x_get_preset_regs(bp, p, dump_hdr.preset); + + /* Re-enable parity attentions */ + bnx2x_clear_blocks_parity(bp); + bnx2x_enable_blocks_parity(bp); + + return 0; +} + static void bnx2x_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct bnx2x *bp = netdev_priv(dev); - u8 phy_fw_ver[PHY_FW_VER_LEN]; strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); - phy_fw_ver[0] = '\0'; - if (bp->port.pmf) { - bnx2x_acquire_phy_lock(bp); - bnx2x_get_ext_phy_fw_version(&bp->link_params, - (bp->state != BNX2X_STATE_CLOSED), - phy_fw_ver, PHY_FW_VER_LEN); - bnx2x_release_phy_lock(bp); - } + bnx2x_fill_fw_str(bp, info->fw_version, sizeof(info->fw_version)); - strlcpy(info->fw_version, bp->fw_ver, sizeof(info->fw_version)); - snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver), - "bc %d.%d.%d%s%s", - (bp->common.bc_ver & 0xff0000) >> 16, - (bp->common.bc_ver & 0xff00) >> 8, - (bp->common.bc_ver & 0xff), - ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver); strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); info->n_stats = BNX2X_NUM_STATS; - info->testinfo_len = BNX2X_NUM_TESTS; + info->testinfo_len = BNX2X_NUM_TESTS(bp); info->eedump_len = bp->common.flash_size; info->regdump_len = bnx2x_get_regs_len(dev); } @@ -817,13 +1052,16 @@ static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct bnx2x *bp = netdev_priv(dev); - if (wol->wolopts & ~WAKE_MAGIC) + if (wol->wolopts & ~WAKE_MAGIC) { + DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n"); return -EINVAL; + } if (wol->wolopts & WAKE_MAGIC) { - if (bp->flags & NO_WOL_FLAG) + if (bp->flags & NO_WOL_FLAG) { + DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n"); return -EINVAL; - + } bp->wol = 1; } else bp->wol = 0; @@ -844,7 +1082,7 @@ static void bnx2x_set_msglevel(struct net_device *dev, u32 level) if (capable(CAP_NET_ADMIN)) { /* dump MCP trace */ - if (level & BNX2X_MSG_MCP) + if (IS_PF(bp) && (level & BNX2X_MSG_MCP)) bnx2x_fw_dump_lvl(bp, KERN_INFO); bp->msg_enable = level; } @@ -859,6 +1097,7 @@ static int bnx2x_nway_reset(struct net_device *dev) if (netif_running(dev)) { bnx2x_stats_handle(bp, STATS_EVENT_STOP); + bnx2x_force_link_reset(bp); bnx2x_link_set(bp); } @@ -882,11 +1121,27 @@ static int bnx2x_get_eeprom_len(struct net_device *dev) return bp->common.flash_size; } +/* Per pf misc lock must be acquired before the per port mcp lock. 
Otherwise, + * had we done things the other way around, if two pfs from the same port would + * attempt to access nvram at the same time, we could run into a scenario such + * as: + * pf A takes the port lock. + * pf B succeeds in taking the same lock since they are from the same port. + * pf A takes the per pf misc lock. Performs eeprom access. + * pf A finishes. Unlocks the per pf misc lock. + * pf B takes the lock and proceeds to perform its own access. + * pf A unlocks the per port lock, while pf B is still working (!). + * mcp takes the per port lock and corrupts pf B's access (and/or has its own + * access corrupted by pf B) + */ static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) { int port = BP_PORT(bp); int count, i; - u32 val = 0; + u32 val; + + /* acquire HW lock: protect against other PFs in PF Direct Assignment */ + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM); /* adjust timeout for emulation/FPGA */ count = BNX2X_NVRAM_TIMEOUT_COUNT; @@ -906,7 +1161,8 @@ static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) } if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { - DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n"); + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "cannot get access to nvram interface\n"); return -EBUSY; } @@ -917,7 +1173,7 @@ static int bnx2x_release_nvram_lock(struct bnx2x *bp) { int port = BP_PORT(bp); int count, i; - u32 val = 0; + u32 val; /* adjust timeout for emulation/FPGA */ count = BNX2X_NVRAM_TIMEOUT_COUNT; @@ -937,10 +1193,13 @@ static int bnx2x_release_nvram_lock(struct bnx2x *bp) } if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { - DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n"); + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "cannot free access to nvram interface\n"); return -EBUSY; } + /* release HW lock: protect against other PFs in PF Direct Assignment */ + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM); return 0; } @@ -1003,13 +1262,16 @@ static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val, val = REG_RD(bp, MCP_REG_MCPR_NVM_READ); /* we read nvram data in cpu order * but ethtool sees it as an array of bytes - * converting to big-endian will do the work */ + * converting to big-endian will do the work + */ *ret_val = cpu_to_be32(val); rc = 0; break; } } - + if (rc == -EBUSY) + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "nvram read timeout expired\n"); return rc; } @@ -1021,15 +1283,15 @@ static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf, __be32 val; if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { - DP(BNX2X_MSG_NVM, + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "Invalid parameter: offset 0x%x buf_size 0x%x\n", offset, buf_size); return -EINVAL; } if (offset + buf_size > bp->common.flash_size) { - DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" - " buf_size (0x%x) > flash_size (0x%x)\n", + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n", offset, buf_size, bp->common.flash_size); return -EINVAL; } @@ -1068,27 +1330,176 @@ static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf, return rc; } +static int bnx2x_nvram_read32(struct bnx2x *bp, u32 offset, u32 *buf, + int buf_size) +{ + int rc; + + rc = bnx2x_nvram_read(bp, offset, (u8 *)buf, buf_size); + + if (!rc) { + __be32 *be = (__be32 *)buf; + + while ((buf_size -= 4) >= 0) + *buf++ = be32_to_cpu(*be++); + } + + return rc; +} + +static bool bnx2x_is_nvm_accessible(struct bnx2x *bp) +{ + int rc = 1; + u16 pm = 0; + struct net_device *dev
= pci_get_drvdata(bp->pdev); + + if (bp->pdev->pm_cap) + rc = pci_read_config_word(bp->pdev, + bp->pdev->pm_cap + PCI_PM_CTRL, &pm); + + if ((rc && !netif_running(dev)) || + (!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != (__force u16)PCI_D0))) + return false; + + return true; +} + static int bnx2x_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *eebuf) { struct bnx2x *bp = netdev_priv(dev); - int rc; - if (!netif_running(dev)) + if (!bnx2x_is_nvm_accessible(bp)) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "cannot access eeprom when the interface is down\n"); return -EAGAIN; + } - DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, eeprom->len, eeprom->len); /* parameters already validated in ethtool_get_eeprom */ - rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len); + return bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len); +} +static int bnx2x_get_module_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct bnx2x *bp = netdev_priv(dev); + int rc = -EINVAL, phy_idx; + u8 *user_data = data; + unsigned int start_addr = ee->offset, xfer_size = 0; + + if (!bnx2x_is_nvm_accessible(bp)) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "cannot access eeprom when the interface is down\n"); + return -EAGAIN; + } + + phy_idx = bnx2x_get_cur_phy_idx(bp); + + /* Read A0 section */ + if (start_addr < ETH_MODULE_SFF_8079_LEN) { + /* Limit transfer size to the A0 section boundary */ + if (start_addr + ee->len > ETH_MODULE_SFF_8079_LEN) + xfer_size = ETH_MODULE_SFF_8079_LEN - start_addr; + else + xfer_size = ee->len; + bnx2x_acquire_phy_lock(bp); + rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx], + &bp->link_params, + I2C_DEV_ADDR_A0, + start_addr, + xfer_size, + user_data); + bnx2x_release_phy_lock(bp); + if (rc) { + DP(BNX2X_MSG_ETHTOOL, "Failed reading A0 section\n"); + + return -EINVAL; + } + user_data += xfer_size; + start_addr += xfer_size; + } + + /* Read A2 section */ + if ((start_addr >= ETH_MODULE_SFF_8079_LEN) && + (start_addr < ETH_MODULE_SFF_8472_LEN)) { + xfer_size = ee->len - xfer_size; + /* Limit transfer size to the A2 section boundary */ + if (start_addr + xfer_size > ETH_MODULE_SFF_8472_LEN) + xfer_size = ETH_MODULE_SFF_8472_LEN - start_addr; + start_addr -= ETH_MODULE_SFF_8079_LEN; + bnx2x_acquire_phy_lock(bp); + rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx], + &bp->link_params, + I2C_DEV_ADDR_A2, + start_addr, + xfer_size, + user_data); + bnx2x_release_phy_lock(bp); + if (rc) { + DP(BNX2X_MSG_ETHTOOL, "Failed reading A2 section\n"); + return -EINVAL; + } + } return rc; } +static int bnx2x_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + struct bnx2x *bp = netdev_priv(dev); + int phy_idx, rc; + u8 sff8472_comp, diag_type; + + if (!bnx2x_is_nvm_accessible(bp)) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "cannot access eeprom when the interface is down\n"); + return -EAGAIN; + } + phy_idx = bnx2x_get_cur_phy_idx(bp); + bnx2x_acquire_phy_lock(bp); + rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx], + &bp->link_params, + I2C_DEV_ADDR_A0, + SFP_EEPROM_SFF_8472_COMP_ADDR, + SFP_EEPROM_SFF_8472_COMP_SIZE, + &sff8472_comp); + bnx2x_release_phy_lock(bp); + if (rc) { + DP(BNX2X_MSG_ETHTOOL, "Failed reading SFF-8472 comp field\n"); + return -EINVAL; + } + + bnx2x_acquire_phy_lock(bp); + rc = 
bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx], + &bp->link_params, + I2C_DEV_ADDR_A0, + SFP_EEPROM_DIAG_TYPE_ADDR, + SFP_EEPROM_DIAG_TYPE_SIZE, + &diag_type); + bnx2x_release_phy_lock(bp); + if (rc) { + DP(BNX2X_MSG_ETHTOOL, "Failed reading Diag Type field\n"); + return -EINVAL; + } + + if (!sff8472_comp || + (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ)) { + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + return 0; +} + static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val, u32 cmd_flags) { @@ -1126,6 +1537,9 @@ static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val, } } + if (rc == -EBUSY) + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "nvram write timeout expired\n"); return rc; } @@ -1135,13 +1549,12 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf, int buf_size) { int rc; - u32 cmd_flags; - u32 align_offset; - __be32 val; + u32 cmd_flags, align_offset, val; + __be32 val_be; if (offset + buf_size > bp->common.flash_size) { - DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" - " buf_size (0x%x) > flash_size (0x%x)\n", + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n", offset, buf_size, bp->common.flash_size); return -EINVAL; } @@ -1156,15 +1569,18 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf, cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST); align_offset = (offset & ~0x03); - rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags); + rc = bnx2x_nvram_read_dword(bp, align_offset, &val_be, cmd_flags); if (rc == 0) { - val &= ~(0xff << BYTE_OFFSET(offset)); - val |= (*data_buf << BYTE_OFFSET(offset)); - /* nvram data is returned as an array of bytes - * convert it back to cpu order */ - val = be32_to_cpu(val); + * convert it back to cpu order + */ + val = be32_to_cpu(val_be); + + val &= ~le32_to_cpu((__force __le32) + (0xff << BYTE_OFFSET(offset))); + val |= le32_to_cpu((__force __le32) + (*data_buf << BYTE_OFFSET(offset))); rc = bnx2x_nvram_write_dword(bp, align_offset, val, cmd_flags); @@ -1189,15 +1605,15 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf, return bnx2x_nvram_write1(bp, offset, data_buf, buf_size); if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { - DP(BNX2X_MSG_NVM, + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "Invalid parameter: offset 0x%x buf_size 0x%x\n", offset, buf_size); return -EINVAL; } if (offset + buf_size > bp->common.flash_size) { - DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" - " buf_size (0x%x) > flash_size (0x%x)\n", + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n", offset, buf_size, bp->common.flash_size); return -EINVAL; } @@ -1222,6 +1638,12 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf, memcpy(&val, data_buf, 4); + /* Notice unlike bnx2x_nvram_read_dword() this will not + * change val using be32_to_cpu(), which causes data to flip + * if the eeprom is read and then written back. This is due + * to tools utilizing this functionality that would break + * if this would be resolved. 
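For reference, the .get_module_info/.get_module_eeprom pair added above is consumed through the ETHTOOL_GMODULEINFO and ETHTOOL_GMODULEEEPROM ioctls: user space first learns the type (SFF-8079, 256 bytes, or SFF-8472, 512 bytes including the A2 diagnostics page) and then reads up to that length; offsets past 256 are served from the A2 device address, as bnx2x_get_module_eeprom() implements. A minimal sketch, assuming an interface named "eth0" and trimming error handling:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ifreq ifr = {0};
		struct ethtool_modinfo mi = { .cmd = ETHTOOL_GMODULEINFO };
		struct ethtool_eeprom *ee;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&mi;
		ioctl(fd, SIOCETHTOOL, &ifr);	/* module type + total EEPROM length */

		ee = calloc(1, sizeof(*ee) + mi.eeprom_len);
		ee->cmd = ETHTOOL_GMODULEEEPROM;
		ee->len = mi.eeprom_len;	/* offsets 0..255 hit A0; 256+ hit A2 */
		ifr.ifr_data = (void *)ee;
		ioctl(fd, SIOCETHTOOL, &ifr);

		printf("module type 0x%x, read %u bytes\n", mi.type, ee->len);
		free(ee);
		return 0;
	}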
+ */ rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags); /* advance to the next dword */ @@ -1245,10 +1667,14 @@ static int bnx2x_set_eeprom(struct net_device *dev, int port = BP_PORT(bp); int rc = 0; u32 ext_phy_config; - if (!netif_running(dev)) + + if (!bnx2x_is_nvm_accessible(bp)) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "cannot access eeprom when the interface is down\n"); return -EAGAIN; + } - DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, eeprom->len, eeprom->len); @@ -1257,8 +1683,11 @@ static int bnx2x_set_eeprom(struct net_device *dev, /* PHY eeprom can be accessed only by the PMF */ if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) && - !bp->port.pmf) + !bp->port.pmf) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "wrong magic or interface is not pmf\n"); return -EINVAL; + } ext_phy_config = SHMEM_RD(bp, @@ -1360,7 +1789,7 @@ static void bnx2x_get_ringparam(struct net_device *dev, else ering->rx_pending = MAX_RX_AVAIL; - ering->tx_max_pending = MAX_TX_AVAIL; + ering->tx_max_pending = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL; ering->tx_pending = bp->tx_ring_size; } @@ -1369,17 +1798,24 @@ static int bnx2x_set_ringparam(struct net_device *dev, { struct bnx2x *bp = netdev_priv(dev); + DP(BNX2X_MSG_ETHTOOL, + "set ring params command parameters: rx_pending = %d, tx_pending = %d\n", + ering->rx_pending, ering->tx_pending); + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { - pr_err("Handling parity error recovery. Try again later\n"); + DP(BNX2X_MSG_ETHTOOL, + "Handling parity error recovery. Try again later\n"); return -EAGAIN; } if ((ering->rx_pending > MAX_RX_AVAIL) || (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) || - (ering->tx_pending > MAX_TX_AVAIL) || - (ering->tx_pending <= MAX_SKB_FRAGS + 4)) + (ering->tx_pending > (IS_MF_FCOE_AFEX(bp) ? 
0 : MAX_TX_AVAIL)) || + (ering->tx_pending <= MAX_SKB_FRAGS + 4)) { + DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); return -EINVAL; + } bp->rx_ring_size = ering->rx_pending; bp->tx_ring_size = ering->tx_pending; @@ -1392,15 +1828,22 @@ static void bnx2x_get_pauseparam(struct net_device *dev, { struct bnx2x *bp = netdev_priv(dev); int cfg_idx = bnx2x_get_link_cfg_idx(bp); + int cfg_reg; + epause->autoneg = (bp->link_params.req_flow_ctrl[cfg_idx] == BNX2X_FLOW_CTRL_AUTO); - epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) == + if (!epause->autoneg) + cfg_reg = bp->link_params.req_flow_ctrl[cfg_idx]; + else + cfg_reg = bp->link_params.req_fc_auto_adv; + + epause->rx_pause = ((cfg_reg & BNX2X_FLOW_CTRL_RX) == BNX2X_FLOW_CTRL_RX); - epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) == + epause->tx_pause = ((cfg_reg & BNX2X_FLOW_CTRL_TX) == BNX2X_FLOW_CTRL_TX); - DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n" + DP(BNX2X_MSG_ETHTOOL, "ethtool_pauseparam: cmd %d\n" " autoneg %d rx_pause %d tx_pause %d\n", epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); } @@ -1413,7 +1856,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev, if (IS_MF(bp)) return 0; - DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n" + DP(BNX2X_MSG_ETHTOOL, "ethtool_pauseparam: cmd %d\n" " autoneg %d rx_pause %d tx_pause %d\n", epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); @@ -1430,7 +1873,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev, if (epause->autoneg) { if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) { - DP(NETIF_MSG_LINK, "autoneg not supported\n"); + DP(BNX2X_MSG_ETHTOOL, "autoneg not supported\n"); return -EINVAL; } @@ -1438,9 +1881,18 @@ static int bnx2x_set_pauseparam(struct net_device *dev, bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO; } + bp->link_params.req_fc_auto_adv = 0; + if (epause->rx_pause) + bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_RX; + + if (epause->tx_pause) + bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_TX; + + if (!bp->link_params.req_fc_auto_adv) + bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_NONE; } - DP(NETIF_MSG_LINK, + DP(BNX2X_MSG_ETHTOOL, "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]); if (netif_running(dev)) { @@ -1451,19 +1903,160 @@ static int bnx2x_set_pauseparam(struct net_device *dev, return 0; } -static const struct { - char string[ETH_GSTRING_LEN]; -} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = { - { "register_test (offline)" }, - { "memory_test (offline)" }, - { "loopback_test (offline)" }, - { "nvram_test (online)" }, - { "interrupt_test (online)" }, - { "link_test (online)" }, - { "idle check (online)" } +static const char bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF][ETH_GSTRING_LEN] = { + "register_test (offline) ", + "memory_test (offline) ", + "int_loopback_test (offline)", + "ext_loopback_test (offline)", + "nvram_test (online) ", + "interrupt_test (online) ", + "link_test (online) " }; enum { + BNX2X_PRI_FLAG_ISCSI, + BNX2X_PRI_FLAG_FCOE, + BNX2X_PRI_FLAG_STORAGE, + BNX2X_PRI_FLAG_LEN, +}; + +static const char bnx2x_private_arr[BNX2X_PRI_FLAG_LEN][ETH_GSTRING_LEN] = { + "iSCSI offload support", + "FCoE offload support", + "Storage only interface" +}; + +static u32 bnx2x_eee_to_adv(u32 eee_adv) +{ + u32 modes = 0; + + if (eee_adv & SHMEM_EEE_100M_ADV) + modes |= ADVERTISED_100baseT_Full; + if (eee_adv & SHMEM_EEE_1G_ADV) + modes |= ADVERTISED_1000baseT_Full; + if (eee_adv & SHMEM_EEE_10G_ADV) + modes |= 
ADVERTISED_10000baseT_Full; + + return modes; +} + +static u32 bnx2x_adv_to_eee(u32 modes, u32 shift) +{ + u32 eee_adv = 0; + if (modes & ADVERTISED_100baseT_Full) + eee_adv |= SHMEM_EEE_100M_ADV; + if (modes & ADVERTISED_1000baseT_Full) + eee_adv |= SHMEM_EEE_1G_ADV; + if (modes & ADVERTISED_10000baseT_Full) + eee_adv |= SHMEM_EEE_10G_ADV; + + return eee_adv << shift; +} + +static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata) +{ + struct bnx2x *bp = netdev_priv(dev); + u32 eee_cfg; + + if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) { + DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n"); + return -EOPNOTSUPP; + } + + eee_cfg = bp->link_vars.eee_status; + + edata->supported = + bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >> + SHMEM_EEE_SUPPORTED_SHIFT); + + edata->advertised = + bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >> + SHMEM_EEE_ADV_STATUS_SHIFT); + edata->lp_advertised = + bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >> + SHMEM_EEE_LP_ADV_STATUS_SHIFT); + + /* SHMEM value is in 16u units --> Convert to 1u units. */ + edata->tx_lpi_timer = (eee_cfg & SHMEM_EEE_TIMER_MASK) << 4; + + edata->eee_enabled = (eee_cfg & SHMEM_EEE_REQUESTED_BIT) ? 1 : 0; + edata->eee_active = (eee_cfg & SHMEM_EEE_ACTIVE_BIT) ? 1 : 0; + edata->tx_lpi_enabled = (eee_cfg & SHMEM_EEE_LPI_REQUESTED_BIT) ? 1 : 0; + + return 0; +} + +static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata) +{ + struct bnx2x *bp = netdev_priv(dev); + u32 eee_cfg; + u32 advertised; + + if (IS_MF(bp)) + return 0; + + if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) { + DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n"); + return -EOPNOTSUPP; + } + + eee_cfg = bp->link_vars.eee_status; + + if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) { + DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n"); + return -EOPNOTSUPP; + } + + advertised = bnx2x_adv_to_eee(edata->advertised, + SHMEM_EEE_ADV_STATUS_SHIFT); + if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) { + DP(BNX2X_MSG_ETHTOOL, + "Direct manipulation of EEE advertisement is not supported\n"); + return -EINVAL; + } + + if (edata->tx_lpi_timer > EEE_MODE_TIMER_MASK) { + DP(BNX2X_MSG_ETHTOOL, + "Maximal Tx Lpi timer supported is %x(u)\n", + EEE_MODE_TIMER_MASK); + return -EINVAL; + } + if (edata->tx_lpi_enabled && + (edata->tx_lpi_timer < EEE_MODE_NVRAM_AGGRESSIVE_TIME)) { + DP(BNX2X_MSG_ETHTOOL, + "Minimal Tx Lpi timer supported is %d(u)\n", + EEE_MODE_NVRAM_AGGRESSIVE_TIME); + return -EINVAL; + } + + /* All is well; Apply changes*/ + if (edata->eee_enabled) + bp->link_params.eee_mode |= EEE_MODE_ADV_LPI; + else + bp->link_params.eee_mode &= ~EEE_MODE_ADV_LPI; + + if (edata->tx_lpi_enabled) + bp->link_params.eee_mode |= EEE_MODE_ENABLE_LPI; + else + bp->link_params.eee_mode &= ~EEE_MODE_ENABLE_LPI; + + bp->link_params.eee_mode &= ~EEE_MODE_TIMER_MASK; + bp->link_params.eee_mode |= (edata->tx_lpi_timer & + EEE_MODE_TIMER_MASK) | + EEE_MODE_OVERRIDE_NVRAM | + EEE_MODE_OUTPUT_TIME; + + /* Restart link to propagate changes */ + if (netif_running(dev)) { + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + bnx2x_force_link_reset(bp); + bnx2x_link_set(bp); + } + + return 0; +} + +enum { BNX2X_CHIP_E1_OFST = 0, BNX2X_CHIP_E1H_OFST, BNX2X_CHIP_E2_OFST, @@ -1572,8 +2165,11 @@ static int bnx2x_test_registers(struct bnx2x *bp) { BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 } }; - if (!netif_running(bp->dev)) + if (!bnx2x_is_nvm_accessible(bp)) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "cannot access eeprom 
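The EEE handlers above map the shared-memory eee_status word onto the standard struct ethtool_eee, so the usual ETHTOOL_GEEE/ETHTOOL_SEEE ioctls apply. A minimal read-side sketch ("eth0" is an assumption; error handling trimmed):

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ifreq ifr = {0};
		struct ethtool_eee eee = { .cmd = ETHTOOL_GEEE };
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&eee;
		ioctl(fd, SIOCETHTOOL, &ifr);

		printf("EEE %s, active %u, tx LPI timer %u\n",
		       eee.eee_enabled ? "enabled" : "disabled",
		       eee.eee_active, eee.tx_lpi_timer);
		return 0;
	}

Note the constraint in bnx2x_set_eee() above: the advertisement mask handed to ETHTOOL_SEEE must match the currently advertised set, otherwise the driver returns -EINVAL ("Direct manipulation of EEE advertisement is not supported").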
when the interface is down\n"); return rc; + } if (CHIP_IS_E1(bp)) hw = BNX2X_CHIP_MASK_E1; @@ -1587,7 +2183,8 @@ static int bnx2x_test_registers(struct bnx2x *bp) hw = BNX2X_CHIP_MASK_E3; /* Repeat the test twice: - First by writing 0x00000000, second by writing 0xffffffff */ + * First by writing 0x00000000, second by writing 0xffffffff + */ for (idx = 0; idx < 2; idx++) { switch (idx) { @@ -1618,7 +2215,7 @@ static int bnx2x_test_registers(struct bnx2x *bp) /* verify value is as expected */ if ((val & mask) != (wr_val & mask)) { - DP(NETIF_MSG_HW, + DP(BNX2X_MSG_ETHTOOL, "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n", offset, val, wr_val, mask); goto test_reg_exit; @@ -1672,8 +2269,11 @@ static int bnx2x_test_memory(struct bnx2x *bp) { NULL, 0xffffffff, {0, 0, 0, 0} } }; - if (!netif_running(bp->dev)) + if (!bnx2x_is_nvm_accessible(bp)) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "cannot access eeprom when the interface is down\n"); return rc; + } if (CHIP_IS_E1(bp)) index = BNX2X_CHIP_E1_OFST; @@ -1688,7 +2288,7 @@ static int bnx2x_test_memory(struct bnx2x *bp) for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { val = REG_RD(bp, prty_tbl[i].offset); if (val & ~(prty_tbl[i].hw_mask[index])) { - DP(NETIF_MSG_HW, + DP(BNX2X_MSG_ETHTOOL, "%s is 0x%x\n", prty_tbl[i].name, val); goto test_mem_exit; } @@ -1703,7 +2303,7 @@ static int bnx2x_test_memory(struct bnx2x *bp) for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { val = REG_RD(bp, prty_tbl[i].offset); if (val & ~(prty_tbl[i].hw_mask[index])) { - DP(NETIF_MSG_HW, + DP(BNX2X_MSG_ETHTOOL, "%s is 0x%x\n", prty_tbl[i].name, val); goto test_mem_exit; } @@ -1724,7 +2324,15 @@ static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes) msleep(20); if (cnt <= 0 && bnx2x_link_test(bp, is_serdes)) - DP(NETIF_MSG_LINK, "Timeout waiting for link up\n"); + DP(BNX2X_MSG_ETHTOOL, "Timeout waiting for link up\n"); + + cnt = 1400; + while (!bp->link_vars.link_up && cnt--) + msleep(20); + + if (cnt <= 0 && !bp->link_vars.link_up) + DP(BNX2X_MSG_ETHTOOL, + "Timeout waiting for link init\n"); } } @@ -1735,14 +2343,12 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) unsigned char *packet; struct bnx2x_fastpath *fp_rx = &bp->fp[0]; struct bnx2x_fastpath *fp_tx = &bp->fp[0]; - struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0]; + struct bnx2x_fp_txdata *txdata = fp_tx->txdata_ptr[0]; u16 tx_start_idx, tx_idx; u16 rx_start_idx, rx_idx; u16 pkt_prod, bd_prod; struct sw_tx_bd *tx_buf; struct eth_tx_start_bd *tx_start_bd; - struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; - struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; dma_addr_t mapping; union eth_rx_cqe *cqe; u8 cqe_fp_flags, cqe_fp_type; @@ -1750,13 +2356,16 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) u16 len; int rc = -ENODEV; u8 *data; - struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txdata->txq_index); + struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, + txdata->txq_index); /* check the loopback mode */ switch (loopback_mode) { case BNX2X_PHY_LOOPBACK: - if (bp->link_params.loopback_mode != LOOPBACK_XGXS) + if (bp->link_params.loopback_mode != LOOPBACK_XGXS) { + DP(BNX2X_MSG_ETHTOOL, "PHY loopback not supported\n"); return -EINVAL; + } break; case BNX2X_MAC_LOOPBACK: if (CHIP_IS_E3(bp)) { @@ -1773,7 +2382,15 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) bnx2x_phy_init(&bp->link_params, &bp->link_vars); break; + case BNX2X_EXT_LOOPBACK: + if (bp->link_params.loopback_mode != LOOPBACK_EXT) { + DP(BNX2X_MSG_ETHTOOL, + 
"Can't configure external loopback\n"); + return -EINVAL; + } + break; default: + DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); return -EINVAL; } @@ -1782,6 +2399,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN); skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size); if (!skb) { + DP(BNX2X_MSG_ETHTOOL, "Can't allocate skb\n"); rc = -ENOMEM; goto test_loopback_exit; } @@ -1796,7 +2414,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { rc = -ENOMEM; dev_kfree_skb(skb); - BNX2X_ERR("Unable to map SKB\n"); + DP(BNX2X_MSG_ETHTOOL, "Unable to map SKB\n"); goto test_loopback_exit; } @@ -1822,21 +2440,32 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; SET_FLAG(tx_start_bd->general_data, - ETH_TX_START_BD_ETH_ADDR_TYPE, - UNICAST_ADDRESS); - SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1); + SET_FLAG(tx_start_bd->general_data, + ETH_TX_START_BD_PARSE_NBDS, + 0); /* turn on parsing and get a BD */ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); - pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x; - pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2; - - memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); - memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); - + if (CHIP_IS_E1x(bp)) { + u16 global_data = 0; + struct eth_tx_parse_bd_e1x *pbd_e1x = + &txdata->tx_desc_ring[bd_prod].parse_bd_e1x; + memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); + SET_FLAG(global_data, + ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, UNICAST_ADDRESS); + pbd_e1x->global_data = cpu_to_le16(global_data); + } else { + u32 parsing_data = 0; + struct eth_tx_parse_bd_e2 *pbd_e2 = + &txdata->tx_desc_ring[bd_prod].parse_bd_e2; + memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); + SET_FLAG(parsing_data, + ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, UNICAST_ADDRESS); + pbd_e2->parsing_data = cpu_to_le32(parsing_data); + } wmb(); txdata->tx_db.data.prod += 2; @@ -1879,7 +2508,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS)) goto test_loopback_rx_exit; - len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); + len = le16_to_cpu(cqe->fast_path_cqe.pkt_len_or_gro_seg_len); if (len != pkt_size) goto test_loopback_rx_exit; @@ -1926,13 +2555,13 @@ static int bnx2x_test_loopback(struct bnx2x *bp) res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK); if (res) { - DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res); + DP(BNX2X_MSG_ETHTOOL, " PHY loopback failed (res %d)\n", res); rc |= BNX2X_PHY_LOOPBACK_FAILED; } res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK); if (res) { - DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res); + DP(BNX2X_MSG_ETHTOOL, " MAC loopback failed (res %d)\n", res); rc |= BNX2X_MAC_LOOPBACK_FAILED; } @@ -1942,14 +2571,200 @@ static int bnx2x_test_loopback(struct bnx2x *bp) return rc; } +static int bnx2x_test_ext_loopback(struct bnx2x *bp) +{ + int rc; + u8 is_serdes = + (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0; + + if (BP_NOMCP(bp)) + return -ENODEV; + + if (!netif_running(bp->dev)) + return BNX2X_EXT_LOOPBACK_FAILED; + + bnx2x_nic_unload(bp, UNLOAD_NORMAL, false); + rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT); + if (rc) { + DP(BNX2X_MSG_ETHTOOL, + "Can't perform self-test, nic_load (for 
external lb) failed\n"); + return -ENODEV; + } + bnx2x_wait_for_link(bp, 1, is_serdes); + + bnx2x_netif_stop(bp, 1); + + rc = bnx2x_run_loopback(bp, BNX2X_EXT_LOOPBACK); + if (rc) + DP(BNX2X_MSG_ETHTOOL, "EXT loopback failed (res %d)\n", rc); + + bnx2x_netif_start(bp); + + return rc; +} + +struct code_entry { + u32 sram_start_addr; + u32 code_attribute; +#define CODE_IMAGE_TYPE_MASK 0xf0800003 +#define CODE_IMAGE_VNTAG_PROFILES_DATA 0xd0000003 +#define CODE_IMAGE_LENGTH_MASK 0x007ffffc +#define CODE_IMAGE_TYPE_EXTENDED_DIR 0xe0000000 + u32 nvm_start_addr; +}; + +#define CODE_ENTRY_MAX 16 +#define CODE_ENTRY_EXTENDED_DIR_IDX 15 +#define MAX_IMAGES_IN_EXTENDED_DIR 64 +#define NVRAM_DIR_OFFSET 0x14 + +#define EXTENDED_DIR_EXISTS(code) \ + ((code & CODE_IMAGE_TYPE_MASK) == CODE_IMAGE_TYPE_EXTENDED_DIR && \ + (code & CODE_IMAGE_LENGTH_MASK) != 0) + #define CRC32_RESIDUAL 0xdebb20e3 +#define CRC_BUFF_SIZE 256 + +static int bnx2x_nvram_crc(struct bnx2x *bp, + int offset, + int size, + u8 *buff) +{ + u32 crc = ~0; + int rc = 0, done = 0; + + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "NVRAM CRC from 0x%08x to 0x%08x\n", offset, offset + size); + + while (done < size) { + int count = min_t(int, size - done, CRC_BUFF_SIZE); + + rc = bnx2x_nvram_read(bp, offset + done, buff, count); + + if (rc) + return rc; + + crc = crc32_le(crc, buff, count); + done += count; + } + + if (crc != CRC32_RESIDUAL) + rc = -EINVAL; + + return rc; +} + +static int bnx2x_test_nvram_dir(struct bnx2x *bp, + struct code_entry *entry, + u8 *buff) +{ + size_t size = entry->code_attribute & CODE_IMAGE_LENGTH_MASK; + u32 type = entry->code_attribute & CODE_IMAGE_TYPE_MASK; + int rc; + + /* Zero-length images and AFEX profiles do not have CRC */ + if (size == 0 || type == CODE_IMAGE_VNTAG_PROFILES_DATA) + return 0; + + rc = bnx2x_nvram_crc(bp, entry->nvm_start_addr, size, buff); + if (rc) + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "image %x has failed crc test (rc %d)\n", type, rc); + + return rc; +} + +static int bnx2x_test_dir_entry(struct bnx2x *bp, u32 addr, u8 *buff) +{ + int rc; + struct code_entry entry; + + rc = bnx2x_nvram_read32(bp, addr, (u32 *)&entry, sizeof(entry)); + if (rc) + return rc; + + return bnx2x_test_nvram_dir(bp, &entry, buff); +} + +static int bnx2x_test_nvram_ext_dirs(struct bnx2x *bp, u8 *buff) +{ + u32 rc, cnt, dir_offset = NVRAM_DIR_OFFSET; + struct code_entry entry; + int i; + + rc = bnx2x_nvram_read32(bp, + dir_offset + + sizeof(entry) * CODE_ENTRY_EXTENDED_DIR_IDX, + (u32 *)&entry, sizeof(entry)); + if (rc) + return rc; + + if (!EXTENDED_DIR_EXISTS(entry.code_attribute)) + return 0; + + rc = bnx2x_nvram_read32(bp, entry.nvm_start_addr, + &cnt, sizeof(u32)); + if (rc) + return rc; + + dir_offset = entry.nvm_start_addr + 8; + + for (i = 0; i < cnt && i < MAX_IMAGES_IN_EXTENDED_DIR; i++) { + rc = bnx2x_test_dir_entry(bp, dir_offset + + sizeof(struct code_entry) * i, + buff); + if (rc) + return rc; + } + + return 0; +} + +static int bnx2x_test_nvram_dirs(struct bnx2x *bp, u8 *buff) +{ + u32 rc, dir_offset = NVRAM_DIR_OFFSET; + int i; + + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "NVRAM DIRS CRC test-set\n"); + + for (i = 0; i < CODE_ENTRY_EXTENDED_DIR_IDX; i++) { + rc = bnx2x_test_dir_entry(bp, dir_offset + + sizeof(struct code_entry) * i, + buff); + if (rc) + return rc; + } + + return bnx2x_test_nvram_ext_dirs(bp, buff); +} + +struct crc_pair { + int offset; + int size; +}; + +static int bnx2x_test_nvram_tbl(struct bnx2x *bp, + const struct crc_pair *nvram_tbl, u8 *buf) +{ + int i; + + for (i = 0; 
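CRC32_RESIDUAL is the standard CRC-32 residue: when an image's stored (inverted) CRC-32 is appended little-endian, re-running the checksum over data-plus-CRC leaves the fixed value 0xdebb20e3 in the CRC register, which is exactly the check bnx2x_nvram_crc() performs chunk by chunk. A self-contained demonstration, using a bitwise equivalent of the kernel's crc32_le() and an arbitrary 12-byte payload:

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t crc32_le(uint32_t crc, const uint8_t *p, unsigned int len)
	{
		while (len--) {
			crc ^= *p++;
			for (int i = 0; i < 8; i++)
				crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
		}
		return crc;
	}

	int main(void)
	{
		uint8_t buf[16] = "nvram image";		/* 12 bytes of payload */
		uint32_t crc = ~crc32_le(~0u, buf, 12);	/* CRC as it would be stored */

		buf[12] = crc;				/* append little-endian */
		buf[13] = crc >> 8;
		buf[14] = crc >> 16;
		buf[15] = crc >> 24;

		/* re-running over payload + CRC yields the fixed residual */
		printf("0x%08x\n", crc32_le(~0u, buf, 16));	/* prints 0xdebb20e3 */
		return 0;
	}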
nvram_tbl[i].size; i++) { int rc = bnx2x_nvram_crc(bp, nvram_tbl[i].offset, nvram_tbl[i].size, buf); if (rc) { DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "nvram_tbl[%d] has failed crc test (rc %d)\n", i, rc); return rc; } } + + return 0; +} static int bnx2x_test_nvram(struct bnx2x *bp) { - static const struct { - int offset; - int size; - } nvram_tbl[] = { + const struct crc_pair nvram_tbl[] = { { 0, 0x14 }, /* bootstrap */ { 0x14, 0xec }, /* dir */ { 0x100, 0x350 }, /* manuf_info */ @@ -1958,59 +2773,77 @@ static int bnx2x_test_nvram(struct bnx2x *bp) { 0x708, 0x70 }, /* manuf_key_info */ { 0, 0 } }; - __be32 buf[0x350 / 4]; - u8 *data = (u8 *)buf; - int i, rc; - u32 magic, crc; + const struct crc_pair nvram_tbl2[] = { + { 0x7e8, 0x350 }, /* manuf_info2 */ + { 0xb38, 0xf0 }, /* feature_info */ + { 0, 0 } + }; + + u8 *buf; + int rc; + u32 magic; if (BP_NOMCP(bp)) return 0; - rc = bnx2x_nvram_read(bp, 0, data, 4); + buf = kmalloc(CRC_BUFF_SIZE, GFP_KERNEL); + if (!buf) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "kmalloc failed\n"); + rc = -ENOMEM; + goto test_nvram_exit; + } + + rc = bnx2x_nvram_read32(bp, 0, &magic, sizeof(magic)); if (rc) { - DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc); + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "magic value read (rc %d)\n", rc); goto test_nvram_exit; } - magic = be32_to_cpu(buf[0]); if (magic != 0x669955aa) { - DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic); + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "wrong magic value (0x%08x)\n", magic); rc = -ENODEV; goto test_nvram_exit; } - for (i = 0; nvram_tbl[i].size; i++) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "Port 0 CRC test-set\n"); + rc = bnx2x_test_nvram_tbl(bp, nvram_tbl, buf); + if (rc) + goto test_nvram_exit; - rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data, - nvram_tbl[i].size); - if (rc) { - DP(NETIF_MSG_PROBE, - "nvram_tbl[%d] read data (rc %d)\n", i, rc); - goto test_nvram_exit; - } + if (!CHIP_IS_E1x(bp) && !CHIP_IS_57811xx(bp)) { + u32 hide = SHMEM_RD(bp, dev_info.shared_hw_config.config2) & + SHARED_HW_CFG_HIDE_PORT1; - crc = ether_crc_le(nvram_tbl[i].size, data); - if (crc != CRC32_RESIDUAL) { - DP(NETIF_MSG_PROBE, - "nvram_tbl[%d] crc value (0x%08x)\n", i, crc); - rc = -ENODEV; - goto test_nvram_exit; + if (!hide) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "Port 1 CRC test-set\n"); + rc = bnx2x_test_nvram_tbl(bp, nvram_tbl2, buf); + if (rc) + goto test_nvram_exit; } } + rc = bnx2x_test_nvram_dirs(bp, buf); + test_nvram_exit: + kfree(buf); return rc; } /* Send an EMPTY ramrod on the first queue */ static int bnx2x_test_intr(struct bnx2x *bp) { - struct bnx2x_queue_state_params params = {0}; + struct bnx2x_queue_state_params params = {NULL}; - if (!netif_running(bp->dev)) + if (!netif_running(bp->dev)) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "cannot access eeprom when the interface is down\n"); return -ENODEV; + } - params.q_obj = &bp->fp->q_obj; + params.q_obj = &bp->sp_objs->q_obj; params.cmd = BNX2X_Q_CMD_EMPTY; __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags); @@ -2022,37 +2855,57 @@ static void bnx2x_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf) { struct bnx2x *bp = netdev_priv(dev); - u8 is_serdes; + u8 is_serdes, link_up; + int rc, cnt = 0; + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { - pr_err("Handling parity error recovery. Try again later\n"); + netdev_err(bp->dev, + "Handling parity error recovery. 
Try again later\n"); etest->flags |= ETH_TEST_FL_FAILED; return; } - memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS); + DP(BNX2X_MSG_ETHTOOL, + "Self-test command parameters: offline = %d, external_lb = %d\n", + (etest->flags & ETH_TEST_FL_OFFLINE), + (etest->flags & ETH_TEST_FL_EXTERNAL_LB)>>2); + + memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp)); + + if (bnx2x_test_nvram(bp) != 0) { + if (!IS_MF(bp)) + buf[4] = 1; + else + buf[0] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } - if (!netif_running(dev)) + if (!netif_running(dev)) { + DP(BNX2X_MSG_ETHTOOL, "Interface is down\n"); return; + } - /* offline tests are not supported in MF mode */ - if (IS_MF(bp)) - etest->flags &= ~ETH_TEST_FL_OFFLINE; is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0; - - if (etest->flags & ETH_TEST_FL_OFFLINE) { + link_up = bp->link_vars.link_up; + /* offline tests are not supported in MF mode */ + if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) { int port = BP_PORT(bp); u32 val; - u8 link_up; /* save current value of input enable for TX port IF */ val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4); /* disable input for TX port IF */ REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0); - link_up = bp->link_vars.link_up; + bnx2x_nic_unload(bp, UNLOAD_NORMAL, false); + rc = bnx2x_nic_load(bp, LOAD_DIAG); + if (rc) { + etest->flags |= ETH_TEST_FL_FAILED; + DP(BNX2X_MSG_ETHTOOL, + "Can't perform self-test, nic_load (for offline) failed\n"); + return; + } - bnx2x_nic_unload(bp, UNLOAD_NORMAL); - bnx2x_nic_load(bp, LOAD_DIAG); /* wait until link state is restored */ bnx2x_wait_for_link(bp, 1, is_serdes); @@ -2065,48 +2918,66 @@ static void bnx2x_self_test(struct net_device *dev, etest->flags |= ETH_TEST_FL_FAILED; } - buf[2] = bnx2x_test_loopback(bp); + buf[2] = bnx2x_test_loopback(bp); /* internal LB */ if (buf[2] != 0) etest->flags |= ETH_TEST_FL_FAILED; - bnx2x_nic_unload(bp, UNLOAD_NORMAL); + if (etest->flags & ETH_TEST_FL_EXTERNAL_LB) { + buf[3] = bnx2x_test_ext_loopback(bp); /* external LB */ + if (buf[3] != 0) + etest->flags |= ETH_TEST_FL_FAILED; + etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; + } + + bnx2x_nic_unload(bp, UNLOAD_NORMAL, false); /* restore input for TX port IF */ REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val); - - bnx2x_nic_load(bp, LOAD_NORMAL); + rc = bnx2x_nic_load(bp, LOAD_NORMAL); + if (rc) { + etest->flags |= ETH_TEST_FL_FAILED; + DP(BNX2X_MSG_ETHTOOL, + "Can't perform self-test, nic_load (for online) failed\n"); + return; + } /* wait until link state is restored */ bnx2x_wait_for_link(bp, link_up, is_serdes); } - if (bnx2x_test_nvram(bp) != 0) { - buf[3] = 1; - etest->flags |= ETH_TEST_FL_FAILED; - } + if (bnx2x_test_intr(bp) != 0) { - buf[4] = 1; + if (!IS_MF(bp)) + buf[5] = 1; + else + buf[1] = 1; etest->flags |= ETH_TEST_FL_FAILED; } - if (bnx2x_link_test(bp, is_serdes) != 0) { - buf[5] = 1; - etest->flags |= ETH_TEST_FL_FAILED; + if (link_up) { + cnt = 100; + while (bnx2x_link_test(bp, is_serdes) && --cnt) + msleep(20); } -#ifdef BNX2X_EXTRA_DEBUG - bnx2x_panic_dump(bp); -#endif + if (!cnt) { + if (!IS_MF(bp)) + buf[6] = 1; + else + buf[2] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } } #define IS_PORT_STAT(i) \ ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT) #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC) -#define IS_MF_MODE_STAT(bp) \ - (IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) +#define HIDE_PORT_STAT(bp) \ + ((IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) || \ + IS_VF(bp)) /* ethtool 
statistics are displayed for all regular ethernet queues and the * fcoe L2 queue if not disabled */ -static inline int bnx2x_num_stat_queues(struct bnx2x *bp) +static int bnx2x_num_stat_queues(struct bnx2x *bp) { return BNX2X_NUM_ETH_QUEUES(bp); } @@ -2114,36 +2985,51 @@ static inline int bnx2x_num_stat_queues(struct bnx2x *bp) static int bnx2x_get_sset_count(struct net_device *dev, int stringset) { struct bnx2x *bp = netdev_priv(dev); - int i, num_stats; + int i, num_strings = 0; switch (stringset) { case ETH_SS_STATS: if (is_multi(bp)) { - num_stats = bnx2x_num_stat_queues(bp) * - BNX2X_NUM_Q_STATS; + num_strings = bnx2x_num_stat_queues(bp) * + BNX2X_NUM_Q_STATS; } else - num_stats = 0; - if (IS_MF_MODE_STAT(bp)) { + num_strings = 0; + if (HIDE_PORT_STAT(bp)) { for (i = 0; i < BNX2X_NUM_STATS; i++) if (IS_FUNC_STAT(i)) - num_stats++; + num_strings++; } else - num_stats += BNX2X_NUM_STATS; + num_strings += BNX2X_NUM_STATS; - return num_stats; + return num_strings; case ETH_SS_TEST: - return BNX2X_NUM_TESTS; + return BNX2X_NUM_TESTS(bp); + + case ETH_SS_PRIV_FLAGS: + return BNX2X_PRI_FLAG_LEN; default: return -EINVAL; } } +static u32 bnx2x_get_private_flags(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + u32 flags = 0; + + flags |= (!(bp->flags & NO_ISCSI_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_ISCSI; + flags |= (!(bp->flags & NO_FCOE_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_FCOE; + flags |= (!!IS_MF_STORAGE_ONLY(bp)) << BNX2X_PRI_FLAG_STORAGE; + + return flags; +} + static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { struct bnx2x *bp = netdev_priv(dev); - int i, j, k; + int i, j, k, start; char queue_name[MAX_QUEUE_NAME_LEN+1]; switch (stringset) { @@ -2162,9 +3048,8 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) } } - for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { - if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) + if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i)) continue; strcpy(buf + (k + j)*ETH_GSTRING_LEN, bnx2x_stats_arr[i].string); @@ -2174,7 +3059,18 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) break; case ETH_SS_TEST: - memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr)); + /* First 4 tests cannot be done in MF mode */ + if (!IS_MF(bp)) + start = 0; + else + start = 4; + memcpy(buf, bnx2x_tests_str_arr + start, + ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp)); + break; + + case ETH_SS_PRIV_FLAGS: + memcpy(buf, bnx2x_private_arr, + ETH_GSTRING_LEN * BNX2X_PRI_FLAG_LEN); break; } } @@ -2188,7 +3084,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev, if (is_multi(bp)) { for_each_eth_queue(bp, i) { - hw_stats = (u32 *)&bp->fp[i].eth_q_stats; + hw_stats = (u32 *)&bp->fp_stats[i].eth_q_stats; for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { if (bnx2x_q_stats_arr[j].size == 0) { /* skip this counter */ @@ -2211,7 +3107,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev, hw_stats = (u32 *)&bp->eth_stats; for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { - if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) + if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i)) continue; if (bnx2x_stats_arr[i].size == 0) { /* skip this counter */ @@ -2237,31 +3133,70 @@ static int bnx2x_set_phys_id(struct net_device *dev, { struct bnx2x *bp = netdev_priv(dev); - if (!netif_running(dev)) + if (!bnx2x_is_nvm_accessible(bp)) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "cannot access eeprom when the interface is down\n"); return -EAGAIN; - - if (!bp->port.pmf) - return -EOPNOTSUPP; + } switch (state) { case 
ETHTOOL_ID_ACTIVE: return 1; /* cycle on/off once per second */ case ETHTOOL_ID_ON: + bnx2x_acquire_phy_lock(bp); bnx2x_set_led(&bp->link_params, &bp->link_vars, LED_MODE_ON, SPEED_1000); + bnx2x_release_phy_lock(bp); break; case ETHTOOL_ID_OFF: + bnx2x_acquire_phy_lock(bp); bnx2x_set_led(&bp->link_params, &bp->link_vars, LED_MODE_FRONT_PANEL_OFF, 0); - + bnx2x_release_phy_lock(bp); break; case ETHTOOL_ID_INACTIVE: + bnx2x_acquire_phy_lock(bp); bnx2x_set_led(&bp->link_params, &bp->link_vars, LED_MODE_OPER, bp->link_vars.line_speed); + bnx2x_release_phy_lock(bp); + } + + return 0; +} + +static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) +{ + switch (info->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V4_FLOW: + if (bp->rss_conf_obj.udp_rss_v4) + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + else + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case UDP_V6_FLOW: + if (bp->rss_conf_obj.udp_rss_v6) + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + else + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case IPV4_FLOW: + case IPV6_FLOW: + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + default: + info->data = 0; + break; } return 0; @@ -2276,21 +3211,113 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, case ETHTOOL_GRXRINGS: info->data = BNX2X_NUM_ETH_QUEUES(bp); return 0; - + case ETHTOOL_GRXFH: + return bnx2x_get_rss_flags(bp, info); default: + DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); return -EOPNOTSUPP; } } -static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev) +static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) +{ + int udp_rss_requested; + + DP(BNX2X_MSG_ETHTOOL, + "Set rss flags command parameters: flow type = %d, data = %llu\n", + info->flow_type, info->data); + + switch (info->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + /* For TCP only 4-tuple hash is supported */ + if (info->data ^ (RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + DP(BNX2X_MSG_ETHTOOL, + "Command parameters not supported\n"); + return -EINVAL; + } + return 0; + + case UDP_V4_FLOW: + case UDP_V6_FLOW: + /* For UDP either 2-tuple hash or 4-tuple hash is supported */ + if (info->data == (RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + udp_rss_requested = 1; + else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) + udp_rss_requested = 0; + else + return -EINVAL; + if ((info->flow_type == UDP_V4_FLOW) && + (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) { + bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested; + DP(BNX2X_MSG_ETHTOOL, + "rss re-configured, UDP 4-tuple %s\n", + udp_rss_requested ? "enabled" : "disabled"); + return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); + } else if ((info->flow_type == UDP_V6_FLOW) && + (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) { + bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested; + DP(BNX2X_MSG_ETHTOOL, + "rss re-configured, UDP 4-tuple %s\n", + udp_rss_requested ? 
"enabled" : "disabled"); + return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); + } + return 0; + + case IPV4_FLOW: + case IPV6_FLOW: + /* For IP only 2-tuple hash is supported */ + if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) { + DP(BNX2X_MSG_ETHTOOL, + "Command parameters not supported\n"); + return -EINVAL; + } + return 0; + + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IP_USER_FLOW: + case ETHER_FLOW: + /* RSS is not supported for these protocols */ + if (info->data) { + DP(BNX2X_MSG_ETHTOOL, + "Command parameters not supported\n"); + return -EINVAL; + } + return 0; + + default: + return -EINVAL; + } +} + +static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) { struct bnx2x *bp = netdev_priv(dev); - return (bp->multi_mode == ETH_RSS_MODE_DISABLED ? - 0 : T_ETH_INDIRECTION_TABLE_SIZE); + switch (info->cmd) { + case ETHTOOL_SRXFH: + return bnx2x_set_rss_flags(bp, info); + default: + DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); + return -EOPNOTSUPP; + } } -static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir) +static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev) +{ + return T_ETH_INDIRECTION_TABLE_SIZE; +} + +static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key) { struct bnx2x *bp = netdev_priv(dev); u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; @@ -2314,15 +3341,15 @@ static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir) return 0; } -static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir) +static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *key) { struct bnx2x *bp = netdev_priv(dev); size_t i; - u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { /* - * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy() + * The same as in bnx2x_get_rxfh: we can't use a memcpy() * as an internal storage of an indirection table is a u8 array * while indir->ring_index points to an array of u32. * * align the received table to the Client ID of the leading RSS * queue */ - ind_table[i] = indir[i] + bp->fp->cl_id; + bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id; + } + + return bnx2x_config_rss_eth(bp, false); +} + +/** + * bnx2x_get_channels - gets the number of RSS queues. + * + * @dev: net device + * @channels: returns the number of max / current queues + */ +static void bnx2x_get_channels(struct net_device *dev, + struct ethtool_channels *channels) +{ + struct bnx2x *bp = netdev_priv(dev); + + channels->max_combined = BNX2X_MAX_RSS_COUNT(bp); + channels->combined_count = BNX2X_NUM_ETH_QUEUES(bp); +} + +/** + * bnx2x_change_num_queues - change the number of RSS queues. + * + * @bp: bnx2x private structure + * + * Re-configure interrupt mode to get the new number of MSI-X + * vectors and re-add NAPI objects. + */ +static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss) +{ + bnx2x_disable_msi(bp); + bp->num_ethernet_queues = num_rss; + bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; + BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); + bnx2x_set_int_mode(bp); +} + +/** + * bnx2x_set_channels - sets the number of RSS queues. 
+ * + * @dev: net device + * @channels: includes the number of queues requested + */ +static int bnx2x_set_channels(struct net_device *dev, + struct ethtool_channels *channels) +{ + struct bnx2x *bp = netdev_priv(dev); + + DP(BNX2X_MSG_ETHTOOL, + "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n", + channels->rx_count, channels->tx_count, channels->other_count, + channels->combined_count); + + /* We don't support separate rx / tx channels. + * We don't allow setting 'other' channels. + */ + if (channels->rx_count || channels->tx_count || channels->other_count + || (channels->combined_count == 0) || + (channels->combined_count > BNX2X_MAX_RSS_COUNT(bp))) { + DP(BNX2X_MSG_ETHTOOL, "command parameters not supported\n"); + return -EINVAL; } - return bnx2x_config_rss_pf(bp, ind_table, false); + /* Check if there was a change in the active parameters */ + if (channels->combined_count == BNX2X_NUM_ETH_QUEUES(bp)) { + DP(BNX2X_MSG_ETHTOOL, "No change in active parameters\n"); + return 0; + } + + /* Set the requested number of queues in bp context. + * Note that the actual number of queues created during load may be + * less than requested if memory is low. + */ + if (unlikely(!netif_running(dev))) { + bnx2x_change_num_queues(bp, channels->combined_count); + return 0; + } + bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); + bnx2x_change_num_queues(bp, channels->combined_count); + return bnx2x_nic_load(bp, LOAD_NORMAL); } static const struct ethtool_ops bnx2x_ethtool_ops = { @@ -2342,6 +3446,9 @@ static const struct ethtool_ops bnx2x_ethtool_ops = { .get_drvinfo = bnx2x_get_drvinfo, .get_regs_len = bnx2x_get_regs_len, .get_regs = bnx2x_get_regs, + .get_dump_flag = bnx2x_get_dump_flag, + .get_dump_data = bnx2x_get_dump_data, + .set_dump = bnx2x_set_dump, .get_wol = bnx2x_get_wol, .set_wol = bnx2x_set_wol, .get_msglevel = bnx2x_get_msglevel, @@ -2359,16 +3466,48 @@ static const struct ethtool_ops bnx2x_ethtool_ops = { .set_pauseparam = bnx2x_set_pauseparam, .self_test = bnx2x_self_test, .get_sset_count = bnx2x_get_sset_count, + .get_priv_flags = bnx2x_get_private_flags, .get_strings = bnx2x_get_strings, .set_phys_id = bnx2x_set_phys_id, .get_ethtool_stats = bnx2x_get_ethtool_stats, .get_rxnfc = bnx2x_get_rxnfc, + .set_rxnfc = bnx2x_set_rxnfc, + .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size, + .get_rxfh = bnx2x_get_rxfh, + .set_rxfh = bnx2x_set_rxfh, + .get_channels = bnx2x_get_channels, + .set_channels = bnx2x_set_channels, + .get_module_info = bnx2x_get_module_info, + .get_module_eeprom = bnx2x_get_module_eeprom, + .get_eee = bnx2x_get_eee, + .set_eee = bnx2x_set_eee, + .get_ts_info = ethtool_op_get_ts_info, +}; + +static const struct ethtool_ops bnx2x_vf_ethtool_ops = { + .get_settings = bnx2x_get_settings, + .set_settings = bnx2x_set_settings, + .get_drvinfo = bnx2x_get_drvinfo, + .get_msglevel = bnx2x_get_msglevel, + .set_msglevel = bnx2x_set_msglevel, + .get_link = bnx2x_get_link, + .get_coalesce = bnx2x_get_coalesce, + .get_ringparam = bnx2x_get_ringparam, + .set_ringparam = bnx2x_set_ringparam, + .get_sset_count = bnx2x_get_sset_count, + .get_strings = bnx2x_get_strings, + .get_ethtool_stats = bnx2x_get_ethtool_stats, + .get_rxnfc = bnx2x_get_rxnfc, + .set_rxnfc = bnx2x_set_rxnfc, .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size, - .get_rxfh_indir = bnx2x_get_rxfh_indir, - .set_rxfh_indir = bnx2x_set_rxfh_indir, + .get_rxfh = bnx2x_get_rxfh, + .set_rxfh = bnx2x_set_rxfh, + .get_channels = bnx2x_get_channels, + .set_channels = bnx2x_set_channels, }; -void 
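bnx2x_set_channels() above honors only the combined count (separate rx/tx/other channels are rejected), and changing it on a running interface triggers a full unload/reload. A minimal user-space sketch of a resize, assuming "eth0" and a target of 4 combined queues:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ifreq ifr = {0};
		struct ethtool_channels ch = { .cmd = ETHTOOL_GCHANNELS };
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&ch;
		ioctl(fd, SIOCETHTOOL, &ifr);	/* read current and max counts */

		if (ch.max_combined >= 4) {
			ch.cmd = ETHTOOL_SCHANNELS;
			ch.combined_count = 4;	/* rx/tx/other stay as reported */
			ioctl(fd, SIOCETHTOOL, &ifr);
		}
		return 0;
	}

Equivalent to "ethtool -L eth0 combined 4".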
bnx2x_set_ethtool_ops(struct net_device *netdev) +void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev) { - SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops); + netdev->ethtool_ops = (IS_PF(bp)) ? + &bnx2x_ethtool_ops : &bnx2x_vf_ethtool_ops; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h index 998652a1b85..95dc3654354 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h @@ -1,6 +1,6 @@ /* bnx2x_fw_defs.h: Broadcom Everest network driver. * - * Copyright (c) 2007-2011 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -23,33 +23,38 @@ (IRO[159].base + ((funcId) * IRO[159].m1)) #define CSTORM_FUNC_EN_OFFSET(funcId) \ (IRO[149].base + ((funcId) * IRO[149].m1)) +#define CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hcIndex, sbId) \ + (IRO[139].base + ((hcIndex) * IRO[139].m1) + ((sbId) * IRO[139].m2)) +#define CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hcIndex, sbId) \ + (IRO[138].base + (((hcIndex)>>2) * IRO[138].m1) + (((hcIndex)&3) \ + * IRO[138].m2) + ((sbId) * IRO[138].m3)) #define CSTORM_IGU_MODE_OFFSET (IRO[157].base) #define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ - (IRO[315].base + ((pfId) * IRO[315].m1)) + (IRO[317].base + ((pfId) * IRO[317].m1)) #define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ - (IRO[316].base + ((pfId) * IRO[316].m1)) + (IRO[318].base + ((pfId) * IRO[318].m1)) #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \ - (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2)) -#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \ (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2)) +#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \ + (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2)) #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \ - (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2)) -#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \ (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2)) +#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \ + (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2)) #define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \ - (IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * IRO[307].m2)) + (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2)) #define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \ - (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2)) + (IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2)) #define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \ - (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2)) + (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2)) #define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ - (IRO[314].base + ((pfId) * IRO[314].m1)) + (IRO[316].base + ((pfId) * IRO[316].m1)) #define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[306].base + ((pfId) * IRO[306].m1)) + (IRO[308].base + ((pfId) * IRO[308].m1)) #define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[305].base + ((pfId) * IRO[305].m1)) + (IRO[307].base + ((pfId) * IRO[307].m1)) #define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[304].base + ((pfId) * IRO[304].m1)) + (IRO[306].base + 
((pfId) * IRO[306].m1)) #define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ (IRO[151].base + ((funcId) * IRO[151].m1)) #define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \ @@ -82,51 +87,47 @@ (IRO[156].base + ((vfId) * IRO[156].m1)) #define CSTORM_VF_TO_PF_OFFSET(funcId) \ (IRO[150].base + ((funcId) * IRO[150].m1)) -#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base) #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \ (IRO[203].base + ((pfId) * IRO[203].m1)) #define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base) #define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ (IRO[101].base + ((assertListEntry) * IRO[101].m1)) -#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[107].base) -#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \ - (IRO[108].base) #define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \ (IRO[201].base + ((pfId) * IRO[201].m1)) #define TSTORM_FUNC_EN_OFFSET(funcId) \ (IRO[103].base + ((funcId) * IRO[103].m1)) #define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ - (IRO[271].base + ((pfId) * IRO[271].m1)) -#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \ (IRO[272].base + ((pfId) * IRO[272].m1)) -#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \ +#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \ (IRO[273].base + ((pfId) * IRO[273].m1)) -#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \ +#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \ (IRO[274].base + ((pfId) * IRO[274].m1)) +#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \ + (IRO[275].base + ((pfId) * IRO[275].m1)) #define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[270].base + ((pfId) * IRO[270].m1)) + (IRO[271].base + ((pfId) * IRO[271].m1)) #define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[269].base + ((pfId) * IRO[269].m1)) + (IRO[270].base + ((pfId) * IRO[270].m1)) #define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[268].base + ((pfId) * IRO[268].m1)) + (IRO[269].base + ((pfId) * IRO[269].m1)) #define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ - (IRO[267].base + ((pfId) * IRO[267].m1)) + (IRO[268].base + ((pfId) * IRO[268].m1)) #define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \ - (IRO[276].base + ((pfId) * IRO[276].m1)) + (IRO[278].base + ((pfId) * IRO[278].m1)) #define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ - (IRO[263].base + ((pfId) * IRO[263].m1)) -#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ (IRO[264].base + ((pfId) * IRO[264].m1)) -#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \ +#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ (IRO[265].base + ((pfId) * IRO[265].m1)) -#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ +#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \ (IRO[266].base + ((pfId) * IRO[266].m1)) +#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ + (IRO[267].base + ((pfId) * IRO[267].m1)) #define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \ (IRO[202].base + ((pfId) * IRO[202].m1)) #define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ (IRO[105].base + ((funcId) * IRO[105].m1)) #define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \ - (IRO[216].base + ((pfId) * IRO[216].m1)) + (IRO[217].base + ((pfId) * IRO[217].m1)) #define TSTORM_VF_TO_PF_OFFSET(funcId) \ (IRO[104].base + ((funcId) * IRO[104].m1)) #define USTORM_AGG_DATA_OFFSET (IRO[206].base) @@ -134,35 +135,32 @@ #define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base) #define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \ (IRO[176].base + ((assertListEntry) * IRO[176].m1)) -#define 
USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \ - (IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * \ - IRO[205].m2)) #define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \ (IRO[183].base + ((portId) * IRO[183].m1)) #define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \ - (IRO[317].base + ((pfId) * IRO[317].m1)) + (IRO[319].base + ((pfId) * IRO[319].m1)) #define USTORM_FUNC_EN_OFFSET(funcId) \ (IRO[178].base + ((funcId) * IRO[178].m1)) #define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ - (IRO[281].base + ((pfId) * IRO[281].m1)) + (IRO[283].base + ((pfId) * IRO[283].m1)) #define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ - (IRO[282].base + ((pfId) * IRO[282].m1)) + (IRO[284].base + ((pfId) * IRO[284].m1)) #define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ - (IRO[286].base + ((pfId) * IRO[286].m1)) + (IRO[288].base + ((pfId) * IRO[288].m1)) #define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \ - (IRO[283].base + ((pfId) * IRO[283].m1)) + (IRO[285].base + ((pfId) * IRO[285].m1)) #define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[279].base + ((pfId) * IRO[279].m1)) + (IRO[281].base + ((pfId) * IRO[281].m1)) #define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[278].base + ((pfId) * IRO[278].m1)) + (IRO[280].base + ((pfId) * IRO[280].m1)) #define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[277].base + ((pfId) * IRO[277].m1)) + (IRO[279].base + ((pfId) * IRO[279].m1)) #define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ - (IRO[280].base + ((pfId) * IRO[280].m1)) + (IRO[282].base + ((pfId) * IRO[282].m1)) #define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \ - (IRO[284].base + ((pfId) * IRO[284].m1)) + (IRO[286].base + ((pfId) * IRO[286].m1)) #define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ - (IRO[285].base + ((pfId) * IRO[285].m1)) + (IRO[287].base + ((pfId) * IRO[287].m1)) #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \ (IRO[182].base + ((pfId) * IRO[182].m1)) #define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ @@ -188,39 +186,39 @@ #define XSTORM_FUNC_EN_OFFSET(funcId) \ (IRO[47].base + ((funcId) * IRO[47].m1)) #define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ - (IRO[294].base + ((pfId) * IRO[294].m1)) + (IRO[296].base + ((pfId) * IRO[296].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \ - (IRO[297].base + ((pfId) * IRO[297].m1)) + (IRO[299].base + ((pfId) * IRO[299].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \ - (IRO[298].base + ((pfId) * IRO[298].m1)) + (IRO[300].base + ((pfId) * IRO[300].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \ - (IRO[299].base + ((pfId) * IRO[299].m1)) + (IRO[301].base + ((pfId) * IRO[301].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \ - (IRO[300].base + ((pfId) * IRO[300].m1)) + (IRO[302].base + ((pfId) * IRO[302].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \ - (IRO[301].base + ((pfId) * IRO[301].m1)) + (IRO[303].base + ((pfId) * IRO[303].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \ - (IRO[302].base + ((pfId) * IRO[302].m1)) + (IRO[304].base + ((pfId) * IRO[304].m1)) #define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \ - (IRO[303].base + ((pfId) * IRO[303].m1)) + (IRO[305].base + ((pfId) * IRO[305].m1)) #define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[293].base + ((pfId) * IRO[293].m1)) + (IRO[295].base + ((pfId) * IRO[295].m1)) #define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[292].base + ((pfId) * IRO[292].m1)) + (IRO[294].base + ((pfId) * IRO[294].m1)) #define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[291].base + ((pfId) * IRO[291].m1)) + (IRO[293].base + ((pfId) * IRO[293].m1)) #define 
XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ - (IRO[296].base + ((pfId) * IRO[296].m1)) + (IRO[298].base + ((pfId) * IRO[298].m1)) #define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \ - (IRO[295].base + ((pfId) * IRO[295].m1)) + (IRO[297].base + ((pfId) * IRO[297].m1)) #define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \ - (IRO[290].base + ((pfId) * IRO[290].m1)) + (IRO[292].base + ((pfId) * IRO[292].m1)) #define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ - (IRO[289].base + ((pfId) * IRO[289].m1)) + (IRO[291].base + ((pfId) * IRO[291].m1)) #define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \ - (IRO[288].base + ((pfId) * IRO[288].m1)) + (IRO[290].base + ((pfId) * IRO[290].m1)) #define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \ - (IRO[287].base + ((pfId) * IRO[287].m1)) + (IRO[289].base + ((pfId) * IRO[289].m1)) #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \ (IRO[44].base + ((pfId) * IRO[44].m1)) #define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ @@ -243,18 +241,6 @@ (IRO[48].base + ((funcId) * IRO[48].m1)) #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 -/** -* This file defines HSI constants for the ETH flow -*/ -#ifdef _EVEREST_MICROCODE -#include "Microcode\Generated\DataTypes\eth_rx_bd.h" -#include "Microcode\Generated\DataTypes\eth_tx_bd.h" -#include "Microcode\Generated\DataTypes\eth_rx_cqe.h" -#include "Microcode\Generated\DataTypes\eth_rx_sge.h" -#include "Microcode\Generated\DataTypes\eth_rx_cqe_next_page.h" -#endif - - /* Ethernet Ring parameters */ #define X_ETH_LOCAL_RING_SIZE 13 #define FIRST_BD_IN_PKT 0 @@ -315,12 +301,10 @@ #define MAX_VLAN_CREDIT_E1H 0 /* Per Chip */ #define MAX_VLAN_CREDIT_E2 272 /* Per Path */ - /* Maximal aggregation queues supported */ #define ETH_MAX_AGGREGATION_QUEUES_E1 32 #define ETH_MAX_AGGREGATION_QUEUES_E1H_E2 64 - #define ETH_NUM_OF_MCAST_BINS 256 #define ETH_NUM_OF_MCAST_ENGINES_E2 72 @@ -333,9 +317,7 @@ #define DISABLE_STATISTIC_COUNTER_ID_VALUE 0 -/** - * This file defines HSI constants common to all microcode flows - */ +/* This file defines HSI constants common to all microcode flows */ #define PROTOCOL_STATE_BIT_OFFSET 6 @@ -365,7 +347,6 @@ /* max number of slow path commands per port */ #define MAX_RAMRODS_PER_PORT 8 - /**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ #define TIMERS_TICK_SIZE_CHIP (1e-3) @@ -392,19 +373,20 @@ that is not mapped to priority*/ #define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF - #define C_ERES_PER_PAGE \ (PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem))) #define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1) #define STATS_QUERY_CMD_COUNT 16 -#define NIV_LIST_TABLE_SIZE 4096 +#define AFEX_LIST_TABLE_SIZE 4096 #define INVALID_VNIC_ID 0xFF - #define UNDEF_IRO 0x80000000 +/* used for defining the amount of FCoE tasks supported for PF */ +#define MAX_FCOE_FUNCS_PER_ENGINE 2 +#define MAX_NUM_FCOE_TASKS_PER_ENGINE 4096 #endif /* BNX2X_FW_DEFS_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h index f4a07fbaed0..8aafd9b5d6a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h @@ -1,13 +1,13 @@ /* bnx2x_fw_file_hdr.h: FW binary file header structure. * - * Copyright (c) 2007-2011 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. 
* - * Maintained by: Eilon Greenstein <eilong@broadcom.com> - * Written by: Vladislav Zolotarov <vladz@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com> + * Written by: Vladislav Zolotarov * Based on the original idea of John Wright <john.wright@hp.com>. */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 3e30c8642c2..5ba8af50c84 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -1,6 +1,6 @@ /* bnx2x_hsi.h: Broadcom Everest network driver. * - * Copyright (c) 2007-2011 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -10,6 +10,7 @@ #define BNX2X_HSI_H #include "bnx2x_fw_defs.h" +#include "bnx2x_mfw_req.h" #define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e @@ -33,11 +34,6 @@ struct license_key { u32 reserved_b[4]; }; - -#define PORT_0 0 -#define PORT_1 1 -#define PORT_MAX 2 - /**************************************************************************** * Shared HW configuration * ****************************************************************************/ @@ -118,6 +114,10 @@ struct license_key { #define EPIO_CFG_EPIO30 0x0000001f #define EPIO_CFG_EPIO31 0x00000020 +struct mac_addr { + u32 upper; + u32 lower; +}; struct shared_hw_cfg { /* NVRAM Offset */ /* Up to 16 bytes of NULL-terminated string */ @@ -172,6 +172,7 @@ struct shared_hw_cfg { /* NVRAM Offset */ #define SHARED_HW_CFG_LED_MAC4 0x000c0000 #define SHARED_HW_CFG_LED_PHY8 0x000d0000 #define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000 + #define SHARED_HW_CFG_LED_EXTPHY2 0x000f0000 #define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000 @@ -504,7 +505,30 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ u32 e3_cmn_pin_cfg1; /* 0x170 */ #define PORT_HW_CFG_E3_OVER_CURRENT_MASK 0x000000FF #define PORT_HW_CFG_E3_OVER_CURRENT_SHIFT 0 - u32 reserved0[7]; /* 0x174 */ + + /* pause on host ring */ + u32 generic_features; /* 0x174 */ + #define PORT_HW_CFG_PAUSE_ON_HOST_RING_MASK 0x00000001 + #define PORT_HW_CFG_PAUSE_ON_HOST_RING_SHIFT 0 + #define PORT_HW_CFG_PAUSE_ON_HOST_RING_DISABLED 0x00000000 + #define PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED 0x00000001 + + /* SFP+ Tx Equalization: NIC recommended and tested value is 0xBEB2 + * LOM recommended and tested value is 0xBEB2. Using a different + * value means using a value not tested by BRCM + */ + u32 sfi_tap_values; /* 0x178 */ + #define PORT_HW_CFG_TX_EQUALIZATION_MASK 0x0000FFFF + #define PORT_HW_CFG_TX_EQUALIZATION_SHIFT 0 + + /* SFP+ Tx driver broadcast IDRIVER: NIC recommended and tested + * value is 0x2. LOM recommended and tested value is 0x2. 
Using a + * different value means using a value not tested by BRCM + */ + #define PORT_HW_CFG_TX_DRV_BROADCAST_MASK 0x000F0000 + #define PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT 16 + + u32 reserved0[5]; /* 0x17c */ u32 aeu_int_mask; /* 0x190 */ @@ -618,12 +642,6 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ #define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000 #define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000 - /* Enable RJ45 magjack pair swapping on 10GBase-T PHY, 84833 only */ - #define PORT_HW_CFG_RJ45_PR_SWP_MASK 0x00400000 - #define PORT_HW_CFG_RJ45_PR_SWP_SHIFT 22 - #define PORT_HW_CFG_RJ45_PR_SWP_DISABLED 0x00000000 - #define PORT_HW_CFG_RJ45_PR_SWP_ENABLED 0x00400000 - /* Determine the Serdes electrical interface */ #define PORT_HW_CFG_NET_SERDES_IF_MASK 0x0F000000 #define PORT_HW_CFG_NET_SERDES_IF_SHIFT 24 @@ -705,6 +723,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54618SE 0x00000e00 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722 0x00000f00 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54616 0x00001000 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84834 0x00001100 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00 @@ -761,6 +780,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE 0x00000e00 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722 0x00000f00 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616 0x00001000 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834 0x00001100 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 @@ -838,6 +858,7 @@ struct shared_feat_cfg { /* NVRAM Offset */ #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE 0x00000400 /* The interval in seconds between sending LLDP packets. 
Set to zero to disable the feature */ @@ -898,10 +919,9 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */ #define PORT_FEAT_CFG_DCBX_DISABLED 0x00000000 #define PORT_FEAT_CFG_DCBX_ENABLED 0x00000100 - #define PORT_FEAT_CFG_AUTOGREEN_MASK 0x00000200 - #define PORT_FEAT_CFG_AUTOGREEN_SHIFT 9 - #define PORT_FEAT_CFG_AUTOGREEN_DISABLED 0x00000000 - #define PORT_FEAT_CFG_AUTOGREEN_ENABLED 0x00000200 + #define PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK 0x00000C00 + #define PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE 0x00000400 + #define PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI 0x00000800 #define PORT_FEATURE_EN_SIZE_MASK 0x0f000000 #define PORT_FEATURE_EN_SIZE_SHIFT 24 @@ -1076,8 +1096,18 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */ uses the same defines as link_config */ u32 mfw_wol_link_cfg2; /* 0x480 */ - u32 Reserved2[17]; /* 0x484 */ + /* EEE power saving mode */ + u32 eee_power_mode; /* 0x484 */ + #define PORT_FEAT_CFG_EEE_POWER_MODE_MASK 0x000000FF + #define PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT 0 + #define PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED 0x00000000 + #define PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED 0x00000001 + #define PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE 0x00000002 + #define PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY 0x00000003 + + + u32 Reserved2[16]; /* 0x488 */ }; @@ -1139,8 +1169,7 @@ struct shm_dev_info { /* size */ #define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS) -/* LED Blink rate that will achieve ~15.9Hz */ -#define LED_BLINK_RATE_VAL 480 +#define MFW_TRACE_SIGNATURE 0x54524342 /**************************************************************************** * Driver <-> FW Mailbox * @@ -1150,6 +1179,7 @@ struct drv_port_mb { u32 link_status; /* Driver should update this field on any link change event */ + #define LINK_STATUS_NONE (0<<0) #define LINK_STATUS_LINK_FLAG_MASK 0x00000001 #define LINK_STATUS_LINK_UP 0x00000001 #define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E @@ -1207,6 +1237,7 @@ struct drv_port_mb { #define LINK_STATUS_PFC_ENABLED 0x20000000 #define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000 + #define LINK_STATUS_SFP_TX_FAULT 0x80000000 u32 port_stx; @@ -1246,22 +1277,42 @@ struct drv_func_mb { #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006 #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000 #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234 + #define DRV_MSG_CODE_VRFY_AFEX_SUPPORTED 0xa2000000 + #define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED 0x00070002 #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014 + #define REQ_BC_VER_4_MT_SUPPORTED 0x00070201 #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201 + #define REQ_BC_VER_4_FCOE_FEATURES 0x00070209 #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000 #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000 + #define REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF 0x00070401 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 + + #define DRV_MSG_CODE_AFEX_DRIVER_SETMAC 0xd0000000 + #define DRV_MSG_CODE_AFEX_LISTGET_ACK 0xd1000000 + #define DRV_MSG_CODE_AFEX_LISTSET_ACK 0xd2000000 + #define DRV_MSG_CODE_AFEX_STATSGET_ACK 0xd3000000 + #define DRV_MSG_CODE_AFEX_VIFSET_ACK 0xd4000000 + #define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000 #define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000 + #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000 + + #define DRV_MSG_CODE_RMMOD 0xdb000000 + #define REQ_BC_VER_4_RMMOD_CMD 0x0007080f + #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 #define REQ_BC_VER_4_SET_MF_BW 0x00060202 #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 #define DRV_MSG_CODE_LINK_STATUS_CHANGED 0x01000000 + 
#define DRV_MSG_CODE_INITIATE_FLR 0x02000000 + #define REQ_BC_VER_4_INITIATE_FLR 0x00070213 + #define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000 #define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000 #define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000 @@ -1273,6 +1324,11 @@ struct drv_func_mb { #define DRV_MSG_CODE_SET_MF_BW_MIN_MASK 0x00ff0000 #define DRV_MSG_CODE_SET_MF_BW_MAX_MASK 0xff000000 + #define DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET 0x00000002 + + #define DRV_MSG_CODE_LOAD_REQ_WITH_LFA 0x0000100a + #define DRV_MSG_CODE_LOAD_REQ_FORCE_LFA 0x00002000 + u32 fw_mb_header; #define FW_MSG_CODE_MASK 0xffff0000 #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000 @@ -1307,9 +1363,21 @@ struct drv_func_mb { #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000 #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000 #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000 + #define FW_MSG_CODE_HW_SET_INVALID_IMAGE 0xb0100000 + + #define FW_MSG_CODE_AFEX_DRIVER_SETMAC_DONE 0xd0100000 + #define FW_MSG_CODE_AFEX_LISTGET_ACK 0xd1100000 + #define FW_MSG_CODE_AFEX_LISTSET_ACK 0xd2100000 + #define FW_MSG_CODE_AFEX_STATSGET_ACK 0xd3100000 + #define FW_MSG_CODE_AFEX_VIFSET_ACK 0xd4100000 + #define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000 #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000 + #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000 + + #define FW_MSG_CODE_RMMOD_ACK 0xdb100000 + #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000 @@ -1365,8 +1433,16 @@ struct drv_func_mb { #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000 #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000 + #define DRV_STATUS_AFEX_EVENT_MASK 0x03f00000 + #define DRV_STATUS_AFEX_LISTGET_REQ 0x00100000 + #define DRV_STATUS_AFEX_LISTSET_REQ 0x00200000 + #define DRV_STATUS_AFEX_STATSGET_REQ 0x00400000 + #define DRV_STATUS_AFEX_VIFSET_REQ 0x00800000 + #define DRV_STATUS_DRV_INFO_REQ 0x04000000 + #define DRV_STATUS_EEE_NEGOTIATION_RESULTS 0x08000000 + u32 virt_mac_upper; #define VIRT_MAC_SIGN_MASK 0xffff0000 #define VIRT_MAC_SIGNATURE 0x564d0000 @@ -1407,7 +1483,7 @@ struct port_mf_cfg { #define PORT_MF_CFG_E1HOV_TAG_SHIFT 0 #define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK - u32 reserved[3]; + u32 reserved[1]; }; @@ -1456,18 +1532,38 @@ struct func_mf_cfg { #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0 #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK - u32 reserved[2]; + /* afex default VLAN ID - 12 bits */ + #define FUNC_MF_CFG_AFEX_VLAN_MASK 0x0fff0000 + #define FUNC_MF_CFG_AFEX_VLAN_SHIFT 16 + + u32 afex_config; + #define FUNC_MF_CFG_AFEX_COS_FILTER_MASK 0x000000ff + #define FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT 0 + #define FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK 0x0000ff00 + #define FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT 8 + #define FUNC_MF_CFG_AFEX_MBA_ENABLED_VAL 0x00000100 + #define FUNC_MF_CFG_AFEX_VLAN_MODE_MASK 0x000f0000 + #define FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT 16 + + u32 reserved; +}; + +enum mf_cfg_afex_vlan_mode { + FUNC_MF_CFG_AFEX_VLAN_TRUNK_MODE = 0, + FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE, + FUNC_MF_CFG_AFEX_VLAN_TRUNK_TAG_NATIVE_MODE }; /* This structure is not applicable and should not be accessed on 57711 */ struct func_ext_cfg { u32 func_cfg; - #define MACP_FUNC_CFG_FLAGS_MASK 0x000000FF + #define MACP_FUNC_CFG_FLAGS_MASK 0x0000007F #define MACP_FUNC_CFG_FLAGS_SHIFT 0 #define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001 #define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002 #define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004 #define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008 + #define MACP_FUNC_CFG_PAUSE_ON_HOST_RING 
0x00000080 u32 iscsi_mac_addr_upper; u32 iscsi_mac_addr_lower; @@ -1493,7 +1589,8 @@ struct func_ext_cfg { struct mf_cfg { struct shared_mf_cfg shared_mf_config; /* 0x4 */ - struct port_mf_cfg port_mf_config[PORT_MAX]; /* 0x10 * 2 = 0x20 */ + /* 0x8*2*2=0x20 */ + struct port_mf_cfg port_mf_config[NVM_PATH_MAX][PORT_MAX]; /* for all chips, there are 8 mf functions */ struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; /* 0x18 * 8 = 0xc0 */ /* @@ -1577,6 +1674,11 @@ struct fw_flr_mb { struct fw_flr_ack ack; }; +struct eee_remote_vals { + u32 tx_tw; + u32 rx_tw; +}; + /**** SUPPORT FOR SHMEM ARRRAYS *** * The SHMEM HSI is aligned on 32 bit boundaries which makes it difficult to * define arrays with storage types smaller then unsigned dwords. @@ -1845,11 +1947,79 @@ struct lldp_local_mib { #define DCBX_LOCAL_PFC_MISMATCH 0x00000010 #define DCBX_LOCAL_APP_MISMATCH 0x00000020 #define DCBX_REMOTE_MIB_ERROR 0x00000040 + #define DCBX_REMOTE_ETS_TLV_NOT_FOUND 0x00000080 + #define DCBX_REMOTE_PFC_TLV_NOT_FOUND 0x00000100 + #define DCBX_REMOTE_APP_TLV_NOT_FOUND 0x00000200 struct dcbx_features features; u32 suffix_seq_num; }; /***END OF DCBX STRUCTURES DECLARATIONS***/ +/***********************************************************/ +/* Elink section */ +/***********************************************************/ +#define SHMEM_LINK_CONFIG_SIZE 2 +struct shmem_lfa { + u32 req_duplex; + #define REQ_DUPLEX_PHY0_MASK 0x0000ffff + #define REQ_DUPLEX_PHY0_SHIFT 0 + #define REQ_DUPLEX_PHY1_MASK 0xffff0000 + #define REQ_DUPLEX_PHY1_SHIFT 16 + u32 req_flow_ctrl; + #define REQ_FLOW_CTRL_PHY0_MASK 0x0000ffff + #define REQ_FLOW_CTRL_PHY0_SHIFT 0 + #define REQ_FLOW_CTRL_PHY1_MASK 0xffff0000 + #define REQ_FLOW_CTRL_PHY1_SHIFT 16 + u32 req_line_speed; /* Also determine AutoNeg */ + #define REQ_LINE_SPD_PHY0_MASK 0x0000ffff + #define REQ_LINE_SPD_PHY0_SHIFT 0 + #define REQ_LINE_SPD_PHY1_MASK 0xffff0000 + #define REQ_LINE_SPD_PHY1_SHIFT 16 + u32 speed_cap_mask[SHMEM_LINK_CONFIG_SIZE]; + u32 additional_config; + #define REQ_FC_AUTO_ADV_MASK 0x0000ffff + #define REQ_FC_AUTO_ADV0_SHIFT 0 + #define NO_LFA_DUE_TO_DCC_MASK 0x00010000 + u32 lfa_sts; + #define LFA_LINK_FLAP_REASON_OFFSET 0 + #define LFA_LINK_FLAP_REASON_MASK 0x000000ff + #define LFA_LINK_DOWN 0x1 + #define LFA_LOOPBACK_ENABLED 0x2 + #define LFA_DUPLEX_MISMATCH 0x3 + #define LFA_MFW_IS_TOO_OLD 0x4 + #define LFA_LINK_SPEED_MISMATCH 0x5 + #define LFA_FLOW_CTRL_MISMATCH 0x6 + #define LFA_SPEED_CAP_MISMATCH 0x7 + #define LFA_DCC_LFA_DISABLED 0x8 + #define LFA_EEE_MISMATCH 0x9 + + #define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8 + #define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00 + + #define LINK_FLAP_COUNT_OFFSET 16 + #define LINK_FLAP_COUNT_MASK 0x00ff0000 + + #define LFA_FLAGS_MASK 0xff000000 + #define SHMEM_LFA_DONT_CLEAR_STAT (1<<24) +}; + +/* Used to support NSCI get OS driver version + * on driver load the version value will be set + * on driver unload driver value of 0x0 will be set. 
+ */ +struct os_drv_ver { +#define DRV_VER_NOT_LOADED 0 + + /* personalities order is important */ +#define DRV_PERS_ETHERNET 0 +#define DRV_PERS_ISCSI 1 +#define DRV_PERS_FCOE 2 + + /* shmem2 struct is constant can't add more personalities here */ +#define MAX_DRV_PERS 3 + u32 versions[MAX_DRV_PERS]; +}; + struct ncsi_oem_fcoe_features { u32 fcoe_features1; #define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF @@ -1949,23 +2119,39 @@ struct shmem2_region { u32 nvm_retain_bitmap_addr; /* 0x0070 */ - u32 reserved1; /* 0x0074 */ + /* afex support of that driver */ + u32 afex_driver_support; /* 0x0074 */ + #define SHMEM_AFEX_VERSION_MASK 0x100f + #define SHMEM_AFEX_SUPPORTED_VERSION_ONE 0x1001 + #define SHMEM_AFEX_REDUCED_DRV_LOADED 0x8000 - u32 reserved2[E2_FUNC_MAX]; + /* driver receives addr in scratchpad to which it should respond */ + u32 afex_scratchpad_addr_to_write[E2_FUNC_MAX]; - u32 reserved3[E2_FUNC_MAX];/* 0x0088 */ - u32 reserved4[E2_FUNC_MAX];/* 0x0098 */ + /* generic params from MCP to driver (value depends on the msg sent + * to driver) + */ + u32 afex_param1_to_driver[E2_FUNC_MAX]; /* 0x0088 */ + u32 afex_param2_to_driver[E2_FUNC_MAX]; /* 0x0098 */ u32 swim_base_addr; /* 0x0108 */ u32 swim_funcs; u32 swim_main_cb; - u32 reserved5[2]; + /* bitmap notifying which VIF profiles stored in nvram are enabled by + * switch + */ + u32 afex_profiles_enabled[2]; /* generic flags controlled by the driver */ u32 drv_flags; - #define DRV_FLAGS_DCB_CONFIGURED 0x1 + #define DRV_FLAGS_DCB_CONFIGURED 0x0 + #define DRV_FLAGS_DCB_CONFIGURATION_ABORTED 0x1 + #define DRV_FLAGS_DCB_MFW_CONFIGURED 0x2 + #define DRV_FLAGS_PORT_MASK ((1 << DRV_FLAGS_DCB_CONFIGURED) | \ + (1 << DRV_FLAGS_DCB_CONFIGURATION_ABORTED) | \ + (1 << DRV_FLAGS_DCB_MFW_CONFIGURED)) /* pointer to extended dev_info shared data copied from nvm image */ u32 extended_dev_info_shared_addr; u32 ncsi_oem_data_addr; @@ -2002,6 +2188,64 @@ struct shmem2_region { #define DRV_INFO_CONTROL_VER_SHIFT 0 #define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00 #define DRV_INFO_CONTROL_OP_CODE_SHIFT 8 + u32 ibft_host_addr; /* initialized by option ROM */ + struct eee_remote_vals eee_remote_vals[PORT_MAX]; + u32 reserved[E2_FUNC_MAX]; + + + /* the status of EEE auto-negotiation + * bits 15:0 the configured tx-lpi entry timer value. Depends on bit 31. + * bits 19:16 the supported modes for EEE. + * bits 23:20 the speeds advertised for EEE. + * bits 27:24 the speeds the Link partner advertised for EEE. + * The supported/adv. modes in bits 27:19 originate from the + * SHMEM_EEE_XXX_ADV definitions (where XXX is replaced by speed). + * bit 28 when 1'b1 EEE was requested. + * bit 29 when 1'b1 tx lpi was requested. + * bit 30 when 1'b1 EEE was negotiated. Tx lpi will be asserted iff + * 30:29 are 2'b11. + * bit 31 when 1'b0 bits 15:0 contain a PORT_FEAT_CFG_EEE_ define as + * value. When 1'b1 those bits contain a value times 16 microseconds. 
+ */ + u32 eee_status[PORT_MAX]; + #define SHMEM_EEE_TIMER_MASK 0x0000ffff + #define SHMEM_EEE_SUPPORTED_MASK 0x000f0000 + #define SHMEM_EEE_SUPPORTED_SHIFT 16 + #define SHMEM_EEE_ADV_STATUS_MASK 0x00f00000 + #define SHMEM_EEE_100M_ADV (1<<0) + #define SHMEM_EEE_1G_ADV (1<<1) + #define SHMEM_EEE_10G_ADV (1<<2) + #define SHMEM_EEE_ADV_STATUS_SHIFT 20 + #define SHMEM_EEE_LP_ADV_STATUS_MASK 0x0f000000 + #define SHMEM_EEE_LP_ADV_STATUS_SHIFT 24 + #define SHMEM_EEE_REQUESTED_BIT 0x10000000 + #define SHMEM_EEE_LPI_REQUESTED_BIT 0x20000000 + #define SHMEM_EEE_ACTIVE_BIT 0x40000000 + #define SHMEM_EEE_TIME_OUTPUT_BIT 0x80000000 + + u32 sizeof_port_stats; + + /* Link Flap Avoidance */ + u32 lfa_host_addr[PORT_MAX]; + u32 reserved1; + + u32 reserved2; /* Offset 0x148 */ + u32 reserved3; /* Offset 0x14C */ + u32 reserved4; /* Offset 0x150 */ + u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */ + #define LINK_ATTR_SYNC_KR2_ENABLE (1<<0) + + u32 reserved5[2]; + u32 reserved6[PORT_MAX]; + + /* driver version for each personality */ + struct os_drv_ver func_os_drv_ver[E2_FUNC_MAX]; /* Offset 0x16c */ + + /* Flag to the driver that PF's drv_info_host_addr buffer was read */ + u32 mfw_drv_indication; + + /* We use indication for each PF (0..3) */ +#define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_)) }; @@ -2548,6 +2792,9 @@ struct host_port_stats { u32 pfc_frames_tx_lo; u32 pfc_frames_rx_hi; u32 pfc_frames_rx_lo; + + u32 eee_lpi_count_hi; + u32 eee_lpi_count_lo; }; @@ -2587,121 +2834,50 @@ struct host_func_stats { /* VIC definitions */ #define VICSTATST_UIF_INDEX 2 -/* current drv_info version */ -#define DRV_INFO_CUR_VER 1 - -/* drv_info op codes supported */ -enum drv_info_opcode { - ETH_STATS_OPCODE, - FCOE_STATS_OPCODE, - ISCSI_STATS_OPCODE -}; - -#define ETH_STAT_INFO_VERSION_LEN 12 -/* Per PCI Function Ethernet Statistics required from the driver */ -struct eth_stats_info { - /* Function's Driver Version. padded to 12 */ - u8 version[ETH_STAT_INFO_VERSION_LEN]; - /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */ - u8 mac_local[8]; - u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */ - u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */ - u32 mtu_size; /* MTU Size. Note : Negotiated MTU */ - u32 feature_flags; /* Feature_Flags. */ -#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01 -#define FEATURE_ETH_LSO_MASK 0x02 -#define FEATURE_ETH_BOOTMODE_MASK 0x1C -#define FEATURE_ETH_BOOTMODE_SHIFT 2 -#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2) -#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2) -#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2) -#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2) -#define FEATURE_ETH_TOE_MASK 0x20 - u32 lso_max_size; /* LSO MaxOffloadSize. */ - u32 lso_min_seg_cnt; /* LSO MinSegmentCount. */ - /* Num Offloaded Connections TCP_IPv4. */ - u32 ipv4_ofld_cnt; - /* Num Offloaded Connections TCP_IPv6. */ - u32 ipv6_ofld_cnt; - u32 promiscuous_mode; /* Promiscuous Mode. non-zero true */ - u32 txq_size; /* TX Descriptors Queue Size */ - u32 rxq_size; /* RX Descriptors Queue Size */ - /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */ - u32 txq_avg_depth; - /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */ - u32 rxq_avg_depth; - /* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/ - u32 iov_offload; - /* Number of NetQueue/VMQ Config'd. */ - u32 netq_cnt; - u32 vf_cnt; /* Num VF assigned to this PF. 
*/ -}; - -/* Per PCI Function FCOE Statistics required from the driver */ -struct fcoe_stats_info { - u8 version[12]; /* Function's Driver Version. */ - u8 mac_local[8]; /* Locally Admin Addr. */ - u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */ - u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */ - /* QoS Priority (per 802.1p). 0-7255 */ - u32 qos_priority; - u32 txq_size; /* FCoE TX Descriptors Queue Size. */ - u32 rxq_size; /* FCoE RX Descriptors Queue Size. */ - /* FCoE TX Descriptor Queue Avg Depth. */ - u32 txq_avg_depth; - /* FCoE RX Descriptors Queue Avg Depth. */ - u32 rxq_avg_depth; - u32 rx_frames_lo; /* FCoE RX Frames received. */ - u32 rx_frames_hi; /* FCoE RX Frames received. */ - u32 rx_bytes_lo; /* FCoE RX Bytes received. */ - u32 rx_bytes_hi; /* FCoE RX Bytes received. */ - u32 tx_frames_lo; /* FCoE TX Frames sent. */ - u32 tx_frames_hi; /* FCoE TX Frames sent. */ - u32 tx_bytes_lo; /* FCoE TX Bytes sent. */ - u32 tx_bytes_hi; /* FCoE TX Bytes sent. */ -}; - -/* Per PCI Function iSCSI Statistics required from the driver*/ -struct iscsi_stats_info { - u8 version[12]; /* Function's Driver Version. */ - u8 mac_local[8]; /* Locally Admin iSCSI MAC Addr. */ - u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */ - /* QoS Priority (per 802.1p). 0-7255 */ - u32 qos_priority; - u8 initiator_name[64]; /* iSCSI Boot Initiator Node name. */ - u8 ww_port_name[64]; /* iSCSI World wide port name */ - u8 boot_target_name[64];/* iSCSI Boot Target Name. */ - u8 boot_target_ip[16]; /* iSCSI Boot Target IP. */ - u32 boot_target_portal; /* iSCSI Boot Target Portal. */ - u8 boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */ - u32 max_frame_size; /* Max Frame Size. bytes */ - u32 txq_size; /* PDU TX Descriptors Queue Size. */ - u32 rxq_size; /* PDU RX Descriptors Queue Size. */ - u32 txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */ - u32 rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */ - u32 rx_pdus_lo; /* iSCSI PDUs received. */ - u32 rx_pdus_hi; /* iSCSI PDUs received. */ - u32 rx_bytes_lo; /* iSCSI RX Bytes received. */ - u32 rx_bytes_hi; /* iSCSI RX Bytes received. */ - u32 tx_pdus_lo; /* iSCSI PDUs sent. */ - u32 tx_pdus_hi; /* iSCSI PDUs sent. */ - u32 tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */ - u32 tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */ - u32 pcp_prior_map_tbl; /* C-PCP to S-PCP Priority MapTable. - * 9 nibbles, the position of each nibble - * represents the C-PCP value, the value - * of the nibble = S-PCP value. - */ -}; - -union drv_info_to_mcp { - struct eth_stats_info ether_stat; - struct fcoe_stats_info fcoe_stat; - struct iscsi_stats_info iscsi_stat; + +/* stats collected for afex. + * NOTE: structure is exactly as expected to be received by the switch. + * order must remain exactly as is unless protocol changes ! 
+ */ +struct afex_stats { + u32 tx_unicast_frames_hi; + u32 tx_unicast_frames_lo; + u32 tx_unicast_bytes_hi; + u32 tx_unicast_bytes_lo; + u32 tx_multicast_frames_hi; + u32 tx_multicast_frames_lo; + u32 tx_multicast_bytes_hi; + u32 tx_multicast_bytes_lo; + u32 tx_broadcast_frames_hi; + u32 tx_broadcast_frames_lo; + u32 tx_broadcast_bytes_hi; + u32 tx_broadcast_bytes_lo; + u32 tx_frames_discarded_hi; + u32 tx_frames_discarded_lo; + u32 tx_frames_dropped_hi; + u32 tx_frames_dropped_lo; + + u32 rx_unicast_frames_hi; + u32 rx_unicast_frames_lo; + u32 rx_unicast_bytes_hi; + u32 rx_unicast_bytes_lo; + u32 rx_multicast_frames_hi; + u32 rx_multicast_frames_lo; + u32 rx_multicast_bytes_hi; + u32 rx_multicast_bytes_lo; + u32 rx_broadcast_frames_hi; + u32 rx_broadcast_frames_lo; + u32 rx_broadcast_bytes_hi; + u32 rx_broadcast_bytes_lo; + u32 rx_frames_discarded_hi; + u32 rx_frames_discarded_lo; + u32 rx_frames_dropped_hi; + u32 rx_frames_dropped_lo; }; + #define BCM_5710_FW_MAJOR_VERSION 7 -#define BCM_5710_FW_MINOR_VERSION 0 -#define BCM_5710_FW_REVISION_VERSION 29 +#define BCM_5710_FW_MINOR_VERSION 8 +#define BCM_5710_FW_REVISION_VERSION 19 #define BCM_5710_FW_ENGINEERING_VERSION 0 #define BCM_5710_FW_COMPILE_FLAGS 1 @@ -3258,6 +3434,10 @@ struct regpair { __le32 hi; }; +struct regpair_native { + u32 lo; + u32 hi; +}; /* * Classify rule opcodes in E2/E3 @@ -3308,8 +3488,10 @@ struct client_init_rx_data { #define CLIENT_INIT_RX_DATA_TPA_EN_IPV4_SHIFT 0 #define CLIENT_INIT_RX_DATA_TPA_EN_IPV6 (0x1<<1) #define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1 -#define CLIENT_INIT_RX_DATA_RESERVED5 (0x3F<<2) -#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 2 +#define CLIENT_INIT_RX_DATA_TPA_MODE (0x1<<2) +#define CLIENT_INIT_RX_DATA_TPA_MODE_SHIFT 2 +#define CLIENT_INIT_RX_DATA_RESERVED5 (0x1F<<3) +#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 3 u8 vmqueue_mode_en_flg; u8 extra_data_over_sgl_en_flg; u8 cache_line_alignment_log_size; @@ -3324,7 +3506,7 @@ struct client_init_rx_data { u8 outer_vlan_removal_enable_flg; u8 status_block_id; u8 rx_sb_index_number; - u8 reserved0; + u8 dont_verify_rings_pause_thr_flg; u8 max_tpa_queues; u8 silent_vlan_removal_flg; __le16 max_bytes_on_bd; @@ -3387,11 +3569,14 @@ struct client_init_tx_data { #define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2 #define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3) #define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3 -#define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4) -#define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4 +#define CLIENT_INIT_TX_DATA_RESERVED0 (0xFFF<<4) +#define CLIENT_INIT_TX_DATA_RESERVED0_SHIFT 4 u8 default_vlan_flg; - u8 reserved2; - __le32 reserved3; + u8 force_default_pri_flg; + u8 tunnel_lso_inc_ip_id; + u8 refuse_outband_vlan_flg; + u8 tunnel_non_lso_pcsum_location; + u8 reserved1; }; /* @@ -3425,6 +3610,11 @@ struct client_update_ramrod_data { __le16 silent_vlan_mask; u8 silent_vlan_removal_flg; u8 silent_vlan_change_flg; + u8 refuse_outband_vlan_flg; + u8 refuse_outband_vlan_change_flg; + u8 tx_switching_flg; + u8 tx_switching_change_flg; + __le32 reserved1; __le32 echo; }; @@ -3494,7 +3684,8 @@ struct eth_classify_header { */ struct eth_classify_mac_cmd { struct eth_classify_cmd_header header; - __le32 reserved0; + __le16 reserved0; + __le16 inner_mac; __le16 mac_lsb; __le16 mac_mid; __le16 mac_msb; @@ -3507,7 +3698,8 @@ struct eth_classify_mac_cmd { */ struct eth_classify_pair_cmd { struct eth_classify_cmd_header header; - __le32 reserved0; + __le16 reserved0; + __le16 inner_mac; __le16 mac_lsb; __le16 mac_mid; __le16 mac_msb; 
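The classification commands just above (eth_classify_mac_cmd and eth_classify_pair_cmd, which this patch extends with an inner_mac word for tunneled traffic) carry the station address as three 16-bit words: mac_msb, mac_mid and mac_lsb. A minimal standalone sketch of that halfword packing follows; the helper name is hypothetical and the byte ordering is an assumption inferred from the msb/mid/lsb field naming, not something stated in this patch:

#include <stdint.h>

/* Split a 6-byte MAC address (mac[0] being the byte transmitted first,
 * i.e. the most significant) into the three halfwords used by the
 * classification ramrod structures sketched above. */
static void mac_to_fw_halfwords(const uint8_t mac[6],
				uint16_t *msb, uint16_t *mid, uint16_t *lsb)
{
	*msb = (uint16_t)(((uint16_t)mac[0] << 8) | mac[1]);
	*mid = (uint16_t)(((uint16_t)mac[2] << 8) | mac[3]);
	*lsb = (uint16_t)(((uint16_t)mac[4] << 8) | mac[5]);
}

In the driver proper each halfword would additionally be converted with cpu_to_le16() before being stored into the __le16 fields of the command, since the structures declare little-endian storage.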
@@ -3657,11 +3849,12 @@ struct eth_fast_path_rx_cqe { u8 placement_offset; __le32 rss_hash_result; __le16 vlan_tag; - __le16 pkt_len; + __le16 pkt_len_or_gro_seg_len; __le16 len_on_bd; struct parsing_flags pars_flags; union eth_sgl_or_raw_data sgl_or_raw_data; - __le32 reserved1[8]; + __le32 reserved1[7]; + u32 marker; }; @@ -3729,8 +3922,68 @@ struct eth_halt_ramrod_data { /* - * Command for setting multicast classification for a client + * destination and source mac address. */ +struct eth_mac_addresses { +#if defined(__BIG_ENDIAN) + __le16 dst_mid; + __le16 dst_lo; +#elif defined(__LITTLE_ENDIAN) + __le16 dst_lo; + __le16 dst_mid; +#endif +#if defined(__BIG_ENDIAN) + __le16 src_lo; + __le16 dst_hi; +#elif defined(__LITTLE_ENDIAN) + __le16 dst_hi; + __le16 src_lo; +#endif +#if defined(__BIG_ENDIAN) + __le16 src_hi; + __le16 src_mid; +#elif defined(__LITTLE_ENDIAN) + __le16 src_mid; + __le16 src_hi; +#endif +}; + +/* tunneling related data */ +struct eth_tunnel_data { +#if defined(__BIG_ENDIAN) + __le16 dst_mid; + __le16 dst_lo; +#elif defined(__LITTLE_ENDIAN) + __le16 dst_lo; + __le16 dst_mid; +#endif +#if defined(__BIG_ENDIAN) + __le16 reserved0; + __le16 dst_hi; +#elif defined(__LITTLE_ENDIAN) + __le16 dst_hi; + __le16 reserved0; +#endif +#if defined(__BIG_ENDIAN) + u8 reserved1; + u8 ip_hdr_start_inner_w; + __le16 pseudo_csum; +#elif defined(__LITTLE_ENDIAN) + __le16 pseudo_csum; + u8 ip_hdr_start_inner_w; + u8 reserved1; +#endif +}; + +/* union for mac addresses and for tunneling data. + * considered as tunneling data only if (tunnel_exist == 1). + */ +union eth_mac_addr_or_tunnel_data { + struct eth_mac_addresses mac_addr; + struct eth_tunnel_data tunnel_data; +}; + +/*Command for setting multicast classification for a client */ struct eth_multicast_rules_cmd { u8 cmd_general_data; #define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0) @@ -3748,7 +4001,6 @@ struct eth_multicast_rules_cmd { struct regpair reserved3; }; - /* * parameters for multicast classification ramrod */ @@ -3757,7 +4009,6 @@ struct eth_multicast_rules_ramrod_data { struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT]; }; - /* * Place holder for ramrods protocol specific data */ @@ -3821,13 +4072,14 @@ struct eth_rss_update_ramrod_data { #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5) #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5 -#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<6) -#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 6 -#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0 (0x1<<7) -#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0_SHIFT 7 +#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<6) +#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 6 +#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7) +#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7 u8 rss_result_mask; u8 rss_mode; - __le32 __reserved2; + __le16 udp_4tuple_dst_port_mask; + __le16 udp_4tuple_dst_port_value; u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE]; __le32 rss_key[T_ETH_RSS_KEY]; __le32 echo; @@ -3991,6 +4243,23 @@ enum eth_tpa_update_command { MAX_ETH_TPA_UPDATE_COMMAND }; +/* In case of LSO over IPv4 tunnel, whether to increment + * IP ID on external IP header or internal IP header + */ +enum eth_tunnel_lso_inc_ip_id { + EXT_HEADER, + INT_HEADER, + MAX_ETH_TUNNEL_LSO_INC_IP_ID +}; + +/* In case tunnel exist and L4 checksum offload, + * the pseudo checksum location, on packet or on BD. 
+ */ +enum eth_tunnel_non_lso_pcsum_location { + PCSUM_ON_PKT, + PCSUM_ON_BD, + MAX_ETH_TUNNEL_NON_LSO_PCSUM_LOCATION +}; /* * Tx regular BD structure @@ -4040,27 +4309,29 @@ struct eth_tx_start_bd { #define ETH_TX_START_BD_HDR_NBDS_SHIFT 0 #define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4) #define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4 -#define ETH_TX_START_BD_RESREVED (0x1<<5) -#define ETH_TX_START_BD_RESREVED_SHIFT 5 -#define ETH_TX_START_BD_ETH_ADDR_TYPE (0x3<<6) -#define ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT 6 +#define ETH_TX_START_BD_PARSE_NBDS (0x3<<5) +#define ETH_TX_START_BD_PARSE_NBDS_SHIFT 5 +#define ETH_TX_START_BD_TUNNEL_EXIST (0x1<<7) +#define ETH_TX_START_BD_TUNNEL_EXIST_SHIFT 7 }; /* * Tx parsing BD structure for ETH E1/E1h */ struct eth_tx_parse_bd_e1x { - u8 global_data; + __le16 global_data; #define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0) #define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0 -#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x1<<4) -#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 4 -#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<5) -#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 5 -#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<6) -#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 6 -#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<7) -#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 7 +#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE (0x3<<4) +#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT 4 +#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<6) +#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 6 +#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<7) +#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 7 +#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<8) +#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 8 +#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x7F<<9) +#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 9 u8 tcp_flags; #define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0) #define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0 @@ -4079,7 +4350,6 @@ struct eth_tx_parse_bd_e1x { #define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7) #define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7 u8 ip_hlen_w; - s8 reserved; __le16 total_hlen_w; __le16 tcp_pseudo_csum; __le16 lso_mss; @@ -4091,26 +4361,66 @@ struct eth_tx_parse_bd_e1x { * Tx parsing BD structure for ETH E2 */ struct eth_tx_parse_bd_e2 { - __le16 dst_mac_addr_lo; - __le16 dst_mac_addr_mid; - __le16 dst_mac_addr_hi; - __le16 src_mac_addr_lo; - __le16 src_mac_addr_mid; - __le16 src_mac_addr_hi; + union eth_mac_addr_or_tunnel_data data; __le32 parsing_data; -#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x1FFF<<0) -#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0 -#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<13) -#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 13 -#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<17) -#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 17 -#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<31) -#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 31 +#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W (0x7FF<<0) +#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT 0 +#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<11) +#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 11 +#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<15) +#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 15 +#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<16) +#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 16 +#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE (0x3<<30) +#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT 30 +}; + +/* + * Tx 2nd parsing BD structure for ETH packet + 
*/ +struct eth_tx_parse_2nd_bd { + __le16 global_data; +#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W (0xF<<0) +#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT 0 +#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER (0x1<<4) +#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT 4 +#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN (0x1<<5) +#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT 5 +#define ETH_TX_PARSE_2ND_BD_NS_FLG (0x1<<6) +#define ETH_TX_PARSE_2ND_BD_NS_FLG_SHIFT 6 +#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST (0x1<<7) +#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST_SHIFT 7 +#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W (0x1F<<8) +#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT 8 +#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x7<<13) +#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 13 + __le16 reserved1; + u8 tcp_flags; +#define ETH_TX_PARSE_2ND_BD_FIN_FLG (0x1<<0) +#define ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT 0 +#define ETH_TX_PARSE_2ND_BD_SYN_FLG (0x1<<1) +#define ETH_TX_PARSE_2ND_BD_SYN_FLG_SHIFT 1 +#define ETH_TX_PARSE_2ND_BD_RST_FLG (0x1<<2) +#define ETH_TX_PARSE_2ND_BD_RST_FLG_SHIFT 2 +#define ETH_TX_PARSE_2ND_BD_PSH_FLG (0x1<<3) +#define ETH_TX_PARSE_2ND_BD_PSH_FLG_SHIFT 3 +#define ETH_TX_PARSE_2ND_BD_ACK_FLG (0x1<<4) +#define ETH_TX_PARSE_2ND_BD_ACK_FLG_SHIFT 4 +#define ETH_TX_PARSE_2ND_BD_URG_FLG (0x1<<5) +#define ETH_TX_PARSE_2ND_BD_URG_FLG_SHIFT 5 +#define ETH_TX_PARSE_2ND_BD_ECE_FLG (0x1<<6) +#define ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT 6 +#define ETH_TX_PARSE_2ND_BD_CWR_FLG (0x1<<7) +#define ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT 7 + u8 reserved2; + u8 tunnel_udp_hdr_start_w; + u8 fw_ip_hdr_to_payload_w; + __le16 fw_ip_csum_wo_len_flags_frag; + __le16 hw_ip_id; + __le32 tcp_send_seq; }; -/* - * The last BD in the BD memory will hold a pointer to the next BD memory - */ +/* The last BD in the BD memory will hold a pointer to the next BD memory */ struct eth_tx_next_bd { __le32 addr_lo; __le32 addr_hi; @@ -4125,6 +4435,7 @@ union eth_tx_bd_types { struct eth_tx_bd reg_bd; struct eth_tx_parse_bd_e1x parse_bd_e1x; struct eth_tx_parse_bd_e2 parse_bd_e2; + struct eth_tx_parse_2nd_bd parse_2nd_bd; struct eth_tx_next_bd next_bd; }; @@ -4215,6 +4526,15 @@ enum set_mac_action_type { /* + * Ethernet TPA Modes + */ +enum tpa_mode { + TPA_LRO, + TPA_GRO, + MAX_TPA_MODE}; + + +/* * tpa update ramrod data */ struct tpa_update_ramrod_data { @@ -4224,7 +4544,8 @@ struct tpa_update_ramrod_data { u8 max_tpa_queues; u8 max_sges_for_packet; u8 complete_on_both_clients; - __le16 reserved1; + u8 dont_verify_rings_pause_thr_flg; + u8 tpa_mode; __le16 sge_buff_size; __le16 max_agg_size; __le32 sge_page_base_lo; @@ -4271,13 +4592,13 @@ struct tstorm_eth_function_common_config { * MAC filtering configuration parameters per port in Tstorm */ struct tstorm_eth_mac_filter_config { - __le32 ucast_drop_all; - __le32 ucast_accept_all; - __le32 mcast_drop_all; - __le32 mcast_accept_all; - __le32 bcast_accept_all; - __le32 vlan_filter[2]; - __le32 unmatched_unicast; + u32 ucast_drop_all; + u32 ucast_accept_all; + u32 mcast_drop_all; + u32 mcast_accept_all; + u32 bcast_accept_all; + u32 vlan_filter[2]; + u32 unmatched_unicast; }; @@ -4366,8 +4687,21 @@ struct fcoe_statistics_params { /* + * The data afex vif list ramrod need + */ +struct afex_vif_list_ramrod_data { + u8 afex_vif_list_command; + u8 func_bit_map; + __le16 vif_list_index; + u8 func_to_clear; + u8 echo; + __le16 reserved1; +}; + + +/* * cfc delete event data -*/ + */ struct cfc_del_event_data { u32 cid; u32 reserved0; @@ -4439,6 +4773,65 @@ struct 
cmng_struct_per_port { struct cmng_flags_per_port flags; }; +/* + * a single rate shaping counter. can be used as protocol or vnic counter + */ +struct rate_shaping_counter { + u32 quota; +#if defined(__BIG_ENDIAN) + u16 __reserved0; + u16 rate; +#elif defined(__LITTLE_ENDIAN) + u16 rate; + u16 __reserved0; +#endif +}; + +/* + * per-vnic rate shaping variables + */ +struct rate_shaping_vars_per_vn { + struct rate_shaping_counter vn_counter; +}; + +/* + * per-vnic fairness variables + */ +struct fairness_vars_per_vn { + u32 cos_credit_delta[MAX_COS_NUMBER]; + u32 vn_credit_delta; + u32 __reserved0; +}; + +/* + * cmng port init state + */ +struct cmng_vnic { + struct rate_shaping_vars_per_vn vnic_max_rate[4]; + struct fairness_vars_per_vn vnic_min_rate[4]; +}; + +/* + * cmng port init state + */ +struct cmng_init { + struct cmng_struct_per_port port; + struct cmng_vnic vnic; +}; + + +/* + * driver parameters for congestion management init, all rates are in Mbps + */ +struct cmng_init_input { + u32 port_rate; + u16 vnic_min_rate[4]; + u16 vnic_max_rate[4]; + u16 cos_min_rate[MAX_COS_NUMBER]; + u16 cos_to_pause_mask[MAX_COS_NUMBER]; + struct cmng_flags_per_port flags; +}; + /* * Protocol-common command ID for slow path elements @@ -4447,17 +4840,17 @@ enum common_spqe_cmd_id { RAMROD_CMD_ID_COMMON_UNUSED, RAMROD_CMD_ID_COMMON_FUNCTION_START, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, + RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, RAMROD_CMD_ID_COMMON_CFC_DEL, RAMROD_CMD_ID_COMMON_CFC_DEL_WB, RAMROD_CMD_ID_COMMON_STAT_QUERY, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, RAMROD_CMD_ID_COMMON_START_TRAFFIC, - RAMROD_CMD_ID_COMMON_RESERVED1, - RAMROD_CMD_ID_COMMON_RESERVED2, + RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, + RAMROD_CMD_ID_COMMON_SET_TIMESYNC, MAX_COMMON_SPQE_CMD_ID }; - /* * Per-protocol connection types */ @@ -4654,21 +5047,42 @@ struct vf_flr_event_data { */ struct malicious_vf_event_data { u8 vf_id; - u8 reserved0; + u8 err_id; u16 reserved1; u32 reserved2; u32 reserved3; }; /* - * union for all event ring message types + * vif list event data */ +struct vif_list_event_data { + u8 func_bit_map; + u8 echo; + __le16 reserved0; + __le32 reserved1; + __le32 reserved2; +}; + +/* function update event data */ +struct function_update_event_data { + u8 echo; + u8 reserved; + __le16 reserved0; + __le32 reserved1; + __le32 reserved2; +}; + + +/* union for all event ring message types */ union event_data { struct vf_pf_event_data vf_pf_event; struct eth_event_data eth_event; struct cfc_del_event_data cfc_del_event; struct vf_flr_event_data vf_flr_event; struct malicious_vf_event_data malicious_vf_event; + struct vif_list_event_data vif_list_event; + struct function_update_event_data function_update_event; }; @@ -4676,7 +5090,7 @@ union event_data { * per PF event ring data */ struct event_ring_data { - struct regpair base_addr; + struct regpair_native base_addr; #if defined(__BIG_ENDIAN) u8 index_id; u8 sb_id; @@ -4733,16 +5147,16 @@ enum event_ring_opcode { EVENT_RING_OPCODE_MALICIOUS_VF, EVENT_RING_OPCODE_FORWARD_SETUP, EVENT_RING_OPCODE_RSS_UPDATE_RULES, - EVENT_RING_OPCODE_RESERVED1, - EVENT_RING_OPCODE_RESERVED2, + EVENT_RING_OPCODE_FUNCTION_UPDATE, + EVENT_RING_OPCODE_AFEX_VIF_LISTS, EVENT_RING_OPCODE_SET_MAC, EVENT_RING_OPCODE_CLASSIFICATION_RULES, EVENT_RING_OPCODE_FILTERS_RULES, EVENT_RING_OPCODE_MULTICAST_RULES, + EVENT_RING_OPCODE_SET_TIMESYNC, MAX_EVENT_RING_OPCODE }; - /* * Modes for fairness algorithm */ @@ -4754,16 +5168,6 @@ enum fairness_mode { /* - * per-vnic fairness variables - */ -struct 
fairness_vars_per_vn { - u32 cos_credit_delta[MAX_COS_NUMBER]; - u32 vn_credit_delta; - u32 __reserved0; -}; - - -/* * Priority and cos */ struct priority_cos { @@ -4789,14 +5193,41 @@ struct flow_control_configuration { * */ struct function_start_data { - __le16 function_mode; + u8 function_mode; + u8 allow_npar_tx_switching; __le16 sd_vlan_tag; - u16 reserved; + __le16 vif_id; u8 path_id; u8 network_cos_mode; + u8 dmae_cmd_id; + u8 gre_tunnel_mode; + u8 gre_tunnel_rss; + u8 nvgre_clss_en; + __le16 reserved1[2]; +}; + +struct function_update_data { + u8 vif_id_change_flg; + u8 afex_default_vlan_change_flg; + u8 allowed_priorities_change_flg; + u8 network_cos_mode_change_flg; + __le16 vif_id; + __le16 afex_default_vlan; + u8 allowed_priorities; + u8 network_cos_mode; + u8 lb_mode_en_change_flg; + u8 lb_mode_en; + u8 tx_switch_suspend_change_flg; + u8 tx_switch_suspend; + u8 echo; + u8 reserved1; + u8 update_gre_cfg_flg; + u8 gre_tunnel_mode; + u8 gre_tunnel_rss; + u8 nvgre_clss_en; + u32 reserved3; }; - /* * FW version stored in the Xstorm RAM */ @@ -4823,6 +5254,22 @@ struct fw_version { #define __FW_VERSION_RESERVED_SHIFT 4 }; +/* GRE RSS Mode */ +enum gre_rss_mode { + GRE_OUTER_HEADERS_RSS, + GRE_INNER_HEADERS_RSS, + NVGRE_KEY_ENTROPY_RSS, + MAX_GRE_RSS_MODE +}; + +/* GRE Tunnel Mode */ +enum gre_tunnel_type { + NO_GRE_TUNNEL, + NVGRE_TUNNEL, + L2GRE_TUNNEL, + IPGRE_TUNNEL, + MAX_GRE_TUNNEL_TYPE +}; /* * Dynamic Host-Coalescing - Driver(host) counters @@ -4901,7 +5348,7 @@ struct pci_entity { * The fast-path status block meta-data, common to all chips */ struct hc_sb_data { - struct regpair host_sb_addr; + struct regpair_native host_sb_addr; struct hc_status_block_sm state_machine[HC_SB_MAX_SM]; struct pci_entity p_func; #if defined(__BIG_ENDIAN) @@ -4915,7 +5362,7 @@ struct hc_sb_data { u8 state; u8 rsrv0; #endif - struct regpair rsrv1[2]; + struct regpair_native rsrv1[2]; }; @@ -4933,7 +5380,7 @@ enum hc_segment { * The fast-path status block meta-data */ struct hc_sp_status_block_data { - struct regpair host_sb_addr; + struct regpair_native host_sb_addr; #if defined(__BIG_ENDIAN) u8 rsrv1; u8 state; @@ -4986,6 +5433,26 @@ enum ip_ver { MAX_IP_VER }; +/* + * Malicious VF error ID + */ +enum malicious_vf_error_id { + VF_PF_CHANNEL_NOT_READY, + ETH_ILLEGAL_BD_LENGTHS, + ETH_PACKET_TOO_SHORT, + ETH_PAYLOAD_TOO_BIG, + ETH_ILLEGAL_ETH_TYPE, + ETH_ILLEGAL_LSO_HDR_LEN, + ETH_TOO_MANY_BDS, + ETH_ZERO_HDR_NBDS, + ETH_START_BD_NOT_SET, + ETH_ILLEGAL_PARSE_NBDS, + ETH_IPV6_AND_CHECKSUM, + ETH_VLAN_FLG_INCORRECT, + ETH_ILLEGAL_LSO_MSS, + ETH_TUNNEL_NOT_SUPPORTED, + MAX_MALICIOUS_VF_ERROR_ID +}; /* * Multi-function modes @@ -4994,7 +5461,7 @@ enum mf_mode { SINGLE_FUNCTION, MULTI_FUNCTION_SD, MULTI_FUNCTION_SI, - MULTI_FUNCTION_RESERVED, + MULTI_FUNCTION_AFEX, MAX_MF_MODE }; @@ -5119,6 +5586,7 @@ union protocol_common_specific_data { u8 protocol_data[8]; struct regpair phy_address; struct regpair mac_config_addr; + struct afex_vif_list_ramrod_data afex_vif_list_data; }; /* @@ -5129,30 +5597,6 @@ struct protocol_common_spe { union protocol_common_specific_data data; }; - -/* - * a single rate shaping counter. 
can be used as protocol or vnic counter - */ -struct rate_shaping_counter { - u32 quota; -#if defined(__BIG_ENDIAN) - u16 __reserved0; - u16 rate; -#elif defined(__LITTLE_ENDIAN) - u16 rate; - u16 __reserved0; -#endif -}; - - -/* - * per-vnic rate shaping variables - */ -struct rate_shaping_vars_per_vn { - struct rate_shaping_counter vn_counter; -}; - - /* * The send queue element */ @@ -5321,6 +5765,18 @@ enum vf_pf_channel_state { /* + * vif_list_rule_kind + */ +enum vif_list_rule_kind { + VIF_LIST_RULE_SET, + VIF_LIST_RULE_GET, + VIF_LIST_RULE_CLEAR_ALL, + VIF_LIST_RULE_CLEAR_FUNC, + MAX_VIF_LIST_RULE_KIND +}; + + +/* * zone A per-queue data */ struct xstorm_queue_zone_data { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h index 4d748e77d1a..bd90e50bd8e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h @@ -1,15 +1,15 @@ /* bnx2x_init.h: Broadcom Everest network driver. * Structures and macroes needed during the initialization. * - * Copyright (c) 2007-2011 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * - * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com> * Written by: Eliezer Tamir - * Modified by: Vladislav Zolotarov <vladz@broadcom.com> + * Modified by: Vladislav Zolotarov */ #ifndef BNX2X_INIT_H @@ -125,7 +125,7 @@ enum { MODE_MF = 0x00000100, MODE_MF_SD = 0x00000200, MODE_MF_SI = 0x00000400, - MODE_MF_NIV = 0x00000800, + MODE_MF_AFEX = 0x00000800, MODE_E3_A0 = 0x00001000, MODE_E3_B0 = 0x00002000, MODE_COS3 = 0x00004000, @@ -241,7 +241,8 @@ static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos) REG_WR(bp, reg_addr, reg_bit_map | q_bit_map); /* set/clear queue bit in command-queue bit map - (E2/E3A0 only, valid COS values are 0/1) */ + * (E2/E3A0 only, valid COS values are 0/1) + */ if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) { reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num); reg_bit_map = REG_RD(bp, reg_addr); @@ -277,7 +278,215 @@ static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode, } -/* Returns the index of start or end of a specific block stage in ops array*/ +/* congestion managment port init api description + * the api works as follows: + * the driver should pass the cmng_init_input struct, the port_init function + * will prepare the required internal ram structure which will be passed back + * to the driver (cmng_init) that will write it into the internal ram. + * + * IMPORTANT REMARKS: + * 1. the cmng_init struct does not represent the contiguous internal ram + * structure. the driver should use the XSTORM_CMNG_PERPORT_VARS_OFFSET + * offset in order to write the port sub struct and the + * PFID_FROM_PORT_AND_VNIC offset for writing the vnic sub struct (in other + * words - don't use memcpy!). + * 2. although the cmng_init struct is filled for the maximal vnic number + * possible, the driver should only write the valid vnics into the internal + * ram according to the appropriate port mode. 
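A hedged sketch of the call sequence this remark describes: build a cmng_init_input, run it through bnx2x_init_cmng() (defined below), then write the port and vnic sub-structs at their own internal-RAM offsets rather than with one memcpy. The storm_memset_* helper names are placeholders, not necessarily the driver's real ones:

/* sketch only - assumes the driver's struct bnx2x context */
static void example_cmng_program(struct bnx2x *bp, u32 port_rate_mbps)
{
        struct cmng_init_input input = { };
        struct cmng_init ram;

        input.port_rate = port_rate_mbps;
        /* ... fill vnic_min_rate[] / vnic_max_rate[] from config ... */

        bnx2x_init_cmng(&input, &ram);

        /* per remark 1 above the two sub-structs are not contiguous
         * in internal RAM, so each is written at its own offset
         */
        storm_memset_cmng_port(bp, &ram.port);  /* placeholder helper */
        storm_memset_cmng_vnic(bp, &ram.vnic);  /* placeholder helper */
}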
+ */ +#define BITS_TO_BYTES(x) ((x)/8) + +/* CMNG constants, as derived from system spec calculations */ + +/* default MIN rate in case VNIC min rate is configured to zero- 100Mbps */ +#define DEF_MIN_RATE 100 + +/* resolution of the rate shaping timer - 400 usec */ +#define RS_PERIODIC_TIMEOUT_USEC 400 + +/* number of bytes in single QM arbitration cycle - + * coefficient for calculating the fairness timer + */ +#define QM_ARB_BYTES 160000 + +/* resolution of Min algorithm 1:100 */ +#define MIN_RES 100 + +/* how many bytes above threshold for + * the minimal credit of Min algorithm + */ +#define MIN_ABOVE_THRESH 32768 + +/* Fairness algorithm integration time coefficient - + * for calculating the actual Tfair + */ +#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES) + +/* Memory of fairness algorithm - 2 cycles */ +#define FAIR_MEM 2 +#define SAFC_TIMEOUT_USEC 52 + +#define SDM_TICKS 4 + + +static inline void bnx2x_init_max(const struct cmng_init_input *input_data, + u32 r_param, struct cmng_init *ram_data) +{ + u32 vnic; + struct cmng_vnic *vdata = &ram_data->vnic; + struct cmng_struct_per_port *pdata = &ram_data->port; + /* rate shaping per-port variables + * 100 micro seconds in SDM ticks = 25 + * since each tick is 4 microSeconds + */ + + pdata->rs_vars.rs_periodic_timeout = + RS_PERIODIC_TIMEOUT_USEC / SDM_TICKS; + + /* this is the threshold below which no timer arming will occur. + * 1.25 coefficient is for the threshold to be a little bigger + * then the real time to compensate for timer in-accuracy + */ + pdata->rs_vars.rs_threshold = + (5 * RS_PERIODIC_TIMEOUT_USEC * r_param)/4; + + /* rate shaping per-vnic variables */ + for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) { + /* global vnic counter */ + vdata->vnic_max_rate[vnic].vn_counter.rate = + input_data->vnic_max_rate[vnic]; + /* maximal Mbps for this vnic + * the quota in each timer period - number of bytes + * transmitted in this period + */ + vdata->vnic_max_rate[vnic].vn_counter.quota = + RS_PERIODIC_TIMEOUT_USEC * + (u32)vdata->vnic_max_rate[vnic].vn_counter.rate / 8; + } + +} + +static inline void bnx2x_init_min(const struct cmng_init_input *input_data, + u32 r_param, struct cmng_init *ram_data) +{ + u32 vnic, fair_periodic_timeout_usec, vnicWeightSum, tFair; + struct cmng_vnic *vdata = &ram_data->vnic; + struct cmng_struct_per_port *pdata = &ram_data->port; + + /* this is the resolution of the fairness timer */ + fair_periodic_timeout_usec = QM_ARB_BYTES / r_param; + + /* fairness per-port variables + * for 10G it is 1000usec. for 1G it is 10000usec. + */ + tFair = T_FAIR_COEF / input_data->port_rate; + + /* this is the threshold below which we won't arm the timer anymore */ + pdata->fair_vars.fair_threshold = QM_ARB_BYTES; + + /* we multiply by 1e3/8 to get bytes/msec. 
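The bnx2x_init_max() arithmetic above reduces to a few integer operations; a standalone check with invented rates (10 Gb/s port, 2.5 Gb/s vnic cap):

#include <stdio.h>
#include <stdint.h>

#define RS_PERIODIC_TIMEOUT_USEC 400

int main(void)
{
        uint32_t port_rate = 10000;       /* Mbps */
        uint32_t r_param = port_rate / 8; /* bytes per usec at line rate */
        uint32_t vn_rate = 2500;          /* invented per-vnic max, Mbps */

        /* 5/4 is the 1.25 timer-inaccuracy coefficient from the comment */
        uint32_t rs_threshold = (5 * RS_PERIODIC_TIMEOUT_USEC * r_param) / 4;

        /* bytes this vnic may send per rate-shaping timer period */
        uint32_t quota = RS_PERIODIC_TIMEOUT_USEC * vn_rate / 8;

        printf("r_param=%u B/usec threshold=%u B quota=%u B\n",
               r_param, rs_threshold, quota);
        return 0;
}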
We don't want the credits + * to pass a credit of the T_FAIR*FAIR_MEM (algorithm resolution) + */ + pdata->fair_vars.upper_bound = r_param * tFair * FAIR_MEM; + + /* since each tick is 4 microSeconds */ + pdata->fair_vars.fairness_timeout = + fair_periodic_timeout_usec / SDM_TICKS; + + /* calculate sum of weights */ + vnicWeightSum = 0; + + for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) + vnicWeightSum += input_data->vnic_min_rate[vnic]; + + /* global vnic counter */ + if (vnicWeightSum > 0) { + /* fairness per-vnic variables */ + for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) { + /* this is the credit for each period of the fairness + * algorithm - number of bytes in T_FAIR (this vnic + * share of the port rate) + */ + vdata->vnic_min_rate[vnic].vn_credit_delta = + (u32)input_data->vnic_min_rate[vnic] * 100 * + (T_FAIR_COEF / (8 * 100 * vnicWeightSum)); + if (vdata->vnic_min_rate[vnic].vn_credit_delta < + pdata->fair_vars.fair_threshold + + MIN_ABOVE_THRESH) { + vdata->vnic_min_rate[vnic].vn_credit_delta = + pdata->fair_vars.fair_threshold + + MIN_ABOVE_THRESH; + } + } + } +} + +static inline void bnx2x_init_fw_wrr(const struct cmng_init_input *input_data, + u32 r_param, struct cmng_init *ram_data) +{ + u32 vnic, cos; + u32 cosWeightSum = 0; + struct cmng_vnic *vdata = &ram_data->vnic; + struct cmng_struct_per_port *pdata = &ram_data->port; + + for (cos = 0; cos < MAX_COS_NUMBER; cos++) + cosWeightSum += input_data->cos_min_rate[cos]; + + if (cosWeightSum > 0) { + + for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) { + /* Since cos and vnic shouldn't work together the rate + * to divide between the coses is the port rate. + */ + u32 *ccd = vdata->vnic_min_rate[vnic].cos_credit_delta; + for (cos = 0; cos < MAX_COS_NUMBER; cos++) { + /* this is the credit for each period of + * the fairness algorithm - number of bytes + * in T_FAIR (this cos share of the vnic rate) + */ + ccd[cos] = + (u32)input_data->cos_min_rate[cos] * 100 * + (T_FAIR_COEF / (8 * 100 * cosWeightSum)); + if (ccd[cos] < pdata->fair_vars.fair_threshold + + MIN_ABOVE_THRESH) { + ccd[cos] = + pdata->fair_vars.fair_threshold + + MIN_ABOVE_THRESH; + } + } + } + } +} + +static inline void bnx2x_init_safc(const struct cmng_init_input *input_data, + struct cmng_init *ram_data) +{ + /* in microSeconds */ + ram_data->port.safc_vars.safc_timeout_usec = SAFC_TIMEOUT_USEC; +} + +/* Congestion management port init */ +static inline void bnx2x_init_cmng(const struct cmng_init_input *input_data, + struct cmng_init *ram_data) +{ + u32 r_param; + memset(ram_data, 0, sizeof(struct cmng_init)); + + ram_data->port.flags = input_data->flags; + + /* number of bytes transmitted in a rate of 10Gbps + * in one usec = 1.25KB. 
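To see the clamp in the min-rate path above actually fire, the credit computation can be run standalone; the 100/9900 Mbps weight split is invented:

#include <stdio.h>
#include <stdint.h>

#define QM_ARB_BYTES            160000
#define MIN_RES                 100
#define MIN_ABOVE_THRESH        32768
#define T_FAIR_COEF     ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)

int main(void)
{
        uint32_t min_rate[2] = { 100, 9900 }; /* invented vnic min rates */
        uint32_t weight_sum = min_rate[0] + min_rate[1];
        uint32_t fair_threshold = QM_ARB_BYTES;

        for (int vn = 0; vn < 2; vn++) {
                uint32_t raw = min_rate[vn] * 100 *
                               (T_FAIR_COEF / (8 * 100 * weight_sum));
                uint32_t delta = raw;

                /* same clamp as bnx2x_init_min() above */
                if (delta < fair_threshold + MIN_ABOVE_THRESH)
                        delta = fair_threshold + MIN_ABOVE_THRESH;
                printf("vnic %d: raw=%u -> vn_credit_delta=%u\n",
                       vn, raw, delta);
        }
        return 0;
}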
+ */ + r_param = BITS_TO_BYTES(input_data->port_rate); + bnx2x_init_max(input_data, r_param, ram_data); + bnx2x_init_min(input_data, r_param, ram_data); + bnx2x_init_fw_wrr(input_data, r_param, ram_data); + bnx2x_init_safc(input_data, ram_data); +} + + + +/* Returns the index of start or end of a specific block stage in ops array */ #define BLOCK_OPS_IDX(block, stage, end) \ (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end)) @@ -357,7 +566,7 @@ static const struct { u32 e2; /* 57712 */ u32 e3; /* 578xx */ } reg_mask; /* Register mask (all valid bits) */ - char name[7]; /* Block's longest name is 6 characters long + char name[8]; /* Block's longest name is 7 characters long * (name + suffix) */ } bnx2x_blocks_parity_data[] = { @@ -431,23 +640,35 @@ static const struct { * [30] MCP Latched ump_tx_parity * [31] MCP Latched scpad_parity */ -#define MISC_AEU_ENABLE_MCP_PRTY_BITS \ +#define MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS \ (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ - AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY) + +#define MISC_AEU_ENABLE_MCP_PRTY_BITS \ + (MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS | \ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) /* Below registers control the MCP parity attention output. When * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are * enabled, when cleared - disabled. */ -static const u32 mcp_attn_ctl_regs[] = { - MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0, - MISC_REG_AEU_ENABLE4_NIG_0, - MISC_REG_AEU_ENABLE4_PXP_0, - MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0, - MISC_REG_AEU_ENABLE4_NIG_1, - MISC_REG_AEU_ENABLE4_PXP_1 +static const struct { + u32 addr; + u32 bits; +} mcp_attn_ctl_regs[] = { + { MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0, + MISC_AEU_ENABLE_MCP_PRTY_BITS }, + { MISC_REG_AEU_ENABLE4_NIG_0, + MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }, + { MISC_REG_AEU_ENABLE4_PXP_0, + MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }, + { MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0, + MISC_AEU_ENABLE_MCP_PRTY_BITS }, + { MISC_REG_AEU_ENABLE4_NIG_1, + MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }, + { MISC_REG_AEU_ENABLE4_PXP_1, + MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS } }; static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable) @@ -456,14 +677,14 @@ static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable) u32 reg_val; for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) { - reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]); + reg_val = REG_RD(bp, mcp_attn_ctl_regs[i].addr); if (enable) - reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS; + reg_val |= mcp_attn_ctl_regs[i].bits; else - reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS; + reg_val &= ~mcp_attn_ctl_regs[i].bits; - REG_WR(bp, mcp_attn_ctl_regs[i], reg_val); + REG_WR(bp, mcp_attn_ctl_regs[i].addr, reg_val); } } @@ -499,9 +720,7 @@ static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp) bnx2x_set_mcp_parity(bp, false); } -/** - * Clear the parity error status registers. - */ +/* Clear the parity error status registers. */ static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp) { int i; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h index 7ec1724753a..5669ed2e87d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h @@ -2,14 +2,14 @@ * Static functions needed during the initialization. * This file is "included" in bnx2x_main.c. 
* - * Copyright (c) 2007-2011 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * - * Maintained by: Eilon Greenstein <eilong@broadcom.com> - * Written by: Vladislav Zolotarov <vladz@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com> + * Written by: Vladislav Zolotarov */ #ifndef BNX2X_INIT_OPS_H @@ -69,12 +69,12 @@ static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len, { if (bp->dmae_ready) bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len); - else if (wb) - /* - * Wide bus registers with no dmae need to be written - * using indirect write. - */ + + /* in E1 chips BIOS initiated ZLR may interrupt widebus writes */ + else if (wb && CHIP_IS_E1(bp)) bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len); + + /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */ else bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len); } @@ -99,8 +99,14 @@ static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len) { if (bp->dmae_ready) bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len); - else + + /* in E1 chips BIOS initiated ZLR may interrupt widebus writes */ + else if (CHIP_IS_E1(bp)) bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len); + + /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */ + else + bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len); } static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, @@ -177,8 +183,14 @@ static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, { if (bp->dmae_ready) VIRT_WR_DMAE_LEN(bp, data, addr, len, 0); - else + + /* in E1 chips BIOS initiated ZLR may interrupt widebus writes */ + else if (CHIP_IS_E1(bp)) bnx2x_init_ind_wr(bp, addr, data, len); + + /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */ + else + bnx2x_init_str_wr(bp, addr, data, len); } static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, @@ -206,7 +218,7 @@ static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, /* gunzip_outlen is in dwords */ len = GUNZIP_OUTLEN(bp); for (i = 0; i < len; i++) - ((u32 *)GUNZIP_BUF(bp))[i] = + ((u32 *)GUNZIP_BUF(bp))[i] = (__force u32) cpu_to_le32(((u32 *)GUNZIP_BUF(bp))[i]); bnx2x_write_big_buf_wb(bp, addr, len); @@ -220,7 +232,7 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage) u16 op_end = INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_END)]; - union init_op *op; + const union init_op *op; u32 op_idx, op_type, addr, len; const u32 *data, *data_base; @@ -232,7 +244,7 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage) for (op_idx = op_start; op_idx < op_end; op_idx++) { - op = (union init_op *)&(INIT_OPS(bp)[op_idx]); + op = (const union init_op *)&(INIT_OPS(bp)[op_idx]); /* Get generic data */ op_type = op->raw.op; addr = op->raw.offset; @@ -636,15 +648,25 @@ static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, return rc; } +static int bnx2x_ilt_mem_op_cnic(struct bnx2x *bp, u8 memop) +{ + int rc = 0; + + if (CONFIGURE_NIC_MODE(bp)) + rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop); + if (!rc) + rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop); + + return rc; +} + static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop) { int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop); if (!rc) rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop); - if (!rc) + if (!rc && 
CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp)) rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop); - if (!rc) - rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop); return rc; } @@ -769,12 +791,19 @@ static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp, bnx2x_ilt_client_init_op(bp, ilt_cli, initop); } +static void bnx2x_ilt_init_op_cnic(struct bnx2x *bp, u8 initop) +{ + if (CONFIGURE_NIC_MODE(bp)) + bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop); + bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop); +} + static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop) { bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop); bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop); - bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop); - bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop); + if (CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp)) + bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop); } static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num, @@ -840,25 +869,15 @@ static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count, } } -static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count) +static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count, + u32 base_reg, u32 reg) { int i; - u32 wb_data[2]; - - wb_data[0] = wb_data[1] = 0; - + u32 wb_data[2] = {0, 0}; for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) { - REG_WR(bp, QM_REG_BASEADDR + i*4, + REG_WR(bp, base_reg + i*4, qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC)); - bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, - wb_data, 2); - - if (CHIP_IS_E1H(bp)) { - REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, - qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC)); - bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8, - wb_data, 2); - } + bnx2x_init_wr_wb(bp, reg + i*8, wb_data, 2); } } @@ -873,7 +892,12 @@ static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count, case INITOP_INIT: /* set in the init-value array */ case INITOP_SET: - bnx2x_qm_set_ptr_table(bp, qm_cid_count); + bnx2x_qm_set_ptr_table(bp, qm_cid_count, + QM_REG_BASEADDR, QM_REG_PTRTBL); + if (CHIP_IS_E1H(bp)) + bnx2x_qm_set_ptr_table(bp, qm_cid_count, + QM_REG_BASEADDR_EXT_A, + QM_REG_PTRTBL_EXT_A); break; case INITOP_CLEAR: break; @@ -883,7 +907,6 @@ static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count, /**************************************************************************** * SRC initializations ****************************************************************************/ -#ifdef BCM_CNIC /* called during init func stage */ static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2, dma_addr_t t2_mapping, int src_cid_count) @@ -908,5 +931,4 @@ static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2, U64_HI((u64)t2_mapping + (src_cid_count-1) * sizeof(struct src_ent))); } -#endif #endif /* BNX2X_INIT_OPS_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 2091e5dbbcd..53fb4fa61b4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -1,4 +1,4 @@ -/* Copyright 2008-2011 Broadcom Corporation +/* Copyright 2008-2013 Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -27,6 +27,10 @@ #include "bnx2x.h" #include "bnx2x_cmn.h" +typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy, + struct link_params 
*params, + u8 dev_addr, u16 addr, u8 byte_cnt, + u8 *o_buf, u8); /********************************************************/ #define ETH_HLEN 14 /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ @@ -35,12 +39,12 @@ #define ETH_MAX_PACKET_SIZE 1500 #define ETH_MAX_JUMBO_PACKET_SIZE 9600 #define MDIO_ACCESS_TIMEOUT 1000 -#define BMAC_CONTROL_RX_ENABLE 2 #define WC_LANE_MAX 4 #define I2C_SWITCH_WIDTH 2 #define I2C_BSC0 0 #define I2C_BSC1 1 #define I2C_WA_RETRY_CNT 3 +#define I2C_WA_PWR_ITER (I2C_WA_RETRY_CNT - 1) #define MCPR_IMC_COMMAND_READ_OP 1 #define MCPR_IMC_COMMAND_WRITE_OP 2 @@ -121,6 +125,7 @@ #define GP_STATUS_10G_XFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI #define GP_STATUS_20G_DXGXS MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS #define GP_STATUS_10G_SFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI +#define GP_STATUS_20G_KR2 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2 #define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD #define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD #define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD @@ -137,12 +142,21 @@ #define LINK_20GTFD LINK_STATUS_SPEED_AND_DUPLEX_20GTFD #define LINK_20GXFD LINK_STATUS_SPEED_AND_DUPLEX_20GXFD +#define LINK_UPDATE_MASK \ + (LINK_STATUS_SPEED_AND_DUPLEX_MASK | \ + LINK_STATUS_LINK_UP | \ + LINK_STATUS_PHYSICAL_LINK_FLAG | \ + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | \ + LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | \ + LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | \ + LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK | \ + LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE | \ + LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE) - -/* */ #define SFP_EEPROM_CON_TYPE_ADDR 0x2 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21 + #define SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22 #define SFP_EEPROM_COMP_CODE_ADDR 0x3 @@ -161,120 +175,7 @@ #define EDC_MODE_LINEAR 0x0022 #define EDC_MODE_LIMITING 0x0044 #define EDC_MODE_PASSIVE_DAC 0x0055 - -/* BRB default for class 0 E2 */ -#define DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR 170 -#define DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR 250 -#define DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR 10 -#define DEFAULT0_E2_BRB_MAC_FULL_XON_THR 50 - -/* BRB thresholds for E2*/ -#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE 170 -#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0 - -#define PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE 250 -#define PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0 - -#define PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE 10 -#define PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 90 - -#define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE 50 -#define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE 250 - -/* BRB default for class 0 E3A0 */ -#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR 290 -#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR 410 -#define DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR 10 -#define DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR 50 - -/* BRB thresholds for E3A0 */ -#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE 290 -#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0 - -#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE 410 -#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0 - -#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE 10 -#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 170 - -#define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE 50 -#define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE 410 - -/* BRB default for E3B0 */ -#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR 330 -#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR 490 -#define DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR 15 -#define DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR 55 - -/* BRB thresholds for E3B0 2 
port mode*/ -#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 1025 -#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0 - -#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE 1025 -#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0 - -#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE 10 -#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 1025 - -#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE 50 -#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE 1025 - -/* only for E3B0*/ -#define PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR 1025 -#define PFC_E3B0_2P_BRB_FULL_LB_XON_THR 1025 - -/* Lossy +Lossless GUARANTIED == GUART */ -#define PFC_E3B0_2P_MIX_PAUSE_LB_GUART 284 -/* Lossless +Lossless*/ -#define PFC_E3B0_2P_PAUSE_LB_GUART 236 -/* Lossy +Lossy*/ -#define PFC_E3B0_2P_NON_PAUSE_LB_GUART 342 - -/* Lossy +Lossless*/ -#define PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART 284 -/* Lossless +Lossless*/ -#define PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART 236 -/* Lossy +Lossy*/ -#define PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART 336 -#define PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST 80 - -#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART 0 -#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST 0 - -/* BRB thresholds for E3B0 4 port mode */ -#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 304 -#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0 - -#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE 384 -#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0 - -#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE 10 -#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 304 - -#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE 50 -#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE 384 - -/* only for E3B0*/ -#define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR 304 -#define PFC_E3B0_4P_BRB_FULL_LB_XON_THR 384 -#define PFC_E3B0_4P_LB_GUART 120 - -#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART 120 -#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST 80 - -#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART 80 -#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST 120 - -/* Pause defines*/ -#define DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR 330 -#define DEFAULT_E3B0_BRB_FULL_LB_XON_THR 490 -#define DEFAULT_E3B0_LB_GUART 40 - -#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART 40 -#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST 0 - -#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART 40 -#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST 0 +#define EDC_MODE_ACTIVE_DAC 0x0066 /* ETS defines*/ #define DCBX_INVALID_COS (0xFF) @@ -286,7 +187,6 @@ #define ETS_E3B0_PBF_MIN_W_VAL (10000) #define MAX_PACKET_SIZE (9700) -#define WC_UC_TIMEOUT 100 #define MAX_KR_LINK_RETRY 4 /**********************************************************/ @@ -305,6 +205,11 @@ (_bank + (_addr & 0xf)), \ _val) +static int bnx2x_check_half_open_conn(struct link_params *params, + struct link_vars *vars, u8 notify); +static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, + struct link_params *params); + static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits) { u32 val = REG_RD(bp, reg); @@ -323,6 +228,133 @@ static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits) return val; } +/* + * bnx2x_check_lfa - This function checks if link reinitialization is required, + * or link flap can be avoided. + * + * @params: link parameters + * Returns 0 if Link Flap Avoidance conditions are met otherwise, the failed + * condition code. 
+ */ +static int bnx2x_check_lfa(struct link_params *params) +{ + u32 link_status, cfg_idx, lfa_mask, cfg_size; + u32 cur_speed_cap_mask, cur_req_fc_auto_adv, additional_config; + u32 saved_val, req_val, eee_status; + struct bnx2x *bp = params->bp; + + additional_config = + REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, additional_config)); + + /* NOTE: must be first condition checked - + * to verify DCC bit is cleared in any case! + */ + if (additional_config & NO_LFA_DUE_TO_DCC_MASK) { + DP(NETIF_MSG_LINK, "No LFA due to DCC flap after clp exit\n"); + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, additional_config), + additional_config & ~NO_LFA_DUE_TO_DCC_MASK); + return LFA_DCC_LFA_DISABLED; + } + + /* Verify that link is up */ + link_status = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + port_mb[params->port].link_status)); + if (!(link_status & LINK_STATUS_LINK_UP)) + return LFA_LINK_DOWN; + + /* if loaded after BOOT from SAN, don't flap the link in any case and + * rely on link set by preboot driver + */ + if (params->feature_config_flags & FEATURE_CONFIG_BOOT_FROM_SAN) + return 0; + + /* Verify that loopback mode is not set */ + if (params->loopback_mode) + return LFA_LOOPBACK_ENABLED; + + /* Verify that MFW supports LFA */ + if (!params->lfa_base) + return LFA_MFW_IS_TOO_OLD; + + if (params->num_phys == 3) { + cfg_size = 2; + lfa_mask = 0xffffffff; + } else { + cfg_size = 1; + lfa_mask = 0xffff; + } + + /* Compare Duplex */ + saved_val = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, req_duplex)); + req_val = params->req_duplex[0] | (params->req_duplex[1] << 16); + if ((saved_val & lfa_mask) != (req_val & lfa_mask)) { + DP(NETIF_MSG_LINK, "Duplex mismatch %x vs. %x\n", + (saved_val & lfa_mask), (req_val & lfa_mask)); + return LFA_DUPLEX_MISMATCH; + } + /* Compare Flow Control */ + saved_val = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, req_flow_ctrl)); + req_val = params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16); + if ((saved_val & lfa_mask) != (req_val & lfa_mask)) { + DP(NETIF_MSG_LINK, "Flow control mismatch %x vs. %x\n", + (saved_val & lfa_mask), (req_val & lfa_mask)); + return LFA_FLOW_CTRL_MISMATCH; + } + /* Compare Link Speed */ + saved_val = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, req_line_speed)); + req_val = params->req_line_speed[0] | (params->req_line_speed[1] << 16); + if ((saved_val & lfa_mask) != (req_val & lfa_mask)) { + DP(NETIF_MSG_LINK, "Link speed mismatch %x vs. %x\n", + (saved_val & lfa_mask), (req_val & lfa_mask)); + return LFA_LINK_SPEED_MISMATCH; + } + + for (cfg_idx = 0; cfg_idx < cfg_size; cfg_idx++) { + cur_speed_cap_mask = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, + speed_cap_mask[cfg_idx])); + + if (cur_speed_cap_mask != params->speed_cap_mask[cfg_idx]) { + DP(NETIF_MSG_LINK, "Speed Cap mismatch %x vs. %x\n", + cur_speed_cap_mask, + params->speed_cap_mask[cfg_idx]); + return LFA_SPEED_CAP_MISMATCH; + } + } + + cur_req_fc_auto_adv = + REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, additional_config)) & + REQ_FC_AUTO_ADV_MASK; + + if ((u16)cur_req_fc_auto_adv != params->req_fc_auto_adv) { + DP(NETIF_MSG_LINK, "Flow Ctrl AN mismatch %x vs. 
%x\n", + cur_req_fc_auto_adv, params->req_fc_auto_adv); + return LFA_FLOW_CTRL_MISMATCH; + } + + eee_status = REG_RD(bp, params->shmem2_base + + offsetof(struct shmem2_region, + eee_status[params->port])); + + if (((eee_status & SHMEM_EEE_LPI_REQUESTED_BIT) ^ + (params->eee_mode & EEE_MODE_ENABLE_LPI)) || + ((eee_status & SHMEM_EEE_REQUESTED_BIT) ^ + (params->eee_mode & EEE_MODE_ADV_LPI))) { + DP(NETIF_MSG_LINK, "EEE mismatch %x vs. %x\n", params->eee_mode, + eee_status); + return LFA_EEE_MISMATCH; + } + + /* LFA conditions are met */ + return 0; +} /******************************************************************/ /* EPIO/GPIO section */ /******************************************************************/ @@ -405,8 +437,7 @@ static void bnx2x_ets_e2e3a0_disabled(struct link_params *params) DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n"); - /* - * mapping between entry priority to client number (0,1,2 -debug and + /* mapping between entry priority to client number (0,1,2 -debug and * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) * 3bits client num. * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 @@ -414,8 +445,7 @@ static void bnx2x_ets_e2e3a0_disabled(struct link_params *params) */ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688); - /* - * Bitmap of 5bits length. Each bit specifies whether the entry behaves + /* Bitmap of 5bits length. Each bit specifies whether the entry behaves * as strict. Bits 0,1,2 - debug and management entries, 3 - * COS0 entry, 4 - COS1 entry. * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT @@ -426,13 +456,11 @@ static void bnx2x_ets_e2e3a0_disabled(struct link_params *params) REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); /* defines which entries (clients) are subjected to WFQ arbitration */ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0); - /* - * For strict priority entries defines the number of consecutive + /* For strict priority entries defines the number of consecutive * slots for the highest priority. */ REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); - /* - * mapping between the CREDIT_WEIGHT registers and actual client + /* mapping between the CREDIT_WEIGHT registers and actual client * numbers */ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0); @@ -444,8 +472,7 @@ static void bnx2x_ets_e2e3a0_disabled(struct link_params *params) REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0); /* ETS mode disable */ REG_WR(bp, PBF_REG_ETS_ENABLED, 0); - /* - * If ETS mode is enabled (there is no strict priority) defines a WFQ + /* If ETS mode is enabled (there is no strict priority) defines a WFQ * weight for COS0/COS1. */ REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710); @@ -472,10 +499,9 @@ static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars) min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS; } else min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS; - /** - * If the link isn't up (static configuration for example ) The - * link will be according to 20GBPS. - */ + /* If the link isn't up (static configuration for example ) The + * link will be according to 20GBPS. 
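bnx2x_check_lfa() above returns 0 only when every saved request matches the new one; a hypothetical caller-side sketch of how a link-init path could use the result to skip the flap (the surrounding function and flow are assumptions, not the driver's actual code):

/* sketch only - assumes the driver's struct link_params context */
static int example_phy_init(struct link_params *params,
                            struct link_vars *vars)
{
        int lfa_status = bnx2x_check_lfa(params);

        if (lfa_status == 0) {
                /* every LFA condition met: keep the current link up,
                 * no PHY reset and no flap
                 */
                return 0;
        }

        /* lfa_status names the first mismatched condition (duplex,
         * flow control, speed, ...); fall through to a full re-init
         */
        return full_link_reset(params, vars);   /* placeholder */
}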
+ */ return min_w_val; } /****************************************************************************** @@ -539,8 +565,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params, struct bnx2x *bp = params->bp; const u8 port = params->port; const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars); - /** - * mapping between entry priority to client number (0,1,2 -debug and + /* Mapping between entry priority to client number (0,1,2 -debug and * management clients, 3 - COS0 client, 4 - COS1, ... 8 - * COS5)(HIGHEST) 4bits client num.TODO_ETS - Should be done by * reset value or init tool @@ -552,18 +577,14 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params, REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210); REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8); } - /** - * For strict priority entries defines the number of consecutive - * slots for the highest priority. - */ - /* TODO_ETS - Should be done by reset value or init tool */ + /* For strict priority entries defines the number of consecutive + * slots for the highest priority. + */ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS : NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); - /** - * mapping between the CREDIT_WEIGHT registers and actual client + /* Mapping between the CREDIT_WEIGHT registers and actual client * numbers */ - /* TODO_ETS - Should be done by reset value or init tool */ if (port) { /*Port 1 has 6 COS*/ REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543); @@ -575,8 +596,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params, REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5); } - /** - * Bitmap of 5bits length. Each bit specifies whether the entry behaves + /* Bitmap of 5bits length. Each bit specifies whether the entry behaves * as strict. Bits 0,1,2 - debug and management entries, 3 - * COS0 entry, 4 - COS1 entry. * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT @@ -591,13 +611,12 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params, REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ : NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0); - /** - * Please notice the register address are note continuous and a - * for here is note appropriate.In 2 port mode port0 only COS0-5 - * can be used. DEBUG1,DEBUG1,MGMT are never used for WFQ* In 4 - * port mode port1 only COS0-2 can be used. DEBUG1,DEBUG1,MGMT - * are never used for WFQ - */ + /* Please notice the register address are note continuous and a + * for here is note appropriate.In 2 port mode port0 only COS0-5 + * can be used. DEBUG1,DEBUG1,MGMT are never used for WFQ* In 4 + * port mode port1 only COS0-2 can be used. DEBUG1,DEBUG1,MGMT + * are never used for WFQ + */ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 : NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0); REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 : @@ -634,10 +653,9 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf( u32 base_upper_bound = 0; u8 max_cos = 0; u8 i = 0; - /** - * In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4 - * port mode port1 has COS0-2 that can be used for WFQ. - */ + /* In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4 + * port mode port1 has COS0-2 that can be used for WFQ. 
+ */ if (!port) { base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0; max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0; @@ -667,8 +685,7 @@ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params) u32 base_weight = 0; u8 max_cos = 0; - /** - * mapping between entry priority to client number 0 - COS0 + /* Mapping between entry priority to client number 0 - COS0 * client, 2 - COS1, ... 5 - COS5)(HIGHEST) 4bits client num. * TODO_ETS - Should be done by reset value or init tool */ @@ -696,10 +713,9 @@ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params) REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 : PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 , 0); - /** - * In 2 port mode port0 has COS0-5 that can be used for WFQ. - * In 4 port mode port1 has COS0-2 that can be used for WFQ. - */ + /* In 2 port mode port0 has COS0-5 that can be used for WFQ. + * In 4 port mode port1 has COS0-2 that can be used for WFQ. + */ if (!port) { base_weight = PBF_REG_COS0_WEIGHT_P0; max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0; @@ -739,7 +755,7 @@ static int bnx2x_ets_e3b0_disabled(const struct link_params *params, /****************************************************************************** * Description: * Disable will return basicly the values to init values. -*. +* ******************************************************************************/ int bnx2x_ets_disabled(struct link_params *params, struct link_vars *vars) @@ -868,7 +884,7 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp, /****************************************************************************** * Description: * Calculate the total BW.A value of 0 isn't legal. -*. +* ******************************************************************************/ static int bnx2x_ets_e3b0_get_total_bw( const struct link_params *params, @@ -880,7 +896,6 @@ static int bnx2x_ets_e3b0_get_total_bw( u8 is_bw_cos_exist = 0; *total_bw = 0 ; - /* Calculate total BW requested */ for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) { if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) { @@ -888,10 +903,9 @@ static int bnx2x_ets_e3b0_get_total_bw( if (!ets_params->cos[cos_idx].params.bw_params.bw) { DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW" "was set to 0\n"); - /* - * This is to prevent a state when ramrods + /* This is to prevent a state when ramrods * can't be sent - */ + */ ets_params->cos[cos_idx].params.bw_params.bw = 1; } @@ -909,8 +923,7 @@ static int bnx2x_ets_e3b0_get_total_bw( } DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config total BW should be 100\n"); - /* - * We can handle a case whre the BW isn't 100 this can happen + /* We can handle a case whre the BW isn't 100 this can happen * if the TC are joined. */ } @@ -920,7 +933,7 @@ static int bnx2x_ets_e3b0_get_total_bw( /****************************************************************************** * Description: * Invalidate all the sp_pri_to_cos. -*. +* ******************************************************************************/ static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos) { @@ -932,7 +945,7 @@ static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos) * Description: * Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers * according to sp_pri_to_cos. -*. 
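The zero-BW clamp in bnx2x_ets_e3b0_get_total_bw() above, checked standalone; the per-COS weights are invented:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t bw[3] = { 50, 0, 50 }; /* invented minimal BW, percent */
        uint32_t total = 0;

        for (int i = 0; i < 3; i++) {
                if (!bw[i])
                        bw[i] = 1;      /* keep ramrods sendable */
                total += bw[i];
        }

        /* the driver only warns here: joined traffic classes can
         * legitimately sum to something other than 100
         */
        if (total != 100)
                printf("total BW %u != 100 (tolerated)\n", total);
        return 0;
}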
+* ******************************************************************************/ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params, u8 *sp_pri_to_cos, const u8 pri, @@ -943,6 +956,12 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params, const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 : DCBX_E3B0_MAX_NUM_COS_PORT0; + if (pri >= max_num_of_cos) { + DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " + "parameter Illegal strict priority\n"); + return -EINVAL; + } + if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) { DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " "parameter There can't be two COS's with " @@ -950,12 +969,6 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params, return -EINVAL; } - if (pri > max_num_of_cos) { - DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " - "parameter Illegal strict priority\n"); - return -EINVAL; - } - sp_pri_to_cos[pri] = cos_entry; return 0; @@ -965,7 +978,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params, * Description: * Returns the correct value according to COS and priority in * the sp_pri_cli register. -*. +* ******************************************************************************/ static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset, const u8 pri_set, @@ -982,7 +995,7 @@ static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset, * Description: * Returns the correct value according to COS and priority in the * sp_pri_cli register for NIG. -*. +* ******************************************************************************/ static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set) { @@ -998,7 +1011,7 @@ static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set) * Description: * Returns the correct value according to COS and priority in the * sp_pri_cli register for PBF. -*. +* ******************************************************************************/ static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set) { @@ -1014,7 +1027,7 @@ static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set) * Description: * Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers * according to sp_pri_to_cos.(which COS has higher priority) -*. +* ******************************************************************************/ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params, u8 *sp_pri_to_cos) @@ -1150,8 +1163,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params, return -EINVAL; } - /* - * Upper bound is set according to current link speed (min_w_val + /* Upper bound is set according to current link speed (min_w_val * should be the same for upper bound and COS credit val). 
*/ bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig); @@ -1161,8 +1173,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params, for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) { if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) { cos_bw_bitmap |= (1 << cos_entry); - /* - * The function also sets the BW in HW(not the mappin + /* The function also sets the BW in HW(not the mappin * yet) */ bnx2x_status = bnx2x_ets_e3b0_set_cos_bw( @@ -1218,14 +1229,12 @@ static void bnx2x_ets_bw_limit_common(const struct link_params *params) /* ETS disabled configuration */ struct bnx2x *bp = params->bp; DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n"); - /* - * defines which entries (clients) are subjected to WFQ arbitration + /* Defines which entries (clients) are subjected to WFQ arbitration * COS0 0x8 * COS1 0x10 */ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18); - /* - * mapping between the ARB_CREDIT_WEIGHT registers and actual + /* Mapping between the ARB_CREDIT_WEIGHT registers and actual * client numbers (WEIGHT_0 does not actually have to represent * client 0) * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 @@ -1243,8 +1252,7 @@ static void bnx2x_ets_bw_limit_common(const struct link_params *params) /* Defines the number of consecutive slots for the strict priority */ REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); - /* - * Bitmap of 5bits length. Each bit specifies whether the entry behaves + /* Bitmap of 5bits length. Each bit specifies whether the entry behaves * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0 * entry, 4 - COS1 entry. * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT @@ -1299,8 +1307,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos) u32 val = 0; DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n"); - /* - * Bitmap of 5bits length. Each bit specifies whether the entry behaves + /* Bitmap of 5bits length. Each bit specifies whether the entry behaves * as strict. Bits 0,1,2 - debug and management entries, * 3 - COS0 entry, 4 - COS1 entry. * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT @@ -1308,8 +1315,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos) * MCP and debug are strict */ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F); - /* - * For strict priority entries defines the number of consecutive slots + /* For strict priority entries defines the number of consecutive slots * for the highest priority. */ REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); @@ -1321,8 +1327,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos) /* Defines the number of consecutive slots for the strict priority */ REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos); - /* - * mapping between entry priority to client number (0,1,2 -debug and + /* Mapping between entry priority to client number (0,1,2 -debug and * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) * 3bits client num. 
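The priority-to-client maps in these comments pack one 3-bit client number per priority slot; decoding the 0x4688 value written by the disabled-ETS path earlier in this file shows the identity mapping PRI0..PRI4 -> clients 0..4:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t map = 0x4688;  /* NIG_REG_P0_TX_ARB_PRIORITY_CLIENT value */

        for (int pri = 0; pri < 5; pri++)
                printf("PRI%d -> client %u\n", pri,
                       (map >> (3 * pri)) & 0x7);
        return 0;
}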
* PRI4 | PRI3 | PRI2 | PRI1 | PRI0 @@ -1334,6 +1339,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos) return 0; } + /******************************************************************/ /* PFC section */ /******************************************************************/ @@ -1357,22 +1363,26 @@ static void bnx2x_update_pfc_xmac(struct link_params *params, if (!(params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)) { - /* - * RX flow control - Process pause frame in receive direction + /* RX flow control - Process pause frame in receive direction */ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX) pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN; - /* - * TX flow control - Send pause packet when buffer is full - */ + /* TX flow control - Send pause packet when buffer is full */ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN; } else {/* PFC support */ pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN | XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN | XMAC_PFC_CTRL_HI_REG_RX_PFC_EN | - XMAC_PFC_CTRL_HI_REG_TX_PFC_EN; + XMAC_PFC_CTRL_HI_REG_TX_PFC_EN | + XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON; + /* Write pause and PFC registers */ + REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val); + REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val); + REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val); + pfc1_val &= ~XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON; + } /* Write pause and PFC registers */ @@ -1394,85 +1404,50 @@ static void bnx2x_update_pfc_xmac(struct link_params *params, udelay(30); } - -static void bnx2x_emac_get_pfc_stat(struct link_params *params, - u32 pfc_frames_sent[2], - u32 pfc_frames_received[2]) -{ - /* Read pfc statistic */ - struct bnx2x *bp = params->bp; - u32 emac_base = params->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; - u32 val_xon = 0; - u32 val_xoff = 0; - - DP(NETIF_MSG_LINK, "pfc statistic read from EMAC\n"); - - /* PFC received frames */ - val_xoff = REG_RD(bp, emac_base + - EMAC_REG_RX_PFC_STATS_XOFF_RCVD); - val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT; - val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD); - val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT; - - pfc_frames_received[0] = val_xon + val_xoff; - - /* PFC received sent */ - val_xoff = REG_RD(bp, emac_base + - EMAC_REG_RX_PFC_STATS_XOFF_SENT); - val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT; - val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT); - val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT; - - pfc_frames_sent[0] = val_xon + val_xoff; -} - -/* Read pfc statistic*/ -void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars, - u32 pfc_frames_sent[2], - u32 pfc_frames_received[2]) -{ - /* Read pfc statistic */ - struct bnx2x *bp = params->bp; - - DP(NETIF_MSG_LINK, "pfc statistic\n"); - - if (!vars->link_up) - return; - - if (vars->mac_type == MAC_TYPE_EMAC) { - DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n"); - bnx2x_emac_get_pfc_stat(params, pfc_frames_sent, - pfc_frames_received); - } -} /******************************************************************/ /* MAC/PBF section */ /******************************************************************/ -static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port) +static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, + u32 emac_base) { - u32 mode, emac_base; - /** - * Set clause 45 mode, slow down the MDIO clock to 2.5MHz + u32 new_mode, cur_mode; + u32 clc_cnt; + /* Set clause 45 mode, slow down the MDIO clock to 2.5MHz * (a value of 
49==0x31) and make sure that the AUTO poll is off */ + cur_mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); - if (CHIP_IS_E2(bp)) - emac_base = GRCBASE_EMAC0; - else - emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0; - mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); - mode &= ~(EMAC_MDIO_MODE_AUTO_POLL | - EMAC_MDIO_MODE_CLOCK_CNT); if (USES_WARPCORE(bp)) - mode |= (74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT); + clc_cnt = 74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT; else - mode |= (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT); + clc_cnt = 49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT; - mode |= (EMAC_MDIO_MODE_CLAUSE_45); - REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, mode); + if (((cur_mode & EMAC_MDIO_MODE_CLOCK_CNT) == clc_cnt) && + (cur_mode & (EMAC_MDIO_MODE_CLAUSE_45))) + return; + + new_mode = cur_mode & + ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT); + new_mode |= clc_cnt; + new_mode |= (EMAC_MDIO_MODE_CLAUSE_45); + DP(NETIF_MSG_LINK, "Changing emac_mode from 0x%x to 0x%x\n", + cur_mode, new_mode); + REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, new_mode); udelay(40); } + +static void bnx2x_set_mdio_emac_per_phy(struct bnx2x *bp, + struct link_params *params) +{ + u8 phy_index; + /* Set mdio clock per phy */ + for (phy_index = INT_PHY; phy_index < params->num_phys; + phy_index++) + bnx2x_set_mdio_clk(bp, params->chip_id, + params->phy[phy_index].mdio_ctrl); +} + static u8 bnx2x_is_4_port_mode(struct bnx2x *bp) { u32 port4mode_ovwr_val; @@ -1517,7 +1492,8 @@ static void bnx2x_emac_init(struct link_params *params, } timeout--; } while (val & EMAC_MODE_RESET); - bnx2x_set_mdio_clk(bp, params->chip_id, port); + + bnx2x_set_mdio_emac_per_phy(bp, params); /* Set mac address */ val = ((params->mac_addr[0] << 8) | params->mac_addr[1]); @@ -1544,16 +1520,23 @@ static void bnx2x_set_xumac_nig(struct link_params *params, NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en); } -static void bnx2x_umac_disable(struct link_params *params) +static void bnx2x_set_umac_rxtx(struct link_params *params, u8 en) { u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0; + u32 val; struct bnx2x *bp = params->bp; if (!(REG_RD(bp, MISC_REG_RESET_REG_2) & (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port))) return; - + val = REG_RD(bp, umac_base + UMAC_REG_COMMAND_CONFIG); + if (en) + val |= (UMAC_COMMAND_CONFIG_REG_TX_ENA | + UMAC_COMMAND_CONFIG_REG_RX_ENA); + else + val &= ~(UMAC_COMMAND_CONFIG_REG_TX_ENA | + UMAC_COMMAND_CONFIG_REG_RX_ENA); /* Disable RX and TX */ - REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, 0); + REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); } static void bnx2x_umac_enable(struct link_params *params, @@ -1565,22 +1548,13 @@ static void bnx2x_umac_enable(struct link_params *params, /* Reset UMAC */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)); - usleep_range(1000, 1000); + usleep_range(1000, 2000); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)); DP(NETIF_MSG_LINK, "enabling UMAC\n"); - /** - * This register determines on which events the MAC will assert - * error on the i/f to the NIG along w/ EOP. - */ - - /** - * BD REG_WR(bp, NIG_REG_P0_MAC_RSV_ERR_MASK + - * params->port*0x14, 0xfffff. 
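A rough consistency check of the two clock-count values above. The (count + 1) divider model and the 125 MHz reference clock are inferences from the "49 == 2.5 MHz" driver comment, not documented facts:

#include <stdio.h>

int main(void)
{
        double ref_mhz = 125.0;         /* assumed reference clock */
        int counts[2] = { 49, 74 };     /* EMAC default vs. warpcore */

        for (int i = 0; i < 2; i++)
                printf("clock_cnt=%2d -> ~%.2f MHz\n",
                       counts[i], ref_mhz / (counts[i] + 1));
        return 0;
}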
- */ /* This register opens the gate for the UMAC despite its name */ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1); @@ -1612,9 +1586,22 @@ static void bnx2x_umac_enable(struct link_params *params, if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)) val |= UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE; + if (vars->duplex == DUPLEX_HALF) + val |= UMAC_COMMAND_CONFIG_REG_HD_ENA; + REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); udelay(50); + /* Configure UMAC for EEE */ + if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) { + DP(NETIF_MSG_LINK, "configured UMAC for EEE\n"); + REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL, + UMAC_UMAC_EEE_CTRL_REG_EEE_EN); + REG_WR(bp, umac_base + UMAC_REG_EEE_WAKE_TIMER, 0x11); + } else { + REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL, 0x0); + } + /* Set MAC address for source TX Pause/PFC frames (under SW reset) */ REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0, ((params->mac_addr[2] << 24) | @@ -1640,8 +1627,7 @@ static void bnx2x_umac_enable(struct link_params *params, val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA; REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); - /* - * Maximum Frame Length (RW). Defines a 14-Bit maximum frame + /* Maximum Frame Length (RW). Defines a 14-Bit maximum frame * length used by the MAC receive logic to check frames. */ REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710); @@ -1657,14 +1643,16 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed) struct bnx2x *bp = params->bp; u32 is_port4mode = bnx2x_is_4_port_mode(bp); - /* - * In 4-port mode, need to set the mode only once, so if XMAC is + /* In 4-port mode, need to set the mode only once, so if XMAC is * already out of reset, it means the mode has already been set, * and it must not* reset the XMAC again, since it controls both * ports of the path */ - if ((CHIP_NUM(bp) == CHIP_NUM_57840) && + if (((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) || + (CHIP_NUM(bp) == CHIP_NUM_57840_2_20) || + (CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE)) && + is_port4mode && (REG_RD(bp, MISC_REG_RESET_REG_2) & MISC_REGISTERS_RESET_REG_2_XMAC)) { DP(NETIF_MSG_LINK, @@ -1675,20 +1663,20 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed) /* Hard reset */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, MISC_REGISTERS_RESET_REG_2_XMAC); - usleep_range(1000, 1000); + usleep_range(1000, 2000); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, MISC_REGISTERS_RESET_REG_2_XMAC); if (is_port4mode) { DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n"); - /* Set the number of ports on the system side to up to 2 */ + /* Set the number of ports on the system side to up to 2 */ REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1); /* Set the number of ports on the Warp Core to 10G */ REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3); } else { - /* Set the number of ports on the system side to 1 */ + /* Set the number of ports on the system side to 1 */ REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0); if (max_speed == SPEED_10000) { DP(NETIF_MSG_LINK, @@ -1705,23 +1693,23 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed) /* Soft reset */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, MISC_REGISTERS_RESET_REG_2_XMAC_SOFT); - usleep_range(1000, 1000); + usleep_range(1000, 2000); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, MISC_REGISTERS_RESET_REG_2_XMAC_SOFT); } -static void bnx2x_xmac_disable(struct link_params *params) +static void bnx2x_set_xmac_rxtx(struct link_params *params, u8 en) { u8 port = params->port; struct bnx2x *bp 
= params->bp; u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; + u32 val; if (REG_RD(bp, MISC_REG_RESET_REG_2) & MISC_REGISTERS_RESET_REG_2_XMAC) { - /* - * Send an indication to change the state in the NIG back to XON + /* Send an indication to change the state in the NIG back to XON * Clearing this bit enables the next set of this bit to get * rising edge */ @@ -1731,7 +1719,12 @@ static void bnx2x_xmac_disable(struct link_params *params) REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, (pfc_ctrl | (1<<1))); DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port); - REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0); + val = REG_RD(bp, xmac_base + XMAC_REG_CTRL); + if (en) + val |= (XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN); + else + val &= ~(XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN); + REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); } } @@ -1746,17 +1739,27 @@ static int bnx2x_xmac_enable(struct link_params *params, bnx2x_xmac_init(params, vars->line_speed); - /* - * This register determines on which events the MAC will assert + /* This register determines on which events the MAC will assert * error on the i/f to the NIG along w/ EOP. */ - /* - * This register tells the NIG whether to send traffic to UMAC + /* This register tells the NIG whether to send traffic to UMAC * or XMAC */ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0); + /* When XMAC is in XLGMII mode, disable sending idles for fault + * detection. + */ + if (!(params->phy[INT_PHY].flags & FLAGS_TX_ERROR_CHECK)) { + REG_WR(bp, xmac_base + XMAC_REG_RX_LSS_CTRL, + (XMAC_RX_LSS_CTRL_REG_LOCAL_FAULT_DISABLE | + XMAC_RX_LSS_CTRL_REG_REMOTE_FAULT_DISABLE)); + REG_WR(bp, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0); + REG_WR(bp, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, + XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS | + XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS); + } /* Set Max packet size */ REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710); @@ -1766,9 +1769,23 @@ static int bnx2x_xmac_enable(struct link_params *params, /* update PFC */ bnx2x_update_pfc_xmac(params, vars, 0); + if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) { + DP(NETIF_MSG_LINK, "Setting XMAC for EEE\n"); + REG_WR(bp, xmac_base + XMAC_REG_EEE_TIMERS_HI, 0x1380008); + REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x1); + } else { + REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x0); + } + /* Enable TX and RX */ val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN; + /* Set MAC in XLGMII mode for dual-mode */ + if ((vars->line_speed == SPEED_20000) && + (params->phy[INT_PHY].supported & + SUPPORTED_20000baseKR2_Full)) + val |= XMAC_CTRL_REG_XLGMII_ALIGN_ENB; + /* Check loopback mode */ if (lb) val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK; @@ -1821,11 +1838,6 @@ static int bnx2x_emac_enable(struct link_params *params, bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, EMAC_TX_MODE_RESET); - if (CHIP_REV_IS_SLOW(bp)) { - /* config GMII mode */ - val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); - EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII)); - } else { /* ASIC */ /* pause enable/disable */ bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE, EMAC_RX_MODE_FLOW_EN); @@ -1848,14 +1860,12 @@ static int bnx2x_emac_enable(struct link_params *params, } else bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, EMAC_TX_MODE_FLOW_EN); - } /* KEEP_VLAN_TAG, promiscuous */ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; - /* - * Setting this bit causes MAC control frames (except 
for pause + /* Setting this bit causes MAC control frames (except for pause * frames) to be passed on for processing. This setting has no * effect on the operation of the pause frames. This bit affects * all packets regardless of RX Parser packet sorting logic. @@ -1888,23 +1898,23 @@ static int bnx2x_emac_enable(struct link_params *params, val &= ~0x810; EMAC_WR(bp, EMAC_REG_EMAC_MODE, val); - /* enable emac */ + /* Enable emac */ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1); - /* enable emac for jumbo packets */ + /* Enable emac for jumbo packets */ EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE, (EMAC_RX_MTU_SIZE_JUMBO_ENA | (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); - /* strip CRC */ + /* Strip CRC */ REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1); - /* disable the NIG in/out to the bmac */ + /* Disable the NIG in/out to the bmac */ REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0); REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0); REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0); - /* enable the NIG in/out to the emac */ + /* Enable the NIG in/out to the emac */ REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1); val = 0; if ((params->feature_config_flags & @@ -1939,7 +1949,7 @@ static void bnx2x_update_pfc_bmac1(struct link_params *params, wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2); - /* tx control */ + /* TX control */ val = 0xc0; if (!(params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) && @@ -1954,8 +1964,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params, struct link_vars *vars, u8 is_lb) { - /* - * Set rx control: Strip CRC and enable BigMAC to relay + /* Set rx control: Strip CRC and enable BigMAC to relay * control packets to the system as well */ u32 wb_data[2]; @@ -2000,15 +2009,14 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params, wb_data[0] &= ~(1<<2); } else { DP(NETIF_MSG_LINK, "PFC is disabled\n"); - /* disable PFC RX & TX & STATS and set 8 COS */ + /* Disable PFC RX & TX & STATS and set 8 COS */ wb_data[0] = 0x8; wb_data[1] = 0; } REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2); - /* - * Set Time (based unit is 512 bit time) between automatic + /* Set Time (based unit is 512 bit time) between automatic * re-sending of PP packets and enable automatic re-send of * Per-Priority Packet as long as pp_gen is asserted and * pp_disable is low.
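A note on the access pattern used throughout the BigMAC hunks above: the BIGMAC/BIGMAC2 registers are 64 bits wide, so every write goes through REG_WR_DMAE() with a two-element u32 array, wb_data[0] carrying bits 31:0 and wb_data[1] bits 63:32. The sketch below models that convention in plain C; reg64_write() and the 0x1000 address are illustrative stand-ins for REG_WR_DMAE() and the BIGMAC2_REGISTER_* offsets, not the driver's real API.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the DMAE-based wide write: wb_data[0] is bits 31:0 of
 * the 64-bit register, wb_data[1] is bits 63:32. */
static void reg64_write(uint32_t addr, const uint32_t wb_data[2])
{
	printf("WR64 0x%08x <= 0x%08x%08x\n", addr, wb_data[1], wb_data[0]);
}

int main(void)
{
	uint32_t wb_data[2];

	/* Mirrors the "PFC is disabled" branch above: clear the PFC
	 * RX/TX/STATS enables and select 8 COS (address is hypothetical). */
	wb_data[0] = 0x8;
	wb_data[1] = 0;
	reg64_write(0x1000, wb_data);
	return 0;
}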
@@ -2037,417 +2045,14 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params, REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); } -/* PFC BRB internal port configuration params */ -struct bnx2x_pfc_brb_threshold_val { - u32 pause_xoff; - u32 pause_xon; - u32 full_xoff; - u32 full_xon; -}; - -struct bnx2x_pfc_brb_e3b0_val { - u32 per_class_guaranty_mode; - u32 lb_guarantied_hyst; - u32 full_lb_xoff_th; - u32 full_lb_xon_threshold; - u32 lb_guarantied; - u32 mac_0_class_t_guarantied; - u32 mac_0_class_t_guarantied_hyst; - u32 mac_1_class_t_guarantied; - u32 mac_1_class_t_guarantied_hyst; -}; - -struct bnx2x_pfc_brb_th_val { - struct bnx2x_pfc_brb_threshold_val pauseable_th; - struct bnx2x_pfc_brb_threshold_val non_pauseable_th; - struct bnx2x_pfc_brb_threshold_val default_class0; - struct bnx2x_pfc_brb_threshold_val default_class1; - -}; -static int bnx2x_pfc_brb_get_config_params( - struct link_params *params, - struct bnx2x_pfc_brb_th_val *config_val) -{ - struct bnx2x *bp = params->bp; - DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n"); - - config_val->default_class1.pause_xoff = 0; - config_val->default_class1.pause_xon = 0; - config_val->default_class1.full_xoff = 0; - config_val->default_class1.full_xon = 0; - - if (CHIP_IS_E2(bp)) { - /* class0 defaults */ - config_val->default_class0.pause_xoff = - DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR; - config_val->default_class0.pause_xon = - DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR; - config_val->default_class0.full_xoff = - DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR; - config_val->default_class0.full_xon = - DEFAULT0_E2_BRB_MAC_FULL_XON_THR; - /* pause able*/ - config_val->pauseable_th.pause_xoff = - PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE; - config_val->pauseable_th.pause_xon = - PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE; - config_val->pauseable_th.full_xoff = - PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE; - config_val->pauseable_th.full_xon = - PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE; - /* non pause able*/ - config_val->non_pauseable_th.pause_xoff = - PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; - config_val->non_pauseable_th.pause_xon = - PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; - config_val->non_pauseable_th.full_xoff = - PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; - config_val->non_pauseable_th.full_xon = - PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE; - } else if (CHIP_IS_E3A0(bp)) { - /* class0 defaults */ - config_val->default_class0.pause_xoff = - DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR; - config_val->default_class0.pause_xon = - DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR; - config_val->default_class0.full_xoff = - DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR; - config_val->default_class0.full_xon = - DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR; - /* pause able */ - config_val->pauseable_th.pause_xoff = - PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE; - config_val->pauseable_th.pause_xon = - PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE; - config_val->pauseable_th.full_xoff = - PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE; - config_val->pauseable_th.full_xon = - PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE; - /* non pause able*/ - config_val->non_pauseable_th.pause_xoff = - PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; - config_val->non_pauseable_th.pause_xon = - PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; - config_val->non_pauseable_th.full_xoff = - PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; - config_val->non_pauseable_th.full_xon = - PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE; - } else if (CHIP_IS_E3B0(bp)) { - /* class0 defaults */ - config_val->default_class0.pause_xoff = - DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR; - 
config_val->default_class0.pause_xon = - DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR; - config_val->default_class0.full_xoff = - DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR; - config_val->default_class0.full_xon = - DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR; - - if (params->phy[INT_PHY].flags & - FLAGS_4_PORT_MODE) { - config_val->pauseable_th.pause_xoff = - PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE; - config_val->pauseable_th.pause_xon = - PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE; - config_val->pauseable_th.full_xoff = - PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE; - config_val->pauseable_th.full_xon = - PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE; - /* non pause able*/ - config_val->non_pauseable_th.pause_xoff = - PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; - config_val->non_pauseable_th.pause_xon = - PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; - config_val->non_pauseable_th.full_xoff = - PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; - config_val->non_pauseable_th.full_xon = - PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE; - } else { - config_val->pauseable_th.pause_xoff = - PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE; - config_val->pauseable_th.pause_xon = - PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE; - config_val->pauseable_th.full_xoff = - PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE; - config_val->pauseable_th.full_xon = - PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE; - /* non pause able*/ - config_val->non_pauseable_th.pause_xoff = - PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; - config_val->non_pauseable_th.pause_xon = - PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; - config_val->non_pauseable_th.full_xoff = - PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; - config_val->non_pauseable_th.full_xon = - PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE; - } - } else - return -EINVAL; - - return 0; -} - -static void bnx2x_pfc_brb_get_e3b0_config_params( - struct link_params *params, - struct bnx2x_pfc_brb_e3b0_val - *e3b0_val, - struct bnx2x_nig_brb_pfc_port_params *pfc_params, - const u8 pfc_enabled) -{ - if (pfc_enabled && pfc_params) { - e3b0_val->per_class_guaranty_mode = 1; - e3b0_val->lb_guarantied_hyst = 80; - - if (params->phy[INT_PHY].flags & - FLAGS_4_PORT_MODE) { - e3b0_val->full_lb_xoff_th = - PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR; - e3b0_val->full_lb_xon_threshold = - PFC_E3B0_4P_BRB_FULL_LB_XON_THR; - e3b0_val->lb_guarantied = - PFC_E3B0_4P_LB_GUART; - e3b0_val->mac_0_class_t_guarantied = - PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART; - e3b0_val->mac_0_class_t_guarantied_hyst = - PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST; - e3b0_val->mac_1_class_t_guarantied = - PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART; - e3b0_val->mac_1_class_t_guarantied_hyst = - PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST; - } else { - e3b0_val->full_lb_xoff_th = - PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR; - e3b0_val->full_lb_xon_threshold = - PFC_E3B0_2P_BRB_FULL_LB_XON_THR; - e3b0_val->mac_0_class_t_guarantied_hyst = - PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST; - e3b0_val->mac_1_class_t_guarantied = - PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART; - e3b0_val->mac_1_class_t_guarantied_hyst = - PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST; - - if (pfc_params->cos0_pauseable != - pfc_params->cos1_pauseable) { - /* nonpauseable= Lossy + pauseable = Lossless*/ - e3b0_val->lb_guarantied = - PFC_E3B0_2P_MIX_PAUSE_LB_GUART; - e3b0_val->mac_0_class_t_guarantied = - PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART; - } else if (pfc_params->cos0_pauseable) { - /* Lossless +Lossless*/ - e3b0_val->lb_guarantied = - PFC_E3B0_2P_PAUSE_LB_GUART; - e3b0_val->mac_0_class_t_guarantied = - PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART; - } 
else { - /* Lossy +Lossy*/ - e3b0_val->lb_guarantied = - PFC_E3B0_2P_NON_PAUSE_LB_GUART; - e3b0_val->mac_0_class_t_guarantied = - PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART; - } - } - } else { - e3b0_val->per_class_guaranty_mode = 0; - e3b0_val->lb_guarantied_hyst = 0; - e3b0_val->full_lb_xoff_th = - DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR; - e3b0_val->full_lb_xon_threshold = - DEFAULT_E3B0_BRB_FULL_LB_XON_THR; - e3b0_val->lb_guarantied = - DEFAULT_E3B0_LB_GUART; - e3b0_val->mac_0_class_t_guarantied = - DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART; - e3b0_val->mac_0_class_t_guarantied_hyst = - DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST; - e3b0_val->mac_1_class_t_guarantied = - DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART; - e3b0_val->mac_1_class_t_guarantied_hyst = - DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST; - } -} -static int bnx2x_update_pfc_brb(struct link_params *params, - struct link_vars *vars, - struct bnx2x_nig_brb_pfc_port_params - *pfc_params) -{ - struct bnx2x *bp = params->bp; - struct bnx2x_pfc_brb_th_val config_val = { {0} }; - struct bnx2x_pfc_brb_threshold_val *reg_th_config = - &config_val.pauseable_th; - struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0}; - const int set_pfc = params->feature_config_flags & - FEATURE_CONFIG_PFC_ENABLED; - const u8 pfc_enabled = (set_pfc && pfc_params); - int bnx2x_status = 0; - u8 port = params->port; - - /* default - pause configuration */ - reg_th_config = &config_val.pauseable_th; - bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val); - if (bnx2x_status) - return bnx2x_status; - - if (pfc_enabled) { - /* First COS */ - if (pfc_params->cos0_pauseable) - reg_th_config = &config_val.pauseable_th; - else - reg_th_config = &config_val.non_pauseable_th; - } else - reg_th_config = &config_val.default_class0; - /* - * The number of free blocks below which the pause signal to class 0 - * of MAC #n is asserted. n=0,1 - */ - REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 : - BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , - reg_th_config->pause_xoff); - /* - * The number of free blocks above which the pause signal to class 0 - * of MAC #n is de-asserted. n=0,1 - */ - REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 : - BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , reg_th_config->pause_xon); - /* - * The number of free blocks below which the full signal to class 0 - * of MAC #n is asserted. n=0,1 - */ - REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 : - BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , reg_th_config->full_xoff); - /* - * The number of free blocks above which the full signal to class 0 - * of MAC #n is de-asserted. n=0,1 - */ - REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 : - BRB1_REG_FULL_0_XON_THRESHOLD_0 , reg_th_config->full_xon); - - if (pfc_enabled) { - /* Second COS */ - if (pfc_params->cos1_pauseable) - reg_th_config = &config_val.pauseable_th; - else - reg_th_config = &config_val.non_pauseable_th; - } else - reg_th_config = &config_val.default_class1; - /* - * The number of free blocks below which the pause signal to - * class 1 of MAC #n is asserted. n=0,1 - */ - REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 : - BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, - reg_th_config->pause_xoff); - - /* - * The number of free blocks above which the pause signal to - * class 1 of MAC #n is de-asserted. n=0,1 - */ - REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 : - BRB1_REG_PAUSE_1_XON_THRESHOLD_0, - reg_th_config->pause_xon); - /* - * The number of free blocks below which the full signal to - * class 1 of MAC #n is asserted. 
n=0,1 - */ - REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 : - BRB1_REG_FULL_1_XOFF_THRESHOLD_0, - reg_th_config->full_xoff); - /* - * The number of free blocks above which the full signal to - * class 1 of MAC #n is de-asserted. n=0,1 - */ - REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 : - BRB1_REG_FULL_1_XON_THRESHOLD_0, - reg_th_config->full_xon); - - if (CHIP_IS_E3B0(bp)) { - bnx2x_pfc_brb_get_e3b0_config_params( - params, - &e3b0_val, - pfc_params, - pfc_enabled); - - REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE, - e3b0_val.per_class_guaranty_mode); - - /* - * The hysteresis on the guarantied buffer space for the Lb - * port before signaling XON. - */ - REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, - e3b0_val.lb_guarantied_hyst); - - /* - * The number of free blocks below which the full signal to the - * LB port is asserted. - */ - REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, - e3b0_val.full_lb_xoff_th); - /* - * The number of free blocks above which the full signal to the - * LB port is de-asserted. - */ - REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, - e3b0_val.full_lb_xon_threshold); - /* - * The number of blocks guarantied for the MAC #n port. n=0,1 - */ - - /* The number of blocks guarantied for the LB port.*/ - REG_WR(bp, BRB1_REG_LB_GUARANTIED, - e3b0_val.lb_guarantied); - - /* - * The number of blocks guarantied for the MAC #n port. - */ - REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0, - 2 * e3b0_val.mac_0_class_t_guarantied); - REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1, - 2 * e3b0_val.mac_1_class_t_guarantied); - /* - * The number of blocks guarantied for class #t in MAC0. t=0,1 - */ - REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED, - e3b0_val.mac_0_class_t_guarantied); - REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED, - e3b0_val.mac_0_class_t_guarantied); - /* - * The hysteresis on the guarantied buffer space for class in - * MAC0. t=0,1 - */ - REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST, - e3b0_val.mac_0_class_t_guarantied_hyst); - REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST, - e3b0_val.mac_0_class_t_guarantied_hyst); - - /* - * The number of blocks guarantied for class #t in MAC1.t=0,1 - */ - REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED, - e3b0_val.mac_1_class_t_guarantied); - REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED, - e3b0_val.mac_1_class_t_guarantied); - /* - * The hysteresis on the guarantied buffer space for class #t - * in MAC1. t=0,1 - */ - REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST, - e3b0_val.mac_1_class_t_guarantied_hyst); - REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST, - e3b0_val.mac_1_class_t_guarantied_hyst); - } - - return bnx2x_status; -} - /****************************************************************************** * Description: * This function is needed because NIG ARB_CREDIT_WEIGHT_X are * not continues and ARB_CREDIT_WEIGHT_0 + offset is suitable. 
******************************************************************************/ -int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp, - u8 cos_entry, - u32 priority_mask, u8 port) +static int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp, + u8 cos_entry, + u32 priority_mask, u8 port) { u32 nig_reg_rx_priority_mask_add = 0; @@ -2497,6 +2102,16 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status) port_mb[params->port].link_status), link_status); } +static void bnx2x_update_link_attr(struct link_params *params, u32 link_attr) +{ + struct bnx2x *bp = params->bp; + + if (SHMEM2_HAS(bp, link_attr_sync)) + REG_WR(bp, params->shmem2_base + + offsetof(struct shmem2_region, + link_attr_sync[params->port]), link_attr); +} + static void bnx2x_update_pfc_nig(struct link_params *params, struct link_vars *vars, struct bnx2x_nig_brb_pfc_port_params *nig_params) @@ -2511,15 +2126,13 @@ static void bnx2x_update_pfc_nig(struct link_params *params, FEATURE_CONFIG_PFC_ENABLED; DP(NETIF_MSG_LINK, "updating pfc nig parameters\n"); - /* - * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set + /* When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set * MAC control frames (that are not pause packets) * will be forwarded to the XCM. */ xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK : NIG_REG_LLH0_XCM_MASK); - /* - * nig params will override non PFC params, since it's possible to + /* NIG params will override non PFC params, since it's possible to * do transition from PFC to SAFC */ if (set_pfc) { @@ -2529,7 +2142,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params, if (CHIP_IS_E3(bp)) ppp_enable = 0; else - ppp_enable = 1; + ppp_enable = 1; xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN : NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN); xcm_out_en = 0; @@ -2539,7 +2152,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params, llfc_out_en = nig_params->llfc_out_en; llfc_enable = nig_params->llfc_enable; pause_enable = nig_params->pause_enable; - } else /*defaul non PFC mode - PAUSE */ + } else /* Default non PFC mode - PAUSE */ pause_enable = 1; xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN : @@ -2566,7 +2179,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params, REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 : NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7); - /* output enable for RX_XCM # IF */ + /* Output enable for RX_XCM # IF */ REG_WR(bp, port ? NIG_REG_XCM1_OUT_EN : NIG_REG_XCM0_OUT_EN, xcm_out_en); @@ -2599,14 +2212,12 @@ int bnx2x_update_pfc(struct link_params *params, struct link_vars *vars, struct bnx2x_nig_brb_pfc_port_params *pfc_params) { - /* - * The PFC and pause are orthogonal to one another, meaning when + /* The PFC and pause are orthogonal to one another, meaning when * PFC is enabled, the pause is disabled, and when PFC is * disabled, pause is set according to the pause result.
*/ u32 val; struct bnx2x *bp = params->bp; - int bnx2x_status = 0; u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC); if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) @@ -2616,28 +2227,25 @@ int bnx2x_update_pfc(struct link_params *params, bnx2x_update_mng(params, vars->link_status); - /* update NIG params */ + /* Update NIG params */ bnx2x_update_pfc_nig(params, vars, pfc_params); - /* update BRB params */ - bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params); - if (bnx2x_status) - return bnx2x_status; - if (!vars->link_up) - return bnx2x_status; + return 0; DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n"); - if (CHIP_IS_E3(bp)) - bnx2x_update_pfc_xmac(params, vars, 0); - else { + + if (CHIP_IS_E3(bp)) { + if (vars->mac_type == MAC_TYPE_XMAC) + bnx2x_update_pfc_xmac(params, vars, 0); + } else { val = REG_RD(bp, MISC_REG_RESET_REG_2); if ((val & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) == 0) { DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n"); bnx2x_emac_enable(params, vars, 0); - return bnx2x_status; + return 0; } if (CHIP_IS_E2(bp)) bnx2x_update_pfc_bmac2(params, vars, bmac_loopback); @@ -2651,10 +2259,9 @@ int bnx2x_update_pfc(struct link_params *params, val = 1; REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val); } - return bnx2x_status; + return 0; } - static int bnx2x_bmac1_enable(struct link_params *params, struct link_vars *vars, u8 is_lb) @@ -2674,7 +2281,7 @@ static int bnx2x_bmac1_enable(struct link_params *params, REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL, wb_data, 2); - /* tx MAC SA */ + /* TX MAC SA */ wb_data[0] = ((params->mac_addr[2] << 24) | (params->mac_addr[3] << 16) | (params->mac_addr[4] << 8) | @@ -2683,7 +2290,7 @@ static int bnx2x_bmac1_enable(struct link_params *params, params->mac_addr[1]); REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2); - /* mac control */ + /* MAC control */ val = 0x3; if (is_lb) { val |= 0x4; @@ -2693,24 +2300,24 @@ static int bnx2x_bmac1_enable(struct link_params *params, wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2); - /* set rx mtu */ + /* Set rx mtu */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2); bnx2x_update_pfc_bmac1(params, vars); - /* set tx mtu */ + /* Set tx mtu */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2); - /* set cnt max size */ + /* Set cnt max size */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2); - /* configure safc */ + /* Configure SAFC */ wb_data[0] = 0x1000200; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, @@ -2744,7 +2351,7 @@ static int bnx2x_bmac2_enable(struct link_params *params, udelay(30); - /* tx MAC SA */ + /* TX MAC SA */ wb_data[0] = ((params->mac_addr[2] << 24) | (params->mac_addr[3] << 16) | (params->mac_addr[4] << 8) | @@ -2763,18 +2370,18 @@ static int bnx2x_bmac2_enable(struct link_params *params, wb_data, 2); udelay(30); - /* set rx mtu */ + /* Set RX MTU */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2); udelay(30); - /* set tx mtu */ + /* Set TX MTU */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + 
BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2); udelay(30); - /* set cnt max size */ + /* Set cnt max size */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2); @@ -2786,21 +2393,23 @@ static int bnx2x_bmac2_enable(struct link_params *params, static int bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars, - u8 is_lb) + u8 is_lb, u8 reset_bmac) { int rc = 0; u8 port = params->port; struct bnx2x *bp = params->bp; u32 val; - /* reset and unreset the BigMac */ - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, - (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); - msleep(1); + /* Reset and unreset the BigMac */ + if (reset_bmac) { + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + usleep_range(1000, 2000); + } REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); - /* enable access for bmac registers */ + /* Enable access for bmac registers */ REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1); /* Enable BMAC according to BMAC type*/ @@ -2827,38 +2436,29 @@ static int bnx2x_bmac_enable(struct link_params *params, return rc; } -static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) +static void bnx2x_set_bmac_rx(struct bnx2x *bp, u32 chip_id, u8 port, u8 en) { u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM : NIG_REG_INGRESS_BMAC0_MEM; u32 wb_data[2]; u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4); + if (CHIP_IS_E2(bp)) + bmac_addr += BIGMAC2_REGISTER_BMAC_CONTROL; + else + bmac_addr += BIGMAC_REGISTER_BMAC_CONTROL; /* Only if the bmac is out of reset */ if (REG_RD(bp, MISC_REG_RESET_REG_2) & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) && nig_bmac_enable) { - - if (CHIP_IS_E2(bp)) { - /* Clear Rx Enable bit in BMAC_CONTROL register */ - REG_RD_DMAE(bp, bmac_addr + - BIGMAC2_REGISTER_BMAC_CONTROL, - wb_data, 2); - wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; - REG_WR_DMAE(bp, bmac_addr + - BIGMAC2_REGISTER_BMAC_CONTROL, - wb_data, 2); - } else { - /* Clear Rx Enable bit in BMAC_CONTROL register */ - REG_RD_DMAE(bp, bmac_addr + - BIGMAC_REGISTER_BMAC_CONTROL, - wb_data, 2); + /* Clear Rx Enable bit in BMAC_CONTROL register */ + REG_RD_DMAE(bp, bmac_addr, wb_data, 2); + if (en) + wb_data[0] |= BMAC_CONTROL_RX_ENABLE; + else wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; - REG_WR_DMAE(bp, bmac_addr + - BIGMAC_REGISTER_BMAC_CONTROL, - wb_data, 2); - } - msleep(1); + REG_WR_DMAE(bp, bmac_addr, wb_data, 2); + usleep_range(1000, 2000); } } @@ -2870,17 +2470,16 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, u32 init_crd, crd; u32 count = 1000; - /* disable port */ + /* Disable port */ REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1); - /* wait for init credit */ + /* Wait for init credit */ init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4); crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8); DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd); while ((init_crd != crd) && count) { - msleep(5); - + usleep_range(5000, 10000); crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8); count--; } @@ -2897,18 +2496,18 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, line_speed == SPEED_1000 || line_speed == SPEED_2500) { REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1); - /* update threshold */ + /* Update threshold */ REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0); - /* update init credit */ + /* Update init 
credit */ init_crd = 778; /* (800-18-4) */ } else { u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)/16; REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); - /* update threshold */ + /* Update threshold */ REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh); - /* update init credit */ + /* Update init credit */ switch (line_speed) { case SPEED_10000: init_crd = thresh + 553 - 22; @@ -2923,12 +2522,12 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n", line_speed, init_crd); - /* probe the credit changes */ + /* Probe the credit changes */ REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1); - msleep(5); + usleep_range(5000, 10000); REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0); - /* enable port */ + /* Enable port */ REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0); return 0; } @@ -2995,7 +2594,7 @@ static int bnx2x_cl22_write(struct bnx2x *bp, REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode & ~EMAC_MDIO_MODE_CLAUSE_45); - /* address */ + /* Address */ tmp = ((phy->addr << 21) | (reg << 16) | val | EMAC_MDIO_COMM_COMMAND_WRITE_22 | EMAC_MDIO_COMM_START_BUSY); @@ -3031,7 +2630,7 @@ static int bnx2x_cl22_read(struct bnx2x *bp, REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode & ~EMAC_MDIO_MODE_CLAUSE_45); - /* address */ + /* Address */ val = ((phy->addr << 21) | (reg << 16) | EMAC_MDIO_COMM_COMMAND_READ_22 | EMAC_MDIO_COMM_START_BUSY); @@ -3066,10 +2665,17 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, u32 val; u16 i; int rc = 0; + u32 chip_id; + if (phy->flags & FLAGS_MDC_MDIO_WA_G) { + chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) | + ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12); + bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl); + } + if (phy->flags & FLAGS_MDC_MDIO_WA_B0) bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, EMAC_MDIO_STATUS_10MB); - /* address */ + /* Address */ val = ((phy->addr << 21) | (devad << 16) | reg | EMAC_MDIO_COMM_COMMAND_ADDRESS | EMAC_MDIO_COMM_START_BUSY); @@ -3090,7 +2696,7 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, *ret_val = 0; rc = -EFAULT; } else { - /* data */ + /* Data */ val = ((phy->addr << 21) | (devad << 16) | EMAC_MDIO_COMM_COMMAND_READ_45 | EMAC_MDIO_COMM_START_BUSY); @@ -3134,12 +2740,18 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, u32 tmp; u8 i; int rc = 0; + u32 chip_id; + if (phy->flags & FLAGS_MDC_MDIO_WA_G) { + chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) | + ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12); + bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl); + } + if (phy->flags & FLAGS_MDC_MDIO_WA_B0) bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, EMAC_MDIO_STATUS_10MB); - /* address */ - + /* Address */ tmp = ((phy->addr << 21) | (devad << 16) | reg | EMAC_MDIO_COMM_COMMAND_ADDRESS | EMAC_MDIO_COMM_START_BUSY); @@ -3159,7 +2771,7 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, netdev_err(bp->dev, "MDC/MDIO access timeout\n"); rc = -EFAULT; } else { - /* data */ + /* Data */ tmp = ((phy->addr << 21) | (devad << 16) | val | EMAC_MDIO_COMM_COMMAND_WRITE_45 | EMAC_MDIO_COMM_START_BUSY); @@ -3194,6 +2806,245 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, EMAC_MDIO_STATUS_10MB); return rc; } + +/******************************************************************/ +/* EEE section */ +/******************************************************************/ +static u8 bnx2x_eee_has_cap(struct 
link_params *params) +{ + struct bnx2x *bp = params->bp; + + if (REG_RD(bp, params->shmem2_base) <= + offsetof(struct shmem2_region, eee_status[params->port])) + return 0; + + return 1; +} + +static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer) +{ + switch (nvram_mode) { + case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED: + *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME; + break; + case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE: + *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME; + break; + case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY: + *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME; + break; + default: + *idle_timer = 0; + break; + } + + return 0; +} + +static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode) +{ + switch (idle_timer) { + case EEE_MODE_NVRAM_BALANCED_TIME: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED; + break; + case EEE_MODE_NVRAM_AGGRESSIVE_TIME: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE; + break; + case EEE_MODE_NVRAM_LATENCY_TIME: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY; + break; + default: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED; + break; + } + + return 0; +} + +static u32 bnx2x_eee_calc_timer(struct link_params *params) +{ + u32 eee_mode, eee_idle; + struct bnx2x *bp = params->bp; + + if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) { + if (params->eee_mode & EEE_MODE_OUTPUT_TIME) { + /* time value in eee_mode --> used directly*/ + eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK; + } else { + /* hsi value in eee_mode --> time */ + if (bnx2x_eee_nvram_to_time(params->eee_mode & + EEE_MODE_NVRAM_MASK, + &eee_idle)) + return 0; + } + } else { + /* hsi values in nvram --> time*/ + eee_mode = ((REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_feature_config[params->port]. 
+ eee_power_mode)) & + PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> + PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); + + if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle)) + return 0; + } + + return eee_idle; +} + +static int bnx2x_eee_set_timers(struct link_params *params, + struct link_vars *vars) +{ + u32 eee_idle = 0, eee_mode; + struct bnx2x *bp = params->bp; + + eee_idle = bnx2x_eee_calc_timer(params); + + if (eee_idle) { + REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2), + eee_idle); + } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) && + (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) && + (params->eee_mode & EEE_MODE_OUTPUT_TIME)) { + DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n"); + return -EINVAL; + } + + vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT); + if (params->eee_mode & EEE_MODE_OUTPUT_TIME) { + /* eee_idle in 1-usec units --> eee_status in 16-usec units */ + eee_idle >>= 4; + vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) | + SHMEM_EEE_TIME_OUTPUT_BIT; + } else { + if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode)) + return -EINVAL; + vars->eee_status |= eee_mode; + } + + return 0; +} + +static int bnx2x_eee_initial_config(struct link_params *params, + struct link_vars *vars, u8 mode) +{ + vars->eee_status |= ((u32) mode) << SHMEM_EEE_SUPPORTED_SHIFT; + + /* Propagate params' bits --> vars (for migration exposure) */ + if (params->eee_mode & EEE_MODE_ENABLE_LPI) + vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT; + else + vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT; + + if (params->eee_mode & EEE_MODE_ADV_LPI) + vars->eee_status |= SHMEM_EEE_REQUESTED_BIT; + else + vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT; + + return bnx2x_eee_set_timers(params, vars); +} + +static int bnx2x_eee_disable(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + + /* Make certain LPI is disabled */ + REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0); + + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x0); + + vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK; + + return 0; +} + +static int bnx2x_eee_advertise(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars, u8 modes) +{ + struct bnx2x *bp = params->bp; + u16 val = 0; + + /* Mask events preventing LPI generation */ + REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20); + + if (modes & SHMEM_EEE_10G_ADV) { + DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n"); + val |= 0x8; + } + if (modes & SHMEM_EEE_1G_ADV) { + DP(NETIF_MSG_LINK, "Advertise 1GBase-T EEE\n"); + val |= 0x4; + } + + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, val); + + vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK; + vars->eee_status |= (modes << SHMEM_EEE_ADV_STATUS_SHIFT); + + return 0; +} + +static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status) +{ + struct bnx2x *bp = params->bp; + + if (bnx2x_eee_has_cap(params)) + REG_WR(bp, params->shmem2_base + + offsetof(struct shmem2_region, + eee_status[params->port]), eee_status); +} + +static void bnx2x_eee_an_resolve(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 adv = 0, lp = 0; + u32 lp_adv = 0; + u8 neg = 0; + + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, &adv); + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LP_EEE_ADV, &lp); + + if (lp & 0x2) { + lp_adv |= SHMEM_EEE_100M_ADV; + if (adv & 0x2) { + if (vars->line_speed ==
SPEED_100) + neg = 1; + DP(NETIF_MSG_LINK, "EEE negotiated - 100M\n"); + } + } + if (lp & 0x14) { + lp_adv |= SHMEM_EEE_1G_ADV; + if (adv & 0x14) { + if (vars->line_speed == SPEED_1000) + neg = 1; + DP(NETIF_MSG_LINK, "EEE negotiated - 1G\n"); + } + } + if (lp & 0x68) { + lp_adv |= SHMEM_EEE_10G_ADV; + if (adv & 0x68) { + if (vars->line_speed == SPEED_10000) + neg = 1; + DP(NETIF_MSG_LINK, "EEE negotiated - 10G\n"); + } + } + + vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK; + vars->eee_status |= (lp_adv << SHMEM_EEE_LP_ADV_STATUS_SHIFT); + + if (neg) { + DP(NETIF_MSG_LINK, "EEE is active\n"); + vars->eee_status |= SHMEM_EEE_ACTIVE_BIT; + } + +} + /******************************************************************/ /* BSC access functions from E3 */ /******************************************************************/ @@ -3224,7 +3075,7 @@ static void bnx2x_bsc_module_sel(struct link_params *params) } static int bnx2x_bsc_read(struct link_params *params, - struct bnx2x_phy *phy, + struct bnx2x *bp, u8 sl_devid, u16 sl_addr, u8 lc_addr, @@ -3233,12 +3084,6 @@ static int bnx2x_bsc_read(struct link_params *params, { u32 val, i; int rc = 0; - struct bnx2x *bp = params->bp; - - if ((sl_devid != 0xa0) && (sl_devid != 0xa2)) { - DP(NETIF_MSG_LINK, "invalid sl_devid 0x%x\n", sl_devid); - return -EINVAL; - } if (xfer_cnt > 16) { DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n", @@ -3249,23 +3094,23 @@ static int bnx2x_bsc_read(struct link_params *params, xfer_cnt = 16 - lc_addr; - /* enable the engine */ + /* Enable the engine */ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); val |= MCPR_IMC_COMMAND_ENABLE; REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); - /* program slave device ID */ + /* Program slave device ID */ val = (sl_devid << 16) | sl_addr; REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val); - /* start xfer with 0 byte to update the address pointer ???*/ + /* Start xfer with 0 byte to update the address pointer ???*/ val = (MCPR_IMC_COMMAND_ENABLE) | (MCPR_IMC_COMMAND_WRITE_OP << MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0); REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); - /* poll for completion */ + /* Poll for completion */ i = 0; val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { @@ -3281,7 +3126,7 @@ static int bnx2x_bsc_read(struct link_params *params, if (rc == -EFAULT) return rc; - /* start xfer with read op */ + /* Start xfer with read op */ val = (MCPR_IMC_COMMAND_ENABLE) | (MCPR_IMC_COMMAND_READ_OP << MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | @@ -3289,7 +3134,7 @@ static int bnx2x_bsc_read(struct link_params *params, (xfer_cnt); REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); - /* poll for completion */ + /* Poll for completion */ i = 0; val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { @@ -3324,12 +3169,20 @@ static void bnx2x_cl45_read_or_write(struct bnx2x *bp, struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, devad, reg, val | or_val); } +static void bnx2x_cl45_read_and_write(struct bnx2x *bp, + struct bnx2x_phy *phy, + u8 devad, u16 reg, u16 and_val) +{ + u16 val; + bnx2x_cl45_read(bp, phy, devad, reg, &val); + bnx2x_cl45_write(bp, phy, devad, reg, val & and_val); +} + int bnx2x_phy_read(struct link_params *params, u8 phy_addr, u8 devad, u16 reg, u16 *ret_val) { u8 phy_index; - /* - * Probe for the phy according to the given phy_addr, and execute + /* Probe for the phy according to the given phy_addr, and 
execute * the read request on it */ for (phy_index = 0; phy_index < params->num_phys; phy_index++) { @@ -3346,8 +3199,7 @@ int bnx2x_phy_write(struct link_params *params, u8 phy_addr, u8 devad, u16 reg, u16 val) { u8 phy_index; - /* - * Probe for the phy according to the given phy_addr, and execute + /* Probe for the phy according to the given phy_addr, and execute * the write request on it */ for (phy_index = 0; phy_index < params->num_phys; phy_index++) { @@ -3373,7 +3225,7 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy, if (bnx2x_is_4_port_mode(bp)) { u32 port_swap, port_swap_ovr; - /*figure out path swap value */ + /* Figure out path swap value */ path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR); if (path_swap_ovr & 0x1) path_swap = (path_swap_ovr & 0x2); @@ -3383,7 +3235,7 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy, if (path_swap) path = path ^ 1; - /*figure out port swap value */ + /* Figure out port swap value */ port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR); if (port_swap_ovr & 0x1) port_swap = (port_swap_ovr & 0x2); @@ -3394,9 +3246,9 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy, port = port ^ 1; lane = (port<<1) + path; - } else { /* two port mode - no port swap */ + } else { /* Two port mode - no port swap */ - /*figure out path swap value */ + /* Figure out path swap value */ path_swap_ovr = REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR); if (path_swap_ovr & 0x1) { @@ -3428,8 +3280,7 @@ static void bnx2x_set_aer_mmd(struct link_params *params, if (USES_WARPCORE(bp)) { aer_val = bnx2x_get_warpcore_lane(phy, params); - /* - * In Dual-lane mode, two lanes are joined together, + /* In Dual-lane mode, two lanes are joined together, * so in order to configure them, the AER broadcast method is * used here. 
* 0x200 is the broadcast address for lanes 0,1 @@ -3473,7 +3324,7 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port) val = SERDES_RESET_BITS << (port*16); - /* reset and unreset the SerDes/XGXS */ + /* Reset and unreset the SerDes/XGXS */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); udelay(500); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); @@ -3484,6 +3335,21 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port) DEFAULT_PHY_DEV_ADDR); } +static void bnx2x_xgxs_specific_func(struct bnx2x_phy *phy, + struct link_params *params, + u32 action) +{ + struct bnx2x *bp = params->bp; + switch (action) { + case PHY_INIT: + /* Set correct devad */ + REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + params->port*0x18, 0); + REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port*0x18, + phy->def_md_devad); + break; + } +} + static void bnx2x_xgxs_deassert(struct link_params *params) { struct bnx2x *bp = params->bp; @@ -3494,14 +3360,12 @@ static void bnx2x_xgxs_deassert(struct link_params *params) val = XGXS_RESET_BITS << (port*16); - /* reset and unreset the SerDes/XGXS */ + /* Reset and unreset the SerDes/XGXS */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); udelay(500); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); - - REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0); - REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, - params->phy[INT_PHY].def_md_devad); + bnx2x_xgxs_specific_func(&params->phy[INT_PHY], params, + PHY_INIT); } static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy, @@ -3509,20 +3373,25 @@ { struct bnx2x *bp = params->bp; *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; - /** - * resolve pause mode and advertisement Please refer to Table + /* Resolve pause mode and advertisement. Please refer to Table * 28B-3 of the 802.3ab-1999 spec */ switch (phy->req_flow_ctrl) { case BNX2X_FLOW_CTRL_AUTO: - if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) + switch (params->req_fc_auto_adv) { + case BNX2X_FLOW_CTRL_BOTH: *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; - else + break; + case BNX2X_FLOW_CTRL_RX: + case BNX2X_FLOW_CTRL_TX: *ieee_fc |= - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; + break; + default: + break; + } break; - case BNX2X_FLOW_CTRL_TX: *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; break; @@ -3587,7 +3456,7 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params, { u16 val; struct bnx2x *bp = params->bp; - /* read modify write pause advertizing */ + /* Read modify write pause advertising */ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val); val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH; @@ -3633,47 +3502,81 @@ static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE; if (pause_result & (1<<1)) vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE; + } -static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) +static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { - struct bnx2x *bp = params->bp; u16 ld_pause; /* local */ u16 lp_pause; /* link partner */ u16 pause_result; - u8 ret = 0; - /* read twice */ + struct bnx2x *bp = params->bp; + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) { + bnx2x_cl22_read(bp, 
phy, 0x4, &ld_pause); + bnx2x_cl22_read(bp, phy, 0x5, &lp_pause); + } else if (CHIP_IS_E3(bp) && + SINGLE_MEDIA_DIRECT(params)) { + u8 lane = bnx2x_get_warpcore_lane(phy, params); + u16 gp_status, gp_mask; + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_4, + &gp_status); + gp_mask = (MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL | + MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP) << + lane; + if ((gp_status & gp_mask) == gp_mask) { + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_ADV_PAUSE, &ld_pause); + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG, &lp_pause); + } else { + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_CL37_FC_LD, &ld_pause); + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_CL37_FC_LP, &lp_pause); + ld_pause = ((ld_pause & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) + << 3); + lp_pause = ((lp_pause & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) + << 3); + } + } else { + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_ADV_PAUSE, &ld_pause); + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG, &lp_pause); + } + pause_result = (ld_pause & + MDIO_AN_REG_ADV_PAUSE_MASK) >> 8; + pause_result |= (lp_pause & + MDIO_AN_REG_ADV_PAUSE_MASK) >> 10; + DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n", pause_result); + bnx2x_pause_resolve(vars, pause_result); - vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; +} - if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) +static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + u8 ret = 0; + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) { + /* Update the advertised flow-control of LD/LP in AN */ + if (phy->req_line_speed == SPEED_AUTO_NEG) + bnx2x_ext_phy_update_adv_fc(phy, params, vars); + /* But set the flow-control result as the requested one */ vars->flow_ctrl = phy->req_flow_ctrl; - else if (phy->req_line_speed != SPEED_AUTO_NEG) + } else if (phy->req_line_speed != SPEED_AUTO_NEG) vars->flow_ctrl = params->req_fc_auto_adv; else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { ret = 1; - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) { - bnx2x_cl22_read(bp, phy, - 0x4, &ld_pause); - bnx2x_cl22_read(bp, phy, - 0x5, &lp_pause); - } else { - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_ADV_PAUSE, &ld_pause); - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_LP_AUTO_NEG, &lp_pause); - } - pause_result = (ld_pause & - MDIO_AN_REG_ADV_PAUSE_MASK) >> 8; - pause_result |= (lp_pause & - MDIO_AN_REG_ADV_PAUSE_MASK) >> 10; - DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n", - pause_result); - bnx2x_pause_resolve(vars, pause_result); + bnx2x_ext_phy_update_adv_fc(phy, params, vars); } return ret; } @@ -3685,52 +3588,182 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy, * init configuration, and set/clear SGMII flag. Internal * phy init is done purely in phy_init stage. 
*/ +#define WC_TX_DRIVER(post2, idriver, ipre) \ + ((post2 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | \ + (idriver << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | \ + (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)) + +#define WC_TX_FIR(post, main, pre) \ + ((post << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | \ + (main << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | \ + (pre << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET)) + +static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 i; + static struct bnx2x_reg_set reg_set[] = { + /* Step 1 - Program the TX/RX alignment markers */ + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0xa157}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xcbe2}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0x7537}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0xa157}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xcbe2}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0x7537}, + /* Step 2 - Configure the NP registers */ + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000a}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6400}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0620}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0157}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x6464}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x3150}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x3150}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0157}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0620} + }; + DP(NETIF_MSG_LINK, "Enabling 20G-KR2\n"); + + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL49_USERB0_CTRL, (3<<6)); + + for (i = 0; i < ARRAY_SIZE(reg_set); i++) + bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, + reg_set[i].val); + + /* Start KR2 work-around timer which handles BCM8073 link-partner */ + vars->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE; + bnx2x_update_link_attr(params, vars->link_attr_sync); +} + +static void bnx2x_disable_kr2(struct link_params *params, + struct link_vars *vars, + struct bnx2x_phy *phy) +{ + struct bnx2x *bp = params->bp; + int i; + static struct bnx2x_reg_set reg_set[] = { + /* Step 1 - Program the TX/RX alignment markers */ + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000} + }; + DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n"); + + for (i = 0; i < ARRAY_SIZE(reg_set); i++) + bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, + reg_set[i].val); + vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; + bnx2x_update_link_attr(params, vars->link_attr_sync); + + vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT; +} + 
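A note on the idiom the two functions above introduce: instead of long runs of individual MDIO writes, each sequence is expressed as a static table of {devad, reg, val} triplets applied in one loop, which keeps the KR2 enable and disable paths symmetric and easy to audit. The self-contained sketch below shows only the shape; struct reg_set and mdio_write() mirror the driver's struct bnx2x_reg_set and bnx2x_cl45_write() in form only, and the register numbers are hypothetical examples rather than real MDIO_WC_* values.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct reg_set {
	uint8_t devad;	/* clause-45 MMD (device) address */
	uint16_t reg;	/* register within that MMD */
	uint16_t val;	/* value to program */
};

/* Stand-in for the driver's clause-45 MDIO write */
static void mdio_write(uint8_t devad, uint16_t reg, uint16_t val)
{
	printf("cl45 wr devad=%u reg=0x%04x val=0x%04x\n", devad, reg, val);
}

int main(void)
{
	/* Hypothetical sequence; the real tables above program the TX/RX
	 * alignment markers and the CL73 next-page registers. */
	static const struct reg_set seq[] = {
		{ 0x3, 0x8368, 0xa157 },
		{ 0x3, 0x836b, 0xcbe2 },
	};
	size_t i;

	for (i = 0; i < sizeof(seq) / sizeof(seq[0]); i++)
		mdio_write(seq[i].devad, seq[i].reg, seq[i].val);
	return 0;
}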
+static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + + DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n"); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_EEE_COMBO_CONTROL0, 0x7c); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC5, 0xc000); +} + +static void bnx2x_warpcore_restart_AN_KR(struct bnx2x_phy *phy, + struct link_params *params) +{ + /* Restart autoneg on the leading lane only */ + struct bnx2x *bp = params->bp; + u16 lane = bnx2x_get_warpcore_lane(phy, params); + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, lane); + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200); + + /* Restore AER */ + bnx2x_set_aer_mmd(params, phy); +} + static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { - u16 val16 = 0, lane, bam37 = 0; - struct bnx2x *bp = params->bp; + u16 lane, i, cl72_ctrl, an_adv = 0, val; + u32 wc_lane_config; + struct bnx2x *bp = params->bp; + static struct bnx2x_reg_set reg_set[] = { + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, + {MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415}, + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190}, + /* Disable Autoneg: re-enable it after adv is done. */ + {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0}, + {MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0}, + }; DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n"); + /* Set to default registers that may be overridden by 10G force */ + for (i = 0; i < ARRAY_SIZE(reg_set); i++) + bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, + reg_set[i].val); - /* Disable Autoneg: re-enable it after adv is done. 
*/ - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, - MDIO_WC_REG_IEEE0BLK_MIICNTL, 0); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &cl72_ctrl); + cl72_ctrl &= 0x08ff; + cl72_ctrl |= 0x3800; + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, cl72_ctrl); /* Check adding advertisement for 1G KX */ if (((vars->line_speed == SPEED_AUTO_NEG) && (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || (vars->line_speed == SPEED_1000)) { - u16 sd_digital; - val16 |= (1<<5); + u16 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2; + an_adv |= (1<<5); /* Enable CL37 1G Parallel Detect */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &sd_digital); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, - (sd_digital | 0x1)); - + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1); DP(NETIF_MSG_LINK, "Advertize 1G\n"); } if (((vars->line_speed == SPEED_AUTO_NEG) && (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) || (vars->line_speed == SPEED_10000)) { /* Check adding advertisement for 10G KR */ - val16 |= (1<<7); + an_adv |= (1<<7); /* Enable 10G Parallel Detect */ - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, - MDIO_WC_REG_PAR_DET_10G_CTRL, 1); + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_PAR_DET_10G_CTRL, 1); + bnx2x_set_aer_mmd(params, phy); DP(NETIF_MSG_LINK, "Advertize 10G\n"); } /* Set Transmit PMD settings */ lane = bnx2x_get_warpcore_lane(phy, params); bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, - ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | - (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | - (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))); + MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, + WC_TX_DRIVER(0x02, 0x06, 0x09)); + /* Configure the next lane if dual mode */ + if (phy->flags & FLAGS_WC_DUAL_MODE) + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX0_TX_DRIVER + 0x10*(lane+1), + WC_TX_DRIVER(0x02, 0x06, 0x09)); bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL, 0x03f0); @@ -3740,7 +3773,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, /* Advertised speeds */ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, - MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16); + MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, an_adv); /* Advertised and set FEC (Forward Error Correction) */ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, @@ -3753,92 +3786,112 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, offsetof(struct shmem_region, dev_info. 
port_hw_config[params->port].default_cfg)) & PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) { - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, &bam37); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, bam37 | 1); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, + 1); DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n"); } /* Advertise pause */ bnx2x_ext_phy_set_pause(params, phy, vars); - - /* - * Set KR Autoneg Work-Around flag for Warpcore version older than D108 - */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_UC_INFO_B1_VERSION, &val16); - if (val16 < 0xd108) { - DP(NETIF_MSG_LINK, "Enable AN KR work-around\n"); - vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; - } - - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL5_MISC7, &val16); - - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL5_MISC7, val16 | 0x100); + vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC7, 0x100); /* Over 1G - AN local device user page 1 */ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1f); - /* Enable Autoneg */ - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, - MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1000); - -} + if (((phy->req_line_speed == SPEED_AUTO_NEG) && + (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) || + (phy->req_line_speed == SPEED_20000)) { -static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u16 val; + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, lane); - /* Disable Autoneg */ - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX1_PCI_CTRL + (0x10*lane), + (1<<11)); - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, - MDIO_WC_REG_PAR_DET_10G_CTRL, 0); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXS_X2_CONTROL3, 0x7); + bnx2x_set_aer_mmd(params, phy); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0x3f00); + bnx2x_warpcore_enable_AN_KR2(phy, params, vars); + } else { + /* Enable Auto-Detect to support 1G over CL37 as well */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10); + wc_lane_config = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. + shared_hw_config.wc_lane_config)); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4), &val); + /* Force cl48 sync_status LOW to avoid getting stuck in CL73 + * parallel-detect loop when CL73 and CL37 are enabled. 
+ */ + val |= 1 << 11; - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, - MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0); + /* Restore Polarity settings in case it was run over by + * previous link owner + */ + if (wc_lane_config & + (SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED << lane)) + val |= 3 << 2; + else + val &= ~(3 << 2); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4), + val); - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, - MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0); + bnx2x_disable_kr2(params, vars, phy); + } - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL3_UP1, 0x1); + /* Enable Autoneg: only on the main lane */ + bnx2x_warpcore_restart_AN_KR(phy, params); +} - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL5_MISC7, 0xa); +static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 val16, i, lane; + static struct bnx2x_reg_set reg_set[] = { + /* Disable Autoneg */ + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, + 0x3f00}, + {MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0}, + {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1}, + {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa}, + /* Leave cl72 training enable, needed for KR */ + {MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2} + }; + + for (i = 0; i < ARRAY_SIZE(reg_set); i++) + bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, + reg_set[i].val); + lane = bnx2x_get_warpcore_lane(phy, params); + /* Global registers */ + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); /* Disable CL36 PCS Tx */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16); + val16 &= ~(0x0011 << lane); bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0); - - /* Double Wide Single Data Rate @ pll rate */ - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF); - - /* Leave cl72 training enable, needed for KR */ - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, - MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150, - 0x2); + MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16); - /* Leave CL72 enabled */ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, - &val); + MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16); + val16 |= (0x0303 << (lane << 1)); bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, - val | 0x3800); - + MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16); + /* Restore AER */ + bnx2x_set_aer_mmd(params, phy); /* Set speed via PMA/PMD register */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040); @@ -3846,7 +3899,7 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB); - /*Enable encoded forced speed */ + /* Enable encoded forced speed */ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30); @@ -3858,7 +3911,7 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy, bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0xF9); - /* set and clear loopback to cause a reset to 64/66 decoder */ + /* Set and clear loopback to cause a reset to 64/66 decoder */ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 
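The table-driven register writes above (a static reg_set[] walked in an ARRAY_SIZE() loop) rely on struct bnx2x_reg_set, defined outside this hunk. A minimal shape consistent with its {devad, reg, val} initializers:

	struct bnx2x_reg_set {
		u8  devad;
		u16 reg;
		u16 val;
	};
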
0x4000); bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, @@ -3872,45 +3925,35 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy, { struct bnx2x *bp = params->bp; u16 misc1_val, tap_val, tx_driver_val, lane, val; + u32 cfg_tap_val, tx_drv_brdct, tx_equal; + /* Hold rxSeqStart */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val | 0x8000)); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000); /* Hold tx_fifo_reset */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, (val | 0x1)); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x1); /* Disable CL73 AN */ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0); /* Disable 100FX Enable and Auto-Detect */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_FX100_CTRL1, &val); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA)); + bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_FX100_CTRL1, 0xFFFA); /* Disable 100FX Idle detect */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_FX100_CTRL3, &val); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_FX100_CTRL3, (val | 0x0080)); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_FX100_CTRL3, 0x0080); /* Set Block address to Remote PHY & Clear forced_speed[5] */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL4_MISC3, &val); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL4_MISC3, (val & 0xFF7F)); + bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, 0xFF7F); /* Turn off auto-detect & fiber mode */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, - (val & 0xFFEE)); + bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, + 0xFFEE); /* Set filter_force_link, disable_false_link and parallel_detect */ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, @@ -3927,23 +3970,33 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy, if (is_xfi) { misc1_val |= 0x5; - tap_val = ((0x08 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | - (0x37 << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | - (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET)); - tx_driver_val = - ((0x00 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | - (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | - (0x03 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)); - + tap_val = WC_TX_FIR(0x08, 0x37, 0x00); + tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03); } else { + cfg_tap_val = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_hw_config[params->port]. 
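bnx2x_cl45_read_and_write(), used above with masks such as 0xFFFA and 0xFF7F, is the AND counterpart of the OR helper sketched earlier; again an inferred shape rather than the patch's verbatim definition:

	static void bnx2x_cl45_read_and_write(struct bnx2x *bp,
					      struct bnx2x_phy *phy,
					      u8 devad, u16 reg, u16 and_val)
	{
		u16 val;

		bnx2x_cl45_read(bp, phy, devad, reg, &val);
		bnx2x_cl45_write(bp, phy, devad, reg, val & and_val);
	}
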
+ sfi_tap_values)); + + tx_equal = cfg_tap_val & PORT_HW_CFG_TX_EQUALIZATION_MASK; + + tx_drv_brdct = (cfg_tap_val & + PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >> + PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT; + misc1_val |= 0x9; - tap_val = ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | - (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | - (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET)); - tx_driver_val = - ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | - (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | - (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)); + + /* TAP values are controlled by nvram, if value there isn't 0 */ + if (tx_equal) + tap_val = (u16)tx_equal; + else + tap_val = WC_TX_FIR(0x0f, 0x2b, 0x02); + + if (tx_drv_brdct) + tx_driver_val = WC_TX_DRIVER(0x03, (u16)tx_drv_brdct, + 0x06); + else + tx_driver_val = WC_TX_DRIVER(0x03, 0x02, 0x06); } bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val); @@ -3958,38 +4011,79 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy, tx_driver_val); /* Enable fiber mode, enable and invert sig_det */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, val | 0xd); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0xd); /* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL4_MISC3, &val); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL4_MISC3, val | 0x8080); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, 0x8080); + + bnx2x_warpcore_set_lpi_passthrough(phy, params); /* 10G XFI Full Duplex */ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x100); /* Release tx_fifo_reset */ + bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, + 0xFFFE); + /* Release rxSeqStart */ + bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x7FFF); +} + +static void bnx2x_warpcore_set_20G_force_KR2(struct bnx2x_phy *phy, + struct link_params *params) +{ + u16 val; + struct bnx2x *bp = params->bp; + /* Set global registers, so set AER lane to 0 */ + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + + /* Disable sequencer */ + bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, ~(1<<13)); + + bnx2x_set_aer_mmd(params, phy); + + bnx2x_cl45_read_and_write(bp, phy, MDIO_PMA_DEVAD, + MDIO_WC_REG_PMD_KR_CONTROL, ~(1<<1)); + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_CTRL, 0); + /* Turn off CL73 */ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val); + MDIO_WC_REG_CL73_USERB0_CTRL, &val); + val &= ~(1<<5); + val |= (1<<6); bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, val & 0xFFFE); + MDIO_WC_REG_CL73_USERB0_CTRL, val); + + /* Set 20G KR2 force speed */ + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x1f); + + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, (1<<7)); - /* Release rxSeqStart */ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val); + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &val); + val &= ~(3<<14); + val |= (1<<15); bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - 
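WC_TX_FIR() packs the FIR tap fields the same way WC_TX_DRIVER() packs the driver fields; reconstructed here from the shift/OR form it replaces, so again an assumption rather than the patch's verbatim macro. Note that the SFI branch above now prefers board-specific tap and driver values from nvram (sfi_tap_values) and falls back to the compiled-in defaults only when those fields are zero:

	#define WC_TX_FIR(post, main, pre) \
		(((post) << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | \
		 ((main) << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | \
		 ((pre)  << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET))
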
MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val & 0x7FFF)); -} + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0x835A); -static void bnx2x_warpcore_set_20G_KR2(struct bnx2x *bp, - struct bnx2x_phy *phy) -{ - DP(NETIF_MSG_LINK, "KR2 still not supported !!!\n"); + /* Enable sequencer (over lane 0) */ + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, (1<<13)); + + bnx2x_set_aer_mmd(params, phy); } static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp, @@ -4039,15 +4133,11 @@ static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp, /* Set Transmit PMD settings */ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_TX_FIR_TAP, - ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | - (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | - (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET) | - MDIO_WC_REG_TX_FIR_TAP_ENABLE)); + (WC_TX_FIR(0x12, 0x2d, 0x00) | + MDIO_WC_REG_TX_FIR_TAP_ENABLE)); bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, - ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | - (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | - (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))); + MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, + WC_TX_DRIVER(0x02, 0x02, 0x02)); } static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy, @@ -4059,18 +4149,16 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy, u16 val16, digctrl_kx1, digctrl_kx2; /* Clear XFI clock comp in non-10G single lane mode. */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_RX66_CONTROL, &val16); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13)); + bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_CONTROL, ~(3<<13)); + + bnx2x_warpcore_set_lpi_passthrough(phy, params); if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) { /* SGMII Autoneg */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_COMBO_IEEE0_MIICTRL, - val16 | 0x1000); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, + 0x1000); DP(NETIF_MSG_LINK, "set SGMII AUTONEG\n"); } else { bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, @@ -4157,40 +4245,35 @@ static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy, u16 lane) { struct bnx2x *bp = params->bp; - u16 val16; - + u16 i; + static struct bnx2x_reg_set wc_regs[] = { + {MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL1, 0x014a}, + {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL3, 0x0800}, + {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL4_MISC3, 0x8008}, + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, + 0x0195}, + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, + 0x0007}, + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, + 0x0002}, + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000}, + {MDIO_WC_DEVAD, MDIO_WC_REG_TX_FIR_TAP, 0x0000}, + {MDIO_WC_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040}, + {MDIO_WC_DEVAD, MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140} + }; /* Set XFI clock comp as default. 
*/ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_RX66_CONTROL, &val16); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_RX66_CONTROL, val16 | (3<<13)); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_CONTROL, (3<<13)); + + for (i = 0; i < ARRAY_SIZE(wc_regs); i++) + bnx2x_cl45_write(bp, phy, wc_regs[i].devad, wc_regs[i].reg, + wc_regs[i].val); - bnx2x_warpcore_reset_lane(bp, phy, 1); - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_FX100_CTRL1, 0x014a); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_FX100_CTRL3, 0x0800); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL4_MISC3, 0x8008); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x0195); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x0007); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x0002); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000); lane = bnx2x_get_warpcore_lane(phy, params); bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_TX_FIR_TAP, 0x0000); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140); - bnx2x_warpcore_reset_lane(bp, phy, 0); + } static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp, @@ -4208,8 +4291,7 @@ static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp, PORT_HW_CFG_E3_MOD_ABS_MASK) >> PORT_HW_CFG_E3_MOD_ABS_SHIFT; - /* - * Should not happen. This function called upon interrupt + /* Should not happen. This function called upon interrupt * triggered by GPIO ( since EPIO can only generate interrupts * to MCP). * So if this function was called and none of the GPIOs was set, @@ -4218,7 +4300,7 @@ static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp, if ((cfg_pin < PIN_CFG_GPIO0_P0) || (cfg_pin > PIN_CFG_GPIO3_P1)) { DP(NETIF_MSG_LINK, - "ERROR: Invalid cfg pin %x for module detect indication\n", + "No cfg pin %x for module detect indication\n", cfg_pin); return -EINVAL; } @@ -4229,7 +4311,7 @@ static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp, *gpio_num = MISC_REGISTERS_GPIO_3; *gpio_port = port; } - DP(NETIF_MSG_LINK, "MOD_ABS int GPIO%d_P%d\n", *gpio_num, *gpio_port); + return 0; } @@ -4252,7 +4334,7 @@ static int bnx2x_is_sfp_module_plugged(struct bnx2x_phy *phy, return 0; } static int bnx2x_warpcore_get_sigdet(struct bnx2x_phy *phy, - struct link_params *params) + struct link_params *params) { u16 gp2_status_reg0, lane; struct bnx2x *bp = params->bp; @@ -4266,26 +4348,20 @@ static int bnx2x_warpcore_get_sigdet(struct bnx2x_phy *phy, } static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) + struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; u32 serdes_net_if; u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0; - u16 lane = bnx2x_get_warpcore_lane(phy, params); vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 
0 : 1; if (!vars->turn_to_run_wc_rt) return; - /* return if there is no link partner */ - if (!(bnx2x_warpcore_get_sigdet(phy, params))) { - DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n"); - return; - } - if (vars->rx_tx_asic_rst) { + u16 lane = bnx2x_get_warpcore_lane(phy, params); serdes_net_if = (REG_RD(bp, params->shmem_base + offsetof(struct shmem_region, dev_info. port_hw_config[params->port].default_cfg)) & @@ -4295,25 +4371,19 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy, case PORT_HW_CFG_NET_SERDES_IF_KR: /* Do we get link yet? */ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 0x81d1, - &gp_status1); + &gp_status1); lnkup = (gp_status1 >> (8+lane)) & 0x1;/* 1G */ /*10G KR*/ lnkup_kr = (gp_status1 >> (12+lane)) & 0x1; - DP(NETIF_MSG_LINK, - "gp_status1 0x%x\n", gp_status1); - if (lnkup_kr || lnkup) { - vars->rx_tx_asic_rst = 0; - DP(NETIF_MSG_LINK, - "link up, rx_tx_asic_rst 0x%x\n", - vars->rx_tx_asic_rst); + vars->rx_tx_asic_rst = 0; } else { - /*reset the lane to see if link comes up.*/ + /* Reset the lane to see if link comes up.*/ bnx2x_warpcore_reset_lane(bp, phy, 1); bnx2x_warpcore_reset_lane(bp, phy, 0); - /* restart Autoneg */ + /* Restart Autoneg */ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200); @@ -4330,6 +4400,43 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy, } /*params->rx_tx_asic_rst*/ } +static void bnx2x_warpcore_config_sfi(struct bnx2x_phy *phy, + struct link_params *params) +{ + u16 lane = bnx2x_get_warpcore_lane(phy, params); + struct bnx2x *bp = params->bp; + bnx2x_warpcore_clear_regs(phy, params, lane); + if ((params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)] == + SPEED_10000) && + (phy->media_type != ETH_PHY_SFP_1G_FIBER)) { + DP(NETIF_MSG_LINK, "Setting 10G SFI\n"); + bnx2x_warpcore_set_10G_XFI(phy, params, 0); + } else { + DP(NETIF_MSG_LINK, "Setting 1G Fiber\n"); + bnx2x_warpcore_set_sgmii_speed(phy, params, 1, 0); + } +} + +static void bnx2x_sfp_e3_set_transmitter(struct link_params *params, + struct bnx2x_phy *phy, + u8 tx_en) +{ + struct bnx2x *bp = params->bp; + u32 cfg_pin; + u8 port = params->port; + + cfg_pin = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_sfp_ctrl)) & + PORT_HW_CFG_E3_TX_LASER_MASK; + /* Set the !tx_en since this pin is DISABLE_TX_LASER */ + DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en); + + /* For 20G, the expected pin to be used is 3 pins after the current */ + bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1); + if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G) + bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1); +} static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, struct link_params *params, @@ -4347,7 +4454,7 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, "serdes_net_if = 0x%x\n", vars->line_speed, serdes_net_if); bnx2x_set_aer_mmd(params, phy); - + bnx2x_warpcore_reset_lane(bp, phy, 1); vars->phy_flags |= PHY_XGXS_FLAG; if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) || (phy->req_line_speed && @@ -4361,7 +4468,7 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, switch (serdes_net_if) { case PORT_HW_CFG_NET_SERDES_IF_KR: /* Enable KR Auto Neg */ - if (params->loopback_mode == LOOPBACK_NONE) + if (params->loopback_mode != LOOPBACK_EXT) bnx2x_warpcore_enable_AN_KR(phy, params, vars); else { DP(NETIF_MSG_LINK, "Setting KR 10G-Force\n"); @@ -4391,19 +4498,20 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, break; case 
PORT_HW_CFG_NET_SERDES_IF_SFI: - - bnx2x_warpcore_clear_regs(phy, params, lane); - if (vars->line_speed == SPEED_10000) { - DP(NETIF_MSG_LINK, "Setting 10G SFI\n"); - bnx2x_warpcore_set_10G_XFI(phy, params, 0); - } else if (vars->line_speed == SPEED_1000) { - DP(NETIF_MSG_LINK, "Setting 1G Fiber\n"); - bnx2x_warpcore_set_sgmii_speed( - phy, params, 1, 0); + /* Issue Module detection if module is plugged, or + * enabled transmitter to avoid current leakage in case + * no module is connected + */ + if ((params->loopback_mode == LOOPBACK_NONE) || + (params->loopback_mode == LOOPBACK_EXT)) { + if (bnx2x_is_sfp_module_plugged(phy, params)) + bnx2x_sfp_module_detection(phy, params); + else + bnx2x_sfp_e3_set_transmitter(params, + phy, 1); } - /* Issue Module detection */ - if (bnx2x_is_sfp_module_plugged(phy, params)) - bnx2x_sfp_module_detection(phy, params); + + bnx2x_warpcore_config_sfi(phy, params); break; case PORT_HW_CFG_NET_SERDES_IF_DXGXS: @@ -4417,16 +4525,14 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, bnx2x_sfp_module_detection(phy, params); break; - case PORT_HW_CFG_NET_SERDES_IF_KR2: - if (vars->line_speed != SPEED_20000) { - DP(NETIF_MSG_LINK, "Speed not supported yet\n"); - return; + if (!params->loopback_mode) { + bnx2x_warpcore_enable_AN_KR(phy, params, vars); + } else { + DP(NETIF_MSG_LINK, "Setting KR 20G-Force\n"); + bnx2x_warpcore_set_20G_force_KR2(phy, params); } - DP(NETIF_MSG_LINK, "Setting 20G KR2\n"); - bnx2x_warpcore_set_20G_KR2(bp, phy); break; - default: DP(NETIF_MSG_LINK, "Unsupported Serdes Net Interface 0x%x\n", @@ -4440,67 +4546,58 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Exit config init\n"); } -static void bnx2x_sfp_e3_set_transmitter(struct link_params *params, - struct bnx2x_phy *phy, - u8 tx_en) -{ - struct bnx2x *bp = params->bp; - u32 cfg_pin; - u8 port = params->port; - - cfg_pin = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[port].e3_sfp_ctrl)) & - PORT_HW_CFG_TX_LASER_MASK; - /* Set the !tx_en since this pin is DISABLE_TX_LASER */ - DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en); - /* For 20G, the expected pin to be used is 3 pins after the current */ - - bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1); - if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G) - bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1); -} - static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy, struct link_params *params) { struct bnx2x *bp = params->bp; - u16 val16; + u16 val16, lane; bnx2x_sfp_e3_set_transmitter(params, phy, 0); - bnx2x_set_mdio_clk(bp, params->chip_id, params->port); + bnx2x_set_mdio_emac_per_phy(bp, params); bnx2x_set_aer_mmd(params, phy); /* Global register */ bnx2x_warpcore_reset_lane(bp, phy, 1); /* Clear loopback settings (if any) */ /* 10G & 20G */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 & - 0xBFFF); + bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0xBFFF); - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 & 0xfffe); + bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0xfffe); /* Update those 1-copy registers */ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, MDIO_AER_BLOCK_AER_REG, 0); - /* Enable 1G MDIO (1-copy) */ + /* 
Enable 1G MDIO (1-copy) */ + bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, + ~0x10); + + bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL2, 0xff00); + lane = bnx2x_get_warpcore_lane(phy, params); + /* Disable CL36 PCS Tx */ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, - &val16); + MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16); + val16 |= (0x11 << lane); + if (phy->flags & FLAGS_WC_DUAL_MODE) + val16 |= (0x22 << lane); bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, - val16 & ~0x10); + MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16); bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16); + MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16); + val16 &= ~(0x0303 << (lane << 1)); + val16 |= (0x0101 << (lane << 1)); + if (phy->flags & FLAGS_WC_DUAL_MODE) { + val16 &= ~(0x0c0c << (lane << 1)); + val16 |= (0x0404 << (lane << 1)); + } + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK1_LANECTRL2, - val16 & 0xff00); + MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16); + /* Restore AER */ + bnx2x_set_aer_mmd(params, phy); } @@ -4513,47 +4610,44 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Setting Warpcore loopback type %x, speed %d\n", params->loopback_mode, phy->req_line_speed); - if (phy->req_line_speed < SPEED_10000) { - /* 10/100/1000 */ + if (phy->req_line_speed < SPEED_10000 || + phy->supported & SUPPORTED_20000baseKR2_Full) { + /* 10/100/1000/20G-KR2 */ /* Update those 1-copy registers */ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, MDIO_AER_BLOCK_AER_REG, 0); /* Enable 1G MDIO (1-copy) */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, - &val16); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, - val16 | 0x10); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, + 0x10); /* Set 1G loopback based on lane (1-copy) */ lane = bnx2x_get_warpcore_lane(phy, params); bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16); + val16 |= (1<<lane); + if (phy->flags & FLAGS_WC_DUAL_MODE) + val16 |= (2<<lane); bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK1_LANECTRL2, - val16 | (1<<lane)); + MDIO_WC_REG_XGXSBLK1_LANECTRL2, + val16); /* Switch back to 4-copy registers */ bnx2x_set_aer_mmd(params, phy); } else { - /* 10G & 20G */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 | - 0x4000); - - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 | 0x1); + /* 10G / 20G-DXGXS */ + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, + 0x4000); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1); } } -void bnx2x_sync_link(struct link_params *params, - struct link_vars *vars) + +static void bnx2x_sync_link(struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; u8 link_10g_plus; @@ -4567,43 +4661,43 @@ void bnx2x_sync_link(struct link_params *params, vars->duplex = DUPLEX_FULL; switch (vars->link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) { - case LINK_10THD: - vars->duplex = DUPLEX_HALF; - /* fall thru */ - case LINK_10TFD: - vars->line_speed = SPEED_10; - break; + case 
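Worked example for the lane-mask arithmetic in the link_reset hunk above, taking lane = 1: LANECTRL0 keeps one bit per lane in two byte-wide fields, so 0x11 << 1 = 0x22 touches lane 1 in both fields, and dual mode additionally ORs in 0x22 << 1 = 0x44 for the adjacent lane. LANECTRL1 keeps two bits per lane, hence the (lane << 1) shift: ~(0x0303 << 2) clears lane 1's bit pair in both fields, (0x0101 << 2) sets the low bit of each pair, and the 0x0c0c/0x0404 masks do the same for the adjacent lane in dual mode.
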
LINK_10THD: + vars->duplex = DUPLEX_HALF; + /* Fall thru */ + case LINK_10TFD: + vars->line_speed = SPEED_10; + break; - case LINK_100TXHD: - vars->duplex = DUPLEX_HALF; - /* fall thru */ - case LINK_100T4: - case LINK_100TXFD: - vars->line_speed = SPEED_100; - break; + case LINK_100TXHD: + vars->duplex = DUPLEX_HALF; + /* Fall thru */ + case LINK_100T4: + case LINK_100TXFD: + vars->line_speed = SPEED_100; + break; - case LINK_1000THD: - vars->duplex = DUPLEX_HALF; - /* fall thru */ - case LINK_1000TFD: - vars->line_speed = SPEED_1000; - break; + case LINK_1000THD: + vars->duplex = DUPLEX_HALF; + /* Fall thru */ + case LINK_1000TFD: + vars->line_speed = SPEED_1000; + break; - case LINK_2500THD: - vars->duplex = DUPLEX_HALF; - /* fall thru */ - case LINK_2500TFD: - vars->line_speed = SPEED_2500; - break; + case LINK_2500THD: + vars->duplex = DUPLEX_HALF; + /* Fall thru */ + case LINK_2500TFD: + vars->line_speed = SPEED_2500; + break; - case LINK_10GTFD: - vars->line_speed = SPEED_10000; - break; - case LINK_20GTFD: - vars->line_speed = SPEED_20000; - break; - default: - break; + case LINK_10GTFD: + vars->line_speed = SPEED_10000; + break; + case LINK_20GTFD: + vars->line_speed = SPEED_20000; + break; + default: + break; } vars->flow_ctrl = 0; if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED) @@ -4626,7 +4720,7 @@ void bnx2x_sync_link(struct link_params *params, USES_WARPCORE(bp) && (vars->line_speed == SPEED_1000)) vars->phy_flags |= PHY_SGMII_FLAG; - /* anything 10 and over uses the bmac */ + /* Anything 10 and over uses the bmac */ link_10g_plus = (vars->line_speed >= SPEED_10000); if (link_10g_plus) { @@ -4640,7 +4734,7 @@ void bnx2x_sync_link(struct link_params *params, else vars->mac_type = MAC_TYPE_EMAC; } - } else { /* link down */ + } else { /* Link down */ DP(NETIF_MSG_LINK, "phy link down\n"); vars->phy_link_up = 0; @@ -4649,10 +4743,12 @@ void bnx2x_sync_link(struct link_params *params, vars->duplex = DUPLEX_FULL; vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; - /* indicate no mac active */ + /* Indicate no mac active */ vars->mac_type = MAC_TYPE_NONE; if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG) vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; + if (vars->link_status & LINK_STATUS_SFP_TX_FAULT) + vars->phy_flags |= PHY_SFP_TX_FAULT_FLAG; } } @@ -4669,6 +4765,16 @@ void bnx2x_link_status_update(struct link_params *params, offsetof(struct shmem_region, port_mb[port].link_status)); + /* Force link UP in non LOOPBACK_EXT loopback mode(s) */ + if (params->loopback_mode != LOOPBACK_NONE && + params->loopback_mode != LOOPBACK_EXT) + vars->link_status |= LINK_STATUS_LINK_UP; + + if (bnx2x_eee_has_cap(params)) + vars->eee_status = REG_RD(bp, params->shmem2_base + + offsetof(struct shmem2_region, + eee_status[params->port])); + vars->phy_flags = PHY_XGXS_FLAG; bnx2x_sync_link(params, vars); /* Sync media type */ @@ -4703,6 +4809,10 @@ void bnx2x_link_status_update(struct link_params *params, params->feature_config_flags &= ~FEATURE_CONFIG_PFC_ENABLED; + if (SHMEM2_HAS(bp, link_attr_sync)) + vars->link_attr_sync = SHMEM2_RD(bp, + link_attr_sync[params->port]); + DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n", vars->link_status, vars->phy_link_up, vars->aeu_int_mask); DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n", @@ -4718,7 +4828,7 @@ static void bnx2x_set_master_ln(struct link_params *params, PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); - /* set the master_ln for AN */ + /* Set the master_ln for AN */ 
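The lane-swap encoding noted above can be checked by hand: packing lanes 0,1,2,3 into consecutive 2-bit fields gives

	(0 << 6) | (1 << 4) | (2 << 2) | (3 << 0) = 0x1b

so 0x1b is the identity mapping, which is why that value needs no swap enable.
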
CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_XGXS_BLOCK2, MDIO_XGXS_BLOCK2_TEST_MODE_LANE, @@ -4741,7 +4851,7 @@ static int bnx2x_reset_unicore(struct link_params *params, MDIO_REG_BANK_COMBO_IEEE0, MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control); - /* reset the unicore */ + /* Reset the unicore */ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_COMBO_IEEE0, MDIO_COMBO_IEEE0_MII_CONTROL, @@ -4750,11 +4860,11 @@ static int bnx2x_reset_unicore(struct link_params *params, if (set_serdes) bnx2x_set_serdes_access(bp, params->port); - /* wait for the reset to self clear */ + /* Wait for the reset to self clear */ for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) { udelay(5); - /* the reset erased the previous bank value */ + /* The reset erased the previous bank value */ CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_COMBO_IEEE0, MDIO_COMBO_IEEE0_MII_CONTROL, @@ -4778,9 +4888,8 @@ static void bnx2x_set_swap_lanes(struct link_params *params, struct bnx2x_phy *phy) { struct bnx2x *bp = params->bp; - /* - * Each two bits represents a lane number: - * No swap is 0123 => 0x1b no need to enable the swap + /* Each two bits represents a lane number: + * No swap is 0123 => 0x1b no need to enable the swap */ u16 rx_lane_swap, tx_lane_swap; @@ -4973,7 +5082,7 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy, MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val); } -/* program SerDes, forced speed */ +/* Program SerDes, forced speed */ static void bnx2x_program_serdes(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) @@ -4981,7 +5090,7 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; u16 reg_val; - /* program duplex, disable autoneg and sgmii*/ + /* Program duplex, disable autoneg and sgmii*/ CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_COMBO_IEEE0, MDIO_COMBO_IEEE0_MII_CONTROL, ®_val); @@ -4994,14 +5103,13 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy, MDIO_REG_BANK_COMBO_IEEE0, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); - /* - * program speed + /* Program speed * - needed only if the speed is greater than 1G (2.5G or 10G) */ CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_SERDES_DIGITAL, MDIO_SERDES_DIGITAL_MISC1, ®_val); - /* clearing the speed value before setting the right speed */ + /* Clearing the speed value before setting the right speed */ DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val); reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK | @@ -5030,9 +5138,7 @@ static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; u16 val = 0; - /* configure the 48 bits for BAM AN */ - - /* set extended capabilities */ + /* Set extended capabilities */ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) val |= MDIO_OVER_1G_UP1_2_5G; if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) @@ -5052,7 +5158,7 @@ static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy, { struct bnx2x *bp = params->bp; u16 val; - /* for AN, we are always publishing full duplex */ + /* For AN, we are always publishing full duplex */ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_COMBO_IEEE0, @@ -5114,14 +5220,14 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; u16 control1; - /* in SGMII mode, the unicore is always slave */ + /* In SGMII mode, the unicore is always slave */ CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_SERDES_DIGITAL, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &control1); control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT; - /* set 
sgmii mode (and not fiber) */ + /* Set sgmii mode (and not fiber) */ control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE | MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET | MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE); @@ -5130,9 +5236,9 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, control1); - /* if forced speed */ + /* If forced speed */ if (!(vars->line_speed == SPEED_AUTO_NEG)) { - /* set speed, disable autoneg */ + /* Set speed, disable autoneg */ u16 mii_control; CL22_RD_OVER_CL45(bp, phy, @@ -5153,16 +5259,16 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy, MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000; break; case SPEED_10: - /* there is nothing to set for 10M */ + /* There is nothing to set for 10M */ break; default: - /* invalid speed for SGMII */ + /* Invalid speed for SGMII */ DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", vars->line_speed); break; } - /* setting the full duplex */ + /* Setting the full duplex */ if (phy->req_duplex == DUPLEX_FULL) mii_control |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; @@ -5172,16 +5278,13 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy, mii_control); } else { /* AN mode */ - /* enable and restart AN */ + /* Enable and restart AN */ bnx2x_restart_autoneg(phy, params, 0); } } - -/* - * link management +/* Link management */ - static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy, struct link_params *params) { @@ -5216,22 +5319,69 @@ static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy, return 0; } +static void bnx2x_update_adv_fc(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars, + u32 gp_status) +{ + u16 ld_pause; /* local driver */ + u16 lp_pause; /* link partner */ + u16 pause_result; + struct bnx2x *bp = params->bp; + if ((gp_status & + (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | + MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) == + (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | + MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) { + + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV1, + &ld_pause); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_LP_ADV1, + &lp_pause); + pause_result = (ld_pause & + MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK) >> 8; + pause_result |= (lp_pause & + MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK) >> 10; + DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n", pause_result); + } else { + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_AUTO_NEG_ADV, + &ld_pause); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1, + &lp_pause); + pause_result = (ld_pause & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5; + pause_result |= (lp_pause & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7; + DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n", pause_result); + } + bnx2x_pause_resolve(vars, pause_result); + +} + static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars, u32 gp_status) { struct bnx2x *bp = params->bp; - u16 ld_pause; /* local driver */ - u16 lp_pause; /* link partner */ - u16 pause_result; - vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; - /* resolve from gp_status in case of AN complete and not sgmii */ - if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) + /* Resolve from gp_status in case of AN complete and not sgmii */ + if (phy->req_flow_ctrl != 
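bnx2x_update_adv_fc() above packs the negotiated pause bits into a nibble -- bit 3 = LD asym, bit 2 = LD pause, bit 1 = LP asym, bit 0 = LP pause -- and hands it to bnx2x_pause_resolve(), which is not part of these hunks. A sketch of the IEEE 802.3 Annex 28B resolution that nibble implies; the exact case list is reconstructed, so treat it as illustrative:

	static void pause_resolve_sketch(struct link_vars *vars, u32 pause_result)
	{
		switch (pause_result) {	/* LD: asym,pause  LP: asym,pause */
		case 0xb:		/*      1    0         1    1    */
			vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
			break;
		case 0xe:		/*      1    1         1    0    */
			vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
			break;
		case 0x5:		/*      0    1         0    1    */
		case 0x7:		/*      0    1         1    1    */
		case 0xd:		/*      1    1         0    1    */
		case 0xf:		/*      1    1         1    1    */
			vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
			break;
		default:
			vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
			break;
		}
	}
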
BNX2X_FLOW_CTRL_AUTO) { + /* Update the advertised flow-controled of LD/LP in AN */ + if (phy->req_line_speed == SPEED_AUTO_NEG) + bnx2x_update_adv_fc(phy, params, vars, gp_status); + /* But set the flow-control result as the requested one */ vars->flow_ctrl = phy->req_flow_ctrl; - else if (phy->req_line_speed != SPEED_AUTO_NEG) + } else if (phy->req_line_speed != SPEED_AUTO_NEG) vars->flow_ctrl = params->req_fc_auto_adv; else if ((gp_status & MDIO_AN_CL73_OR_37_COMPLETE) && (!(vars->phy_flags & PHY_SGMII_FLAG))) { @@ -5239,45 +5389,7 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy, vars->flow_ctrl = params->req_fc_auto_adv; return; } - if ((gp_status & - (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | - MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) == - (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | - MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) { - - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_IEEEB1, - MDIO_CL73_IEEEB1_AN_ADV1, - &ld_pause); - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_IEEEB1, - MDIO_CL73_IEEEB1_AN_LP_ADV1, - &lp_pause); - pause_result = (ld_pause & - MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK) - >> 8; - pause_result |= (lp_pause & - MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK) - >> 10; - DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n", - pause_result); - } else { - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_AUTO_NEG_ADV, - &ld_pause); - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1, - &lp_pause); - pause_result = (ld_pause & - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5; - pause_result |= (lp_pause & - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7; - DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n", - pause_result); - } - bnx2x_pause_resolve(vars, pause_result); + bnx2x_update_adv_fc(phy, params, vars, gp_status); } DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl); } @@ -5317,8 +5429,7 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy, "ustat_val(0x8371) = 0x%x\n", ustat_val); return; } - /* - * Step 3: Check CL37 Message Pages received to indicate LP + /* Step 3: Check CL37 Message Pages received to indicate LP * supports only CL37 */ CL22_RD_OVER_CL45(bp, phy, @@ -5335,8 +5446,7 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy, cl37_fsm_received); return; } - /* - * The combined cl37/cl73 fsm state information indicating that + /* The combined cl37/cl73 fsm state information indicating that * we are connected to a device which does not support cl73, but * does support cl37 BAM. 
In this case we disable cl73 and * restart cl37 auto-neg @@ -5384,7 +5494,7 @@ static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy, switch (speed_mask) { case GP_STATUS_10M: vars->line_speed = SPEED_10; - if (vars->duplex == DUPLEX_FULL) + if (is_duplex == DUPLEX_FULL) vars->link_status |= LINK_10TFD; else vars->link_status |= LINK_10THD; @@ -5392,7 +5502,7 @@ static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy, case GP_STATUS_100M: vars->line_speed = SPEED_100; - if (vars->duplex == DUPLEX_FULL) + if (is_duplex == DUPLEX_FULL) vars->link_status |= LINK_100TXFD; else vars->link_status |= LINK_100TXHD; @@ -5401,7 +5511,7 @@ static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy, case GP_STATUS_1G: case GP_STATUS_1G_KX: vars->line_speed = SPEED_1000; - if (vars->duplex == DUPLEX_FULL) + if (is_duplex == DUPLEX_FULL) vars->link_status |= LINK_1000TFD; else vars->link_status |= LINK_1000THD; @@ -5409,7 +5519,7 @@ static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy, case GP_STATUS_2_5G: vars->line_speed = SPEED_2500; - if (vars->duplex == DUPLEX_FULL) + if (is_duplex == DUPLEX_FULL) vars->link_status |= LINK_2500TFD; else vars->link_status |= LINK_2500THD; @@ -5432,6 +5542,7 @@ static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy, vars->link_status |= LINK_10GTFD; break; case GP_STATUS_20G_DXGXS: + case GP_STATUS_20G_KR2: vars->line_speed = SPEED_20000; vars->link_status |= LINK_20GTFD; break; @@ -5483,12 +5594,13 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy, if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) { if (SINGLE_MEDIA_DIRECT(params)) { + vars->duplex = duplex; bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status); if (phy->req_line_speed == SPEED_AUTO_NEG) bnx2x_xgxs_an_resolve(phy, params, vars, gp_status); } - } else { /* link_down */ + } else { /* Link_down */ if ((phy->req_line_speed == SPEED_AUTO_NEG) && SINGLE_MEDIA_DIRECT(params)) { /* Check signal is detected */ @@ -5496,6 +5608,33 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy, } } + /* Read LP advertised speeds*/ + if (SINGLE_MEDIA_DIRECT(params) && + (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)) { + u16 val; + + CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_LP_ADV2, &val); + + if (val & MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; + if (val & (MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 | + MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + + CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_OVER_1G, + MDIO_OVER_1G_LP_UP1, &val); + + if (val & MDIO_OVER_1G_UP1_2_5G) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE; + if (val & (MDIO_OVER_1G_UP1_10G | MDIO_OVER_1G_UP1_10GH)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + } + DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n", vars->duplex, vars->flow_ctrl, vars->link_status); return rc; @@ -5511,7 +5650,15 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy, int rc = 0; lane = bnx2x_get_warpcore_lane(phy, params); /* Read gp_status */ - if (phy->req_line_speed > SPEED_10000) { + if ((params->loopback_mode) && + (phy->flags & FLAGS_WC_DUAL_MODE)) { + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up); + link_up &= 0x1; + } else if ((phy->req_line_speed > SPEED_10000) 
&& + (phy->supported & SUPPORTED_20000baseMLD2_Full)) { u16 temp_link_up; bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 1, &temp_link_up); @@ -5524,12 +5671,22 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy, bnx2x_ext_phy_resolve_fc(phy, params, vars); } else { bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_GP2_STATUS_GP_2_1, &gp_status1); + MDIO_WC_REG_GP2_STATUS_GP_2_1, + &gp_status1); DP(NETIF_MSG_LINK, "0x81d1 = 0x%x\n", gp_status1); - /* Check for either KR or generic link up. */ - gp_status1 = ((gp_status1 >> 8) & 0xf) | - ((gp_status1 >> 12) & 0xf); - link_up = gp_status1 & (1 << lane); + /* Check for either KR, 1G, or AN up. */ + link_up = ((gp_status1 >> 8) | + (gp_status1 >> 12) | + (gp_status1)) & + (1 << lane); + if (phy->supported & SUPPORTED_20000baseKR2_Full) { + u16 an_link; + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_STATUS, &an_link); + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_STATUS, &an_link); + link_up |= (an_link & (1<<2)); + } if (link_up && SINGLE_MEDIA_DIRECT(params)) { u16 pd, gp_status4; if (phy->req_line_speed == SPEED_AUTO_NEG) { @@ -5550,9 +5707,38 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy, LINK_STATUS_PARALLEL_DETECTION_USED; } bnx2x_ext_phy_resolve_fc(phy, params, vars); + vars->duplex = duplex; } } + if ((vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) && + SINGLE_MEDIA_DIRECT(params)) { + u16 val; + + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG2, &val); + + if (val & MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; + if (val & (MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 | + MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL3_LP_UP1, &val); + + if (val & MDIO_OVER_1G_UP1_2_5G) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE; + if (val & (MDIO_OVER_1G_UP1_10G | MDIO_OVER_1G_UP1_10GH)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + + } + + if (lane < 2) { bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_2, &gp_speed); @@ -5565,11 +5751,16 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy, if ((lane & 1) == 0) gp_speed <<= 8; gp_speed &= 0x3f00; - + link_up = !!link_up; rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed, duplex); + /* In case of KR link down, start up the recovering procedure */ + if ((!link_up) && (phy->media_type == ETH_PHY_KR) && + (!(phy->flags & FLAGS_WC_DUAL_MODE))) + vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; + DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n", vars->duplex, vars->flow_ctrl, vars->link_status); return rc; @@ -5582,12 +5773,12 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params) u16 tx_driver; u16 bank; - /* read precomp */ + /* Read precomp */ CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_OVER_1G, MDIO_OVER_1G_LP_UP2, &lp_up2); - /* bits [10:7] at lp_up2, positioned at [15:12] */ + /* Bits [10:7] at lp_up2, positioned at [15:12] */ lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >> MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) << MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT); @@ -5601,7 +5792,7 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params) bank, MDIO_TX0_TX_DRIVER, &tx_driver); - /* replace tx_driver bits [15:12] */ + /* Replace tx_driver bits [15:12] */ if (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) { 
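Two idioms in the warpcore read_status hunk above are worth unpacking. First, the link-up test folds three per-lane fields of GP2_STATUS_GP_2_1 before a single mask; for lane 2, for instance:

	link_up = ((gp_status1 >> 8) | (gp_status1 >> 12) | gp_status1)
		  & (1 << 2);	/* bits 10 (1G), 14 (10G-KR), 2 (AN) */

Second, MDIO_AN_REG_STATUS (like DIGITAL5_LINK_STATUS in the loopback path) is read twice back to back -- presumably the usual MDIO idiom for latched-low status bits, where only the second read reflects the current link state.
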
tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK; @@ -5697,16 +5888,16 @@ static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy, FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) bnx2x_set_preemphasis(phy, params); - /* forced speed requested? */ + /* Forced speed requested? */ if (vars->line_speed != SPEED_AUTO_NEG || (SINGLE_MEDIA_DIRECT(params) && params->loopback_mode == LOOPBACK_EXT)) { DP(NETIF_MSG_LINK, "not SGMII, no AN\n"); - /* disable autoneg */ + /* Disable autoneg */ bnx2x_set_autoneg(phy, params, vars, 0); - /* program speed and duplex */ + /* Program speed and duplex */ bnx2x_program_serdes(phy, params, vars); } else { /* AN_mode */ @@ -5715,14 +5906,14 @@ static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy, /* AN enabled */ bnx2x_set_brcm_cl37_advertisement(phy, params); - /* program duplex & pause advertisement (for aneg) */ + /* Program duplex & pause advertisement (for aneg) */ bnx2x_set_ieee_aneg_advertisement(phy, params, vars->ieee_fc); - /* enable autoneg */ + /* Enable autoneg */ bnx2x_set_autoneg(phy, params, vars, enable_cl73); - /* enable and restart AN */ + /* Enable and restart AN */ bnx2x_restart_autoneg(phy, params, enable_cl73); } @@ -5758,12 +5949,12 @@ static int bnx2x_prepare_xgxs(struct bnx2x_phy *phy, bnx2x_set_master_ln(params, phy); rc = bnx2x_reset_unicore(params, phy, 0); - /* reset the SerDes and wait for reset bit return low */ - if (rc != 0) + /* Reset the SerDes and wait for reset bit return low */ + if (rc) return rc; bnx2x_set_aer_mmd(params, phy); - /* setting the masterLn_def again after the reset */ + /* Setting the masterLn_def again after the reset */ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) { bnx2x_set_master_ln(params, phy); bnx2x_set_swap_lanes(params, phy); @@ -5788,7 +5979,7 @@ static u16 bnx2x_wait_reset_complete(struct bnx2x *bp, MDIO_PMA_REG_CTRL, &ctrl); if (!(ctrl & (1<<15))) break; - msleep(1); + usleep_range(1000, 2000); } if (cnt == 1000) @@ -5852,8 +6043,7 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port, { u32 latch_status = 0; - /* - * Disable the MI INT ( external phy int ) by writing 1 to the + /* Disable the MI INT ( external phy int ) by writing 1 to the * status register. 
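The msleep(1) -> usleep_range(1000, 2000) conversion in the wait-for-reset loop above follows the kernel's timers-howto guidance: msleep() is jiffy-based, so msleep(1) can oversleep to whole jiffy ticks (up to ~20 ms at HZ=100), while usleep_range() is backed by hrtimers and stays within the requested bounds.
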
Link down indication is high-active-signal, * so in this case we need to write the status to clear the XOR */ @@ -5888,8 +6078,7 @@ static void bnx2x_link_int_ack(struct link_params *params, struct bnx2x *bp = params->bp; u8 port = params->port; u32 mask; - /* - * First reset all status we assume only one line will be + /* First reset all status we assume only one line will be * change at a time */ bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, @@ -5903,8 +6092,7 @@ static void bnx2x_link_int_ack(struct link_params *params, if (is_10g_plus) mask = NIG_STATUS_XGXS0_LINK10G; else if (params->switch_cfg == SWITCH_CFG_10G) { - /* - * Disable the link interrupt by writing 1 to + /* Disable the link interrupt by writing 1 to * the relevant lane in the status register */ u32 ser_lane = @@ -5970,8 +6158,8 @@ static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len) return 0; } -int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, - u8 *version, u16 len) +int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version, + u16 len) { struct bnx2x *bp; u32 spirom_ver = 0; @@ -6022,7 +6210,7 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n"); if (!CHIP_IS_E3(bp)) { - /* change the uni_phy_addr in the nig */ + /* Change the uni_phy_addr in the nig */ md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18)); @@ -6042,11 +6230,11 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy, (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)), 0x6041); msleep(200); - /* set aer mmd back */ + /* Set aer mmd back */ bnx2x_set_aer_mmd(params, phy); if (!CHIP_IS_E3(bp)) { - /* and md_devad */ + /* And md_devad */ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad); } @@ -6095,17 +6283,18 @@ int bnx2x_set_led(struct link_params *params, tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); if (params->phy[EXT_PHY1].type == - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) - EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp & 0xfff1); - else { - EMAC_WR(bp, EMAC_REG_EMAC_LED, - (tmp | EMAC_LED_OVERRIDE)); - } + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) + tmp &= ~(EMAC_LED_1000MB_OVERRIDE | + EMAC_LED_100MB_OVERRIDE | + EMAC_LED_10MB_OVERRIDE); + else + tmp |= EMAC_LED_OVERRIDE; + + EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp); break; case LED_MODE_OPER: - /* - * For all other phys, OPER mode is same as ON, so in case + /* For all other phys, OPER mode is same as ON, so in case * link is down, do nothing */ if (!vars->link_up) @@ -6116,9 +6305,7 @@ int bnx2x_set_led(struct link_params *params, (params->phy[EXT_PHY1].type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) && CHIP_IS_E2(bp) && params->num_phys == 2) { - /* - * This is a work-around for E2+8727 Configurations - */ + /* This is a work-around for E2+8727 Configurations */ if (mode == LED_MODE_ON || speed == SPEED_10000){ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); @@ -6127,8 +6314,7 @@ int bnx2x_set_led(struct link_params *params, tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE)); - /* - * return here without enabling traffic + /* Return here without enabling traffic * LED blink and setting rate in ON mode. * In oper mode, enabling LED blink * and setting rate is needed. 
@@ -6137,8 +6323,7 @@ int bnx2x_set_led(struct link_params *params, return rc; } } else if (SINGLE_MEDIA_DIRECT(params)) { - /* - * This is a work-around for HW issue found when link + /* This is a work-around for HW issue found when link * is up in CL73 */ if ((!CHIP_IS_E3(bp)) || @@ -6155,13 +6340,24 @@ int bnx2x_set_led(struct link_params *params, hw_led_mode); } else if ((params->phy[EXT_PHY1].type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) && - (mode != LED_MODE_OPER)) { + (mode == LED_MODE_ON)) { REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); - EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp | 0x3); - } else + EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp | + EMAC_LED_OVERRIDE | EMAC_LED_1000MB_OVERRIDE); + /* Break here; otherwise, it'll disable the + * intended override. + */ + break; + } else { + u32 nig_led_mode = ((params->hw_led_mode << + SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY2) ? + (SHARED_HW_CFG_LED_PHY1 >> + SHARED_HW_CFG_LED_MODE_SHIFT) : hw_led_mode; REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, - hw_led_mode); + nig_led_mode); + } REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0); /* Set blinking rate to ~15.9Hz */ @@ -6173,23 +6369,16 @@ int bnx2x_set_led(struct link_params *params, LED_BLINK_RATE_VAL_E1X_E2); REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + port*4, 1); - if ((params->phy[EXT_PHY1].type != - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) && - (mode != LED_MODE_OPER)) { - tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); - EMAC_WR(bp, EMAC_REG_EMAC_LED, - (tmp & (~EMAC_LED_OVERRIDE))); - } + tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); + EMAC_WR(bp, EMAC_REG_EMAC_LED, + (tmp & (~EMAC_LED_OVERRIDE))); if (CHIP_IS_E1(bp) && ((speed == SPEED_2500) || (speed == SPEED_1000) || (speed == SPEED_100) || (speed == SPEED_10))) { - /* - * On Everest 1 Ax chip versions for speeds less than - * 10G LED scheme is different - */ + /* For speeds less than 10G LED scheme is different */ REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 1); REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + @@ -6209,8 +6398,7 @@ int bnx2x_set_led(struct link_params *params, } -/* - * This function comes to reflect the actual link state read DIRECTLY from the +/* This function comes to reflect the actual link state read DIRECTLY from the * HW */ int bnx2x_test_link(struct link_params *params, struct link_vars *vars, @@ -6249,7 +6437,7 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars, MDIO_REG_BANK_GP_STATUS, MDIO_GP_STATUS_TOP_AN_STATUS1, &gp_status); - /* link is up only if both local phy and external phy are up */ + /* Link is up only if both local phy and external phy are up */ if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)) return -ESRCH; } @@ -6270,7 +6458,9 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars, for (phy_index = EXT_PHY1; phy_index < params->num_phys; phy_index++) { serdes_phy_type = ((params->phy[phy_index].media_type == - ETH_PHY_SFP_FIBER) || + ETH_PHY_SFPP_10G_FIBER) || + (params->phy[phy_index].media_type == + ETH_PHY_SFP_1G_FIBER) || (params->phy[phy_index].media_type == ETH_PHY_XFP_FIBER) || (params->phy[phy_index].media_type == @@ -6295,19 +6485,16 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars, static int bnx2x_link_initialize(struct link_params *params, struct link_vars *vars) { - int rc = 0; u8 phy_index, non_ext_phy; struct bnx2x *bp = params->bp; - /* - * In case of external phy existence, the line speed would be the + /* In case of external phy 
existence, the line speed would be the * line speed linked up by the external phy. In case it is direct * only, then the line_speed during initialization will be * equal to the req_line_speed */ vars->line_speed = params->phy[INT_PHY].req_line_speed; - /* - * Initialize the internal phy in case this is a direct board + /* Initialize the internal phy in case this is a direct board * (no external phys), or this board has external phy which requires * to first. */ @@ -6325,12 +6512,15 @@ static int bnx2x_link_initialize(struct link_params *params, (CHIP_IS_E1x(bp) || CHIP_IS_E2(bp))) bnx2x_set_parallel_detection(phy, params); - if (params->phy[INT_PHY].config_init) - params->phy[INT_PHY].config_init(phy, - params, - vars); + if (params->phy[INT_PHY].config_init) + params->phy[INT_PHY].config_init(phy, params, vars); } + /* Re-read this value in case it was changed inside config_init due to + * limitations of optic module + */ + vars->line_speed = params->phy[INT_PHY].req_line_speed; + /* Init external phy*/ if (non_ext_phy) { if (params->phy[INT_PHY].supported & @@ -6339,8 +6529,7 @@ static int bnx2x_link_initialize(struct link_params *params, } else { for (phy_index = EXT_PHY1; phy_index < params->num_phys; phy_index++) { - /* - * No need to initialize second phy in case of first + /* No need to initialize second phy in case of first * phy only selection. In case of second phy, we do * need to initialize the first phy, since they are * connected. @@ -6368,14 +6557,13 @@ static int bnx2x_link_initialize(struct link_params *params, NIG_STATUS_XGXS0_LINK_STATUS | NIG_STATUS_SERDES0_LINK_STATUS | NIG_MASK_MI_INT)); - bnx2x_update_mng(params, vars->link_status); - return rc; + return 0; } static void bnx2x_int_link_reset(struct bnx2x_phy *phy, struct link_params *params) { - /* reset the SerDes/XGXS */ + /* Reset the SerDes/XGXS */ REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, (0x1ff << (params->port*16))); } @@ -6408,39 +6596,39 @@ static int bnx2x_update_link_down(struct link_params *params, DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port); bnx2x_set_led(params, vars, LED_MODE_OFF, 0); vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG; - /* indicate no mac active */ + /* Indicate no mac active */ vars->mac_type = MAC_TYPE_NONE; - /* update shared memory */ - vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK | - LINK_STATUS_LINK_UP | - LINK_STATUS_PHYSICAL_LINK_FLAG | - LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | - LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | - LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | - LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK); + /* Update shared memory */ + vars->link_status &= ~LINK_UPDATE_MASK; vars->line_speed = 0; bnx2x_update_mng(params, vars->link_status); - /* activate nig drain */ + /* Activate nig drain */ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); - /* disable emac */ + /* Disable emac */ if (!CHIP_IS_E3(bp)) REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); - msleep(10); - /* reset BigMac/Xmac */ + usleep_range(10000, 20000); + /* Reset BigMac/Xmac */ if (CHIP_IS_E1x(bp) || - CHIP_IS_E2(bp)) { - bnx2x_bmac_rx_disable(bp, params->port); - REG_WR(bp, GRCBASE_MISC + - MISC_REGISTERS_RESET_REG_2_CLEAR, - (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); - } + CHIP_IS_E2(bp)) + bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0); + if (CHIP_IS_E3(bp)) { - bnx2x_xmac_disable(params); - bnx2x_umac_disable(params); + /* Prevent LPI Generation by chip */ + REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), + 0); + REG_WR(bp, 
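LINK_UPDATE_MASK is introduced outside these hunks; judging from the explicit flag list it replaces in bnx2x_update_link_down(), it covers at least the following (the real macro may include more of the link-partner bits this patch adds):

	#define LINK_UPDATE_MASK \
		(LINK_STATUS_SPEED_AND_DUPLEX_MASK | \
		 LINK_STATUS_LINK_UP | \
		 LINK_STATUS_PHYSICAL_LINK_FLAG | \
		 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | \
		 LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | \
		 LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | \
		 LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK)
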
MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2), + 0); + vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK | + SHMEM_EEE_ACTIVE_BIT); + + bnx2x_update_mng_eee(params, vars->eee_status); + bnx2x_set_xmac_rxtx(params, 0); + bnx2x_set_umac_rxtx(params, 0); } return 0; @@ -6451,7 +6639,7 @@ static int bnx2x_update_link_up(struct link_params *params, u8 link_10g) { struct bnx2x *bp = params->bp; - u8 port = params->port; + u8 phy_idx, port = params->port; int rc = 0; vars->link_status |= (LINK_STATUS_LINK_UP | @@ -6478,11 +6666,21 @@ static int bnx2x_update_link_up(struct link_params *params, bnx2x_umac_enable(params, vars, 0); bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); + + if ((vars->eee_status & SHMEM_EEE_ACTIVE_BIT) && + (vars->eee_status & SHMEM_EEE_LPI_REQUESTED_BIT)) { + DP(NETIF_MSG_LINK, "Enabling LPI assertion\n"); + REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + + (params->port << 2), 1); + REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 1); + REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + + (params->port << 2), 0xfc20); + } } if ((CHIP_IS_E1x(bp) || CHIP_IS_E2(bp))) { if (link_10g) { - if (bnx2x_bmac_enable(params, vars, 0) == + if (bnx2x_bmac_enable(params, vars, 0, 1) == -ESRCH) { DP(NETIF_MSG_LINK, "Found errors on BMAC\n"); vars->link_up = 0; @@ -6510,16 +6708,23 @@ static int bnx2x_update_link_up(struct link_params *params, rc |= bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed); - /* disable drain */ + /* Disable drain */ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0); - /* update shared memory */ + /* Update shared memory */ bnx2x_update_mng(params, vars->link_status); + bnx2x_update_mng_eee(params, vars->eee_status); + /* Check remote fault */ + for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) { + if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) { + bnx2x_check_half_open_conn(params, vars, 0); + break; + } + } msleep(20); return rc; } -/* - * The bnx2x_link_update function should be called upon link +/* The bnx2x_link_update function should be called upon link * interrupt. * Link is considered up as follows: * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs @@ -6543,6 +6748,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed; u8 active_external_phy = INT_PHY; vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; + vars->link_status &= ~LINK_UPDATE_MASK; for (phy_index = INT_PHY; phy_index < params->num_phys; phy_index++) { phy_vars[phy_index].flow_ctrl = 0; @@ -6552,6 +6758,8 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) phy_vars[phy_index].phy_link_up = 0; phy_vars[phy_index].link_up = 0; phy_vars[phy_index].fault_detected = 0; + /* different consideration, since vars holds inner state */ + phy_vars[phy_index].eee_status = vars->eee_status; } if (USES_WARPCORE(bp)) @@ -6572,12 +6780,11 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); - /* disable emac */ + /* Disable emac */ if (!CHIP_IS_E3(bp)) REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); - /* - * Step 1: + /* Step 1: * Check external link change only for external phys, and apply * priority selection between them in case the link on both phys * is up. 
Note that instead of the common vars, a temporary @@ -6608,23 +6815,20 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) switch (bnx2x_phy_selection(params)) { case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: - /* - * In this option, the first PHY makes sure to pass the + /* In this option, the first PHY makes sure to pass the * traffic through itself only. * It's not clear how to reset the link on the second phy */ active_external_phy = EXT_PHY1; break; case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: - /* - * In this option, the first PHY makes sure to pass the + /* In this option, the first PHY makes sure to pass the * traffic through the second PHY. */ active_external_phy = EXT_PHY2; break; default: - /* - * Link indication on both PHYs with the following cases + /* Link indication on both PHYs with the following cases * is invalid: * - FIRST_PHY means that second phy wasn't initialized, * hence its link is expected to be down @@ -6641,8 +6845,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) } } prev_line_speed = vars->line_speed; - /* - * Step 2: + /* Step 2: * Read the status of the internal phy. In case of * DIRECT_SINGLE_MEDIA board, this link is the external link, * otherwise this is the link between the 577xx and the first @@ -6652,8 +6855,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) params->phy[INT_PHY].read_status( &params->phy[INT_PHY], params, vars); - /* - * The INT_PHY flow control reside in the vars. This include the + /* The INT_PHY flow control resides in the vars. This includes the * case where the speed or flow control are not set to AUTO. * Otherwise, the active external phy flow control result is set * to the vars. The ext_phy_line_speed is needed to check if the @@ -6662,14 +6864,12 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) */ if (active_external_phy > INT_PHY) { vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl; - /* - * Link speed is taken from the XGXS. AN and FC result from + /* Link speed is taken from the XGXS. AN and FC result from * the external phy. */ vars->link_status |= phy_vars[active_external_phy].link_status; - /* - * if active_external_phy is first PHY and link is up - disable + /* if active_external_phy is first PHY and link is up - * disable TX on second external PHY */ if (active_external_phy == EXT_PHY1) { @@ -6689,6 +6889,9 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) vars->link_status |= LINK_STATUS_SERDES_LINK; else vars->link_status &= ~LINK_STATUS_SERDES_LINK; + + vars->eee_status = phy_vars[active_external_phy].eee_status; + DP(NETIF_MSG_LINK, "Active external phy selected: %x\n", active_external_phy); } @@ -6706,8 +6909,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x," " ext_phy_line_speed = %d\n", vars->flow_ctrl, vars->link_status, ext_phy_line_speed); - /* - * Upon link speed change set the NIG into drain mode. 
Comes to * deal with a possible FIFO glitch due to clk change when speed * is decreased without a link down indicator */ @@ -6723,17 +6925,16 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) } else if (prev_line_speed != vars->line_speed) { REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); - msleep(1); + usleep_range(1000, 2000); } } - /* anything 10 and over uses the bmac */ + /* Anything 10 and over uses the bmac */ link_10g_plus = (vars->line_speed >= SPEED_10000); bnx2x_link_int_ack(params, vars, link_10g_plus); - /* - * In case external phy link is up, and internal link is down + /* In case external phy link is up, and internal link is down * (not initialized yet, probably after link initialization), it * needs to be initialized. * Note that after link down-up as a result of cable plug, the xgxs @@ -6761,8 +6962,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) vars); } } - /* - * Link is up only if both local phy and external phy (in case of + /* Link is up only if both local phy and external phy (in case of * non-direct board) are up and no fault detected on active PHY. */ vars->link_up = (vars->phy_link_up && @@ -6770,11 +6970,21 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) SINGLE_MEDIA_DIRECT(params)) && (phy_vars[active_external_phy].fault_detected == 0)); + /* Update the PFC configuration in case it was changed */ + if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) + vars->link_status |= LINK_STATUS_PFC_ENABLED; + else + vars->link_status &= ~LINK_STATUS_PFC_ENABLED; + if (vars->link_up) rc = bnx2x_update_link_up(params, vars, link_10g_plus); else rc = bnx2x_update_link_down(params, vars); + /* Notify MCP if link status was changed */ + if (params->feature_config_flags & FEATURE_CONFIG_BC_SUPPORTS_AFEX) + bnx2x_fw_command(bp, DRV_MSG_CODE_LINK_STATUS_CHANGED, 0); + return rc; } @@ -6785,7 +6995,7 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port) { bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, MISC_REGISTERS_GPIO_OUTPUT_LOW, port); - msleep(1); + usleep_range(1000, 2000); bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); } @@ -6882,7 +7092,7 @@ static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, MDIO_PMA_REG_GEN_CTRL, 0x0001); - /* ucode reboot and rst */ + /* Ucode reboot and rst */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, @@ -6926,7 +7136,7 @@ static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout); - msleep(1); + usleep_range(1000, 2000); } while (fw_ver1 == 0 || fw_ver1 == 0x4321 || ((fw_msgout & 0xff) != 0x03 && (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
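The repeated msleep() -> usleep_range() conversions in these hunks follow the kernel's sleeping-functions guidance: an msleep() of a few milliseconds is backed by the jiffies timer wheel and can overshoot the requested delay considerably, while usleep_range() runs on hrtimers and gives the scheduler an explicit (min, max) window in which the wakeup may be coalesced with others. A minimal sketch of the idiom (the function name is invented for illustration):

#include <linux/delay.h>

/* Sketch only: poll-loop delays in the ~10us-20ms band are better
 * served by usleep_range(); the window lets the wakeup be batched
 * instead of rounding up to the next jiffy boundary.
 */
static void example_phy_settle(void)
{
	usleep_range(1000, 2000);	/* was: msleep(1) */
}
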
@@ -6988,8 +7198,7 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy) } /* XAUI workaround in 8073 A0: */ - /* - * After loading the boot ROM and restarting Autoneg, poll + /* After loading the boot ROM and restarting Autoneg, poll * Dev1, Reg $C820: */ @@ -6998,8 +7207,7 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy) MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &val); - /* - * If bit [14] = 0 or bit [13] = 0, continue on with + /* If bit [14] = 0 or bit [13] = 0, continue on with * system initialization (XAUI work-around not required, as * these bits indicate 2.5G or 1G link up). */ @@ -7008,8 +7216,7 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy) return 0; } else if (!(val & (1<<15))) { DP(NETIF_MSG_LINK, "bit 15 went off\n"); - /* - * If bit 15 is 0, then poll Dev1, Reg $C841 until it's + /* If bit 15 is 0, then poll Dev1, Reg $C841 until it's * MSB (bit15) goes to 1 (indicating that the XAUI * workaround has completed), then continue on with * system initialization. @@ -7023,11 +7230,11 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy) "XAUI workaround has completed\n"); return 0; } - msleep(3); + usleep_range(3000, 6000); } break; } - msleep(3); + usleep_range(3000, 6000); } DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n"); return -EINVAL; @@ -7081,6 +7288,22 @@ static void bnx2x_8073_set_pause_cl37(struct link_params *params, msleep(500); } +static void bnx2x_8073_specific_func(struct bnx2x_phy *phy, + struct link_params *params, + u32 action) +{ + struct bnx2x *bp = params->bp; + switch (action) { + case PHY_INIT: + /* Enable LASI */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2)); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004); + break; + } +} + static int bnx2x_8073_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) @@ -7101,12 +7324,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy, bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); - /* enable LASI */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2)); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004); - + bnx2x_8073_specific_func(phy, params, PHY_INIT); bnx2x_8073_set_pause_cl37(params, phy, vars); bnx2x_cl45_read(bp, phy, @@ -7159,8 +7377,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy, val = (1<<7); } else if (phy->req_line_speed == SPEED_2500) { val = (1<<5); - /* - * Note that 2.5G works only when used with 1G + /* Note that 2.5G works only when used with 1G * advertisement */ } else @@ -7211,8 +7428,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy, /* Add support for CL37 (passive mode) III */ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); - /* - * The SNR will improve about 2db by changing BW and FEE main + /* The SNR will improve about 2dB by changing BW and FEE main * tap. The rest of the commands are executed after link is up * Change FFE main cursor to 5 in EDC register */ @@ -7251,7 +7467,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "8073 LASI status 0x%x\n", val1); - /* clear the interrupt LASI status register */ + /* Clear the interrupt LASI status register */ bnx2x_cl45_read(bp, phy, MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2); bnx2x_cl45_read(bp, phy, @@ -7299,8 +7515,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy, link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1))); if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) { - /* - * The SNR will improve about 2dbby changing the BW and FEE main + /* The SNR will improve about 2dB by changing the BW and FEE main * tap. The 1st write to change FFE main tap is set before * restart AN. 
Change PLL Bandwidth in EDC register */ @@ -7347,8 +7562,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy, bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1); - /* - * Set bit 3 to invert Rx in 1G mode and clear this bit + /* Set bit 3 to invert Rx in 1G mode and clear this bit * when it`s in 10G mode. */ if (vars->line_speed == SPEED_1000) { @@ -7367,6 +7581,19 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy, bnx2x_8073_resolve_fc(phy, params, vars); vars->duplex = DUPLEX_FULL; } + + if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG2, &val1); + + if (val1 & (1<<5)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; + if (val1 & (1<<7)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + } + return link_up; } @@ -7457,8 +7684,7 @@ static void bnx2x_set_disable_pmd_transmit(struct link_params *params, u8 pmd_dis) { struct bnx2x *bp = params->bp; - /* - * Disable transmitter only for bootcodes which can enable it afterwards + /* Disable transmitter only for bootcodes which can enable it afterwards * (for D3 link) */ if (pmd_dis) { @@ -7561,12 +7787,13 @@ static void bnx2x_sfp_set_transmitter(struct link_params *params, static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, struct link_params *params, - u16 addr, u8 byte_cnt, u8 *o_buf) + u8 dev_addr, u16 addr, u8 byte_cnt, + u8 *o_buf, u8 is_init) { struct bnx2x *bp = params->bp; u16 val = 0; u16 i; - if (byte_cnt > 16) { + if (byte_cnt > SFP_EEPROM_PAGE_SIZE) { DP(NETIF_MSG_LINK, "Reading from eeprom is limited to 0xf\n"); return -EINVAL; @@ -7574,7 +7801,7 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, /* Set the read command byte count */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, - (byte_cnt | 0xa000)); + (byte_cnt | (dev_addr << 8))); /* Set the read command address */ bnx2x_cl45_write(bp, phy, @@ -7620,25 +7847,45 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) return 0; - msleep(1); + usleep_range(1000, 2000); } return -EINVAL; } +static void bnx2x_warpcore_power_module(struct link_params *params, + u8 power) +{ + u32 pin_cfg; + struct bnx2x *bp = params->bp; + + pin_cfg = (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].e3_sfp_ctrl)) & + PORT_HW_CFG_E3_PWR_DIS_MASK) >> + PORT_HW_CFG_E3_PWR_DIS_SHIFT; + + if (pin_cfg == PIN_CFG_NA) + return; + DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n", + power, pin_cfg); + /* Low ==> corresponding SFP+ module is powered + * high ==> the SFP+ module is powered down + */ + bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1); +} static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy, struct link_params *params, + u8 dev_addr, u16 addr, u8 byte_cnt, - u8 *o_buf) + u8 *o_buf, u8 is_init) { int rc = 0; u8 i, j = 0, cnt = 0; u32 data_array[4]; u16 addr32; struct bnx2x *bp = params->bp; - /*DP(NETIF_MSG_LINK, "bnx2x_direct_read_sfp_module_eeprom:" - " addr %d, cnt %d\n", - addr, byte_cnt);*/ - if (byte_cnt > 16) { + + if (byte_cnt > SFP_EEPROM_PAGE_SIZE) { DP(NETIF_MSG_LINK, "Reading from eeprom is limited to 16 bytes\n"); return -EINVAL; @@ -7647,7 +7894,13 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy, /* 4 byte aligned address */ addr32 = 
addr & (~0x3); do { + if ((!is_init) && (cnt == I2C_WA_PWR_ITER)) { + bnx2x_warpcore_power_module(params, 0); + /* Note that 100us are not enough here */ + usleep_range(1000, 2000); + bnx2x_warpcore_power_module(params, 1); + } + rc = bnx2x_bsc_read(params, bp, dev_addr, addr32, 0, byte_cnt, + data_array); } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT)); @@ -7663,17 +7916,27 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, struct link_params *params, - u16 addr, u8 byte_cnt, u8 *o_buf) + u8 dev_addr, u16 addr, u8 byte_cnt, + u8 *o_buf, u8 is_init) { struct bnx2x *bp = params->bp; u16 val, i; - if (byte_cnt > 16) { + if (byte_cnt > SFP_EEPROM_PAGE_SIZE) { DP(NETIF_MSG_LINK, "Reading from eeprom is limited to 0xf\n"); return -EINVAL; } + /* Set 2-wire transfer rate of SFP+ module EEPROM + * to 100kHz since some DACs (direct attached cables) do + * not work at 400kHz. + */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR, + ((dev_addr << 8) | 1)); + /* Need to read from 1.8000 to clear it */ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, @@ -7702,11 +7965,10 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, 0x8002); - /* - * Wait appropriate time for two-wire command to finish before + /* Wait appropriate time for two-wire command to finish before * polling the status register */ - msleep(1); + usleep_range(1000, 2000); /* Wait up to 500us for command complete status */ for (i = 0; i < 100; i++) { @@ -7742,31 +8004,49 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) return 0; - msleep(1); + usleep_range(1000, 2000); } return -EINVAL; } - int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, - struct link_params *params, u16 addr, - u8 byte_cnt, u8 *o_buf) + struct link_params *params, u8 dev_addr, + u16 addr, u16 byte_cnt, u8 *o_buf) { - int rc = -EINVAL; + int rc = 0; + struct bnx2x *bp = params->bp; + u8 xfer_size; + u8 *user_data = o_buf; + read_sfp_module_eeprom_func_p read_func; + + if ((dev_addr != 0xa0) && (dev_addr != 0xa2)) { + DP(NETIF_MSG_LINK, "invalid dev_addr 0x%x\n", dev_addr); + return -EINVAL; + } + switch (phy->type) { case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: - rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr, - byte_cnt, o_buf); - break; + read_func = bnx2x_8726_read_sfp_module_eeprom; + break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: - rc = bnx2x_8727_read_sfp_module_eeprom(phy, params, addr, - byte_cnt, o_buf); - break; + read_func = bnx2x_8727_read_sfp_module_eeprom; + break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: - rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr, - byte_cnt, o_buf); - break; + read_func = bnx2x_warpcore_read_sfp_module_eeprom; + break; + default: + return -EOPNOTSUPP; + } + + while (!rc && (byte_cnt > 0)) { + xfer_size = (byte_cnt > SFP_EEPROM_PAGE_SIZE) ? 
+ SFP_EEPROM_PAGE_SIZE : byte_cnt; + rc = read_func(phy, params, dev_addr, addr, xfer_size, + user_data, 0); + byte_cnt -= xfer_size; + user_data += xfer_size; + addr += xfer_size; } return rc; } @@ -7777,31 +8057,31 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, { struct bnx2x *bp = params->bp; u32 sync_offset = 0, phy_idx, media_types; - u8 val, check_limiting_mode = 0; + u8 gport, val[2], check_limiting_mode = 0; *edc_mode = EDC_MODE_LIMITING; - phy->media_type = ETH_PHY_UNSPECIFIED; /* First check for copper cable */ if (bnx2x_read_sfp_module_eeprom(phy, params, + I2C_DEV_ADDR_A0, SFP_EEPROM_CON_TYPE_ADDR, - 1, - &val) != 0) { + 2, + (u8 *)val) != 0) { DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n"); return -EINVAL; } - switch (val) { + switch (val[0]) { case SFP_EEPROM_CON_TYPE_VAL_COPPER: { u8 copper_module_type; phy->media_type = ETH_PHY_DA_TWINAX; - /* - * Check if its active cable (includes SFP+ module) + /* Check if it's an active cable (includes SFP+ module) * or passive cable */ if (bnx2x_read_sfp_module_eeprom(phy, params, + I2C_DEV_ADDR_A0, SFP_EEPROM_FC_TX_TECH_ADDR, 1, &copper_module_type) != 0) { @@ -7814,29 +8094,62 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, if (copper_module_type & SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) { DP(NETIF_MSG_LINK, "Active Copper cable detected\n"); + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) + *edc_mode = EDC_MODE_ACTIVE_DAC; + else + check_limiting_mode = 1; + } else { + *edc_mode = EDC_MODE_PASSIVE_DAC; + /* Even in case PASSIVE_DAC indication is not set, + * treat it as a passive DAC cable, since some cables + * don't have this indication. + */ + if (copper_module_type & + SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { DP(NETIF_MSG_LINK, "Passive Copper cable detected\n"); - *edc_mode = - EDC_MODE_PASSIVE_DAC; - } else { - DP(NETIF_MSG_LINK, - "Unknown copper-cable-type 0x%x !!!\n", - copper_module_type); - return -EINVAL; + } else { + DP(NETIF_MSG_LINK, + "Unknown copper-cable-type\n"); + } } break; } case SFP_EEPROM_CON_TYPE_VAL_LC: - phy->media_type = ETH_PHY_SFP_FIBER; - DP(NETIF_MSG_LINK, "Optic module detected\n"); + case SFP_EEPROM_CON_TYPE_VAL_RJ45: check_limiting_mode = 1; + if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK | + SFP_EEPROM_COMP_CODE_LR_MASK | + SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) { + DP(NETIF_MSG_LINK, "1G SFP module detected\n"); + gport = params->port; + phy->media_type = ETH_PHY_SFP_1G_FIBER; + if (phy->req_line_speed != SPEED_1000) { + phy->req_line_speed = SPEED_1000; + if (!CHIP_IS_E1x(bp)) { + gport = BP_PATH(bp) + + (params->port << 1); + } + netdev_err(bp->dev, + "Warning: Link speed was forced to 1000Mbps. 
Current SFP module in port %d is not compliant with 10G Ethernet\n", + gport); + } + } else { + int idx, cfg_idx = 0; + DP(NETIF_MSG_LINK, "10G Optic module detected\n"); + for (idx = INT_PHY; idx < MAX_PHYS; idx++) { + if (params->phy[idx].type == phy->type) { + cfg_idx = LINK_CONFIG_IDX(idx); + break; + } + } + phy->media_type = ETH_PHY_SFPP_10G_FIBER; + phy->req_line_speed = params->req_line_speed[cfg_idx]; + } break; default: DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n", - val); + val[0]); return -EINVAL; } sync_offset = params->shmem_base + @@ -7859,6 +8172,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, u8 options[SFP_EEPROM_OPTIONS_SIZE]; if (bnx2x_read_sfp_module_eeprom(phy, params, + I2C_DEV_ADDR_A0, SFP_EEPROM_OPTIONS_ADDR, SFP_EEPROM_OPTIONS_SIZE, options) != 0) { @@ -7874,8 +8188,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode); return 0; } -/* - * This function read the relevant field from the module (SFP+), and verify it +/* This function reads the relevant field from the module (SFP+), and verifies * it is compliant with this board */ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy, @@ -7923,9 +8236,10 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy, return 0; } - /* format the warning message */ + /* Format the warning message */ if (bnx2x_read_sfp_module_eeprom(phy, params, + I2C_DEV_ADDR_A0, SFP_EEPROM_VENDOR_NAME_ADDR, SFP_EEPROM_VENDOR_NAME_SIZE, (u8 *)vendor_name)) @@ -7934,6 +8248,7 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy, vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0'; if (bnx2x_read_sfp_module_eeprom(phy, params, + I2C_DEV_ADDR_A0, SFP_EEPROM_PART_NO_ADDR, SFP_EEPROM_PART_NO_SIZE, (u8 *)vendor_pn)) @@ -7944,7 +8259,9 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy, netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected," " Port %d from %s part number %s\n", params->port, vendor_name, vendor_pn); - phy->flags |= FLAGS_SFP_NOT_APPROVED; + if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) != + PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG) + phy->flags |= FLAGS_SFP_NOT_APPROVED; return -EINVAL; } @@ -7953,24 +8270,33 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy, { u8 val; + int rc; struct bnx2x *bp = params->bp; u16 timeout; - /* - * Initialization time after hot-plug may take up to 300ms for + /* Initialization time after hot-plug may take up to 300ms for * some phy types (e.g. JDSU) */ for (timeout = 0; timeout < 60; timeout++) { - if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val) - == 0) { + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) + rc = bnx2x_warpcore_read_sfp_module_eeprom( + phy, params, I2C_DEV_ADDR_A0, 1, 1, &val, + 1); + else + rc = bnx2x_read_sfp_module_eeprom(phy, params, + I2C_DEV_ADDR_A0, + 1, 1, &val); + if (rc == 0) { DP(NETIF_MSG_LINK, "SFP+ module initialization took %d ms\n", timeout * 5); return 0; } - msleep(5); + usleep_range(5000, 10000); } - return -EINVAL; + rc = bnx2x_read_sfp_module_eeprom(phy, params, I2C_DEV_ADDR_A0, + 1, 1, &val); + return rc; }
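With the rework above, bnx2x_read_sfp_module_eeprom() validates the I2C device address (0xa0 or 0xa2), picks the per-PHY low-level reader, and splits the request into SFP_EEPROM_PAGE_SIZE (16-byte) transfers, so callers are no longer capped at one page per call. A hedged usage sketch (the wrapper name and the 64-byte length are illustrative, not from this patch):

/* Sketch: read EEPROM bytes 0..63 of the module's A0 space in one call;
 * the loop in bnx2x_read_sfp_module_eeprom() issues four 16-byte
 * transfers, advancing the address and destination between them.
 */
static int example_read_sfp_base_id(struct bnx2x_phy *phy,
				    struct link_params *params, u8 *buf)
{
	return bnx2x_read_sfp_module_eeprom(phy, params, I2C_DEV_ADDR_A0,
					    0, 64, buf);
}
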
static void bnx2x_8727_power_module(struct bnx2x *bp, @@ -7978,8 +8304,7 @@ static void bnx2x_8727_power_module(struct bnx2x *bp, u8 is_power_up) { /* Make sure GPIOs are not used for LED mode */ u16 val; - /* - * In the GPIO register, bit 4 is use to determine if the GPIOs are + /* In the GPIO register, bit 4 is used to determine if the GPIOs are * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for * output * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0 @@ -7995,8 +8320,7 @@ static void bnx2x_8727_power_module(struct bnx2x *bp, if (is_power_up) val = (1<<4); else - /* - * Set GPIO control to OUTPUT, and set the power bit + /* Set GPIO control to OUTPUT, and set the power bit * according to the is_power_up */ val = (1<<1); @@ -8030,8 +8354,7 @@ static int bnx2x_8726_set_limiting_mode(struct bnx2x *bp, DP(NETIF_MSG_LINK, "Setting LRM MODE\n"); - /* - * Changing to LRM mode takes quite few seconds. So do it only + /* Changing to LRM mode takes quite a few seconds. So do it only * if current mode is limiting (default is LRM) */ if (cur_limiting_mode != EDC_MODE_LIMITING) @@ -8096,7 +8419,7 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy, u32 action) { struct bnx2x *bp = params->bp; - + u16 val; switch (action) { case DISABLE_TX: bnx2x_sfp_set_transmitter(params, phy, 0); @@ -8105,6 +8428,31 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy, if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) bnx2x_sfp_set_transmitter(params, phy, 1); break; + case PHY_INIT: + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, + (1<<2) | (1<<5)); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL, + 0); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0006); + /* Make MOD_ABS give interrupt on change */ + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_OPT_CTRL, + &val); + val |= (1<<12); + if (phy->flags & FLAGS_NOC) + val |= (3<<5); + /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0 + * status which reflects SFP+ module over-current + */ + if (!(phy->flags & FLAGS_NOC)) + val &= 0xff8f; /* Reset bits 4-6 */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, + val); + break; default: DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n", action); @@ -8166,8 +8514,7 @@ static void bnx2x_set_sfp_module_fault_led(struct link_params *params, struct bnx2x *bp = params->bp; DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode); if (CHIP_IS_E3(bp)) { - /* - * Low ==> if SFP+ module is supported otherwise + /* Low ==> if SFP+ module is supported otherwise * High ==> if SFP+ module is not on the approved vendor list */ bnx2x_set_e3_module_fault_led(params, gpio_mode); @@ -8175,35 +8522,11 @@ static void bnx2x_set_sfp_module_fault_led(struct link_params *params, bnx2x_set_e1e2_module_fault_led(params, gpio_mode); } -static void bnx2x_warpcore_power_module(struct link_params *params, - struct bnx2x_phy *phy, - u8 power) -{ - u32 pin_cfg; - struct bnx2x *bp = params->bp; - - pin_cfg = (REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[params->port].e3_sfp_ctrl)) & - PORT_HW_CFG_E3_PWR_DIS_MASK) >> - PORT_HW_CFG_E3_PWR_DIS_SHIFT; - - if (pin_cfg == PIN_CFG_NA) - return; - DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n", - power, pin_cfg); - /* - * Low ==> corresponding SFP+ module is powered - * high ==> the SFP+ module is powered down - */ - bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1); -} - static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy, struct link_params *params) { struct bnx2x *bp = params->bp; - bnx2x_warpcore_power_module(params, phy, 0); + bnx2x_warpcore_power_module(params, 0); /* Put Warpcore in low power mode */ REG_WR(bp, MISC_REG_WC0_RESET, 0x0c0e);
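As with the 8073 hunk earlier, the 8727's one-time LASI/MOD_ABS setup now lives behind a specific_func(..., PHY_INIT) action instead of being open-coded in config_init. A hedged sketch of the dispatch idea (that callers go through the phy_specific_func member is an assumption for illustration, not shown in this hunk):

/* Sketch: per-PHY actions (PHY_INIT, DISABLE_TX, ENABLE_TX, ...) funnel
 * through one callback, giving each PHY type a single dispatch point.
 */
static void example_phy_action(struct bnx2x_phy *phy,
			       struct link_params *params, u32 action)
{
	if (phy->phy_specific_func)
		phy->phy_specific_func(phy, params, action);
}
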
@@ -8226,7 +8549,7 @@ static void bnx2x_power_sfp_module(struct link_params *params, bnx2x_8727_power_module(params->bp, phy, power); break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: - bnx2x_warpcore_power_module(params, phy, power); + bnx2x_warpcore_power_module(params, power); break; default: break; } } @@ -8252,6 +8575,7 @@ static void bnx2x_warpcore_set_limiting_mode(struct link_params *params, mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; break; case EDC_MODE_PASSIVE_DAC: + case EDC_MODE_ACTIVE_DAC: mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC; break; default: @@ -8289,8 +8613,8 @@ static void bnx2x_set_limiting_mode(struct link_params *params, } } -int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, - struct link_params *params) +static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, + struct link_params *params) { struct bnx2x *bp = params->bp; u16 edc_mode; @@ -8299,7 +8623,8 @@ static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, u32 val = REG_RD(bp, params->shmem_base + offsetof(struct shmem_region, dev_info. port_feature_config[params->port].config)); - + /* Enable transmitter by default */ + bnx2x_sfp_set_transmitter(params, phy, 1); DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n", params->port); /* Power up module */ @@ -8308,7 +8633,7 @@ static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Failed to get valid module type\n"); return -EINVAL; } else if (bnx2x_verify_sfp_module(phy, params) != 0) { - /* check SFP+ module compatibility */ + /* Check SFP+ module compatibility */ DP(NETIF_MSG_LINK, "Module verification failed!!\n"); rc = -EINVAL; /* Turn on fault module-detected led */ @@ -8327,21 +8652,17 @@ static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW); } - /* - * Check and set limiting mode / LRM mode on 8726. On 8727 it + /* Check and set limiting mode / LRM mode on 8726. On 8727 it * is done automatically */ bnx2x_set_limiting_mode(params, phy, edc_mode); - /* - * Enable transmit for this module if the module is approved, or - * if unapproved modules should also enable the Tx laser + /* Disable transmit for this module if the module is not approved, and + * the laser needs to be disabled. 
*/ - if (rc == 0 || - (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) != - PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) - bnx2x_sfp_set_transmitter(params, phy, 1); - else + if ((rc) && + ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == + PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)) bnx2x_sfp_set_transmitter(params, phy, 0); return rc; } @@ -8353,11 +8674,13 @@ void bnx2x_handle_module_detect_int(struct link_params *params) struct bnx2x_phy *phy; u32 gpio_val; u8 gpio_num, gpio_port; - if (CHIP_IS_E3(bp)) + if (CHIP_IS_E3(bp)) { phy = &params->phy[INT_PHY]; - else + /* Always enable TX laser, will be disabled in case of fault */ + bnx2x_sfp_set_transmitter(params, phy, 1); + } else { phy = &params->phy[EXT_PHY1]; - + } if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, params->shmem_base, params->port, &gpio_num, &gpio_port) == -EINVAL) { @@ -8373,31 +8696,44 @@ void bnx2x_handle_module_detect_int(struct link_params *params) /* Call the handling function in case module is detected */ if (gpio_val == 0) { + bnx2x_set_mdio_emac_per_phy(bp, params); + bnx2x_set_aer_mmd(params, phy); + bnx2x_power_sfp_module(params, phy, 1); bnx2x_set_gpio_int(bp, gpio_num, MISC_REGISTERS_GPIO_INT_OUTPUT_CLR, gpio_port); - if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) + if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) { bnx2x_sfp_module_detection(phy, params); - else + if (CHIP_IS_E3(bp)) { + u16 rx_tx_in_reset; + /* In case WC is out of reset, reconfigure the + * link speed while taking into account 1G + * module limitation. + */ + bnx2x_cl45_read(bp, phy, + MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC6, + &rx_tx_in_reset); + if ((!rx_tx_in_reset) && + (params->link_flags & + PHY_INITIALIZED)) { + bnx2x_warpcore_reset_lane(bp, phy, 1); + bnx2x_warpcore_config_sfi(phy, params); + bnx2x_warpcore_reset_lane(bp, phy, 0); + } + } + } else { DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n"); + } } else { - u32 val = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, dev_info. - port_feature_config[params->port]. - config)); bnx2x_set_gpio_int(bp, gpio_num, MISC_REGISTERS_GPIO_INT_OUTPUT_SET, gpio_port); - /* - * Module was plugged out. 
* Disable transmit for this module */ phy->media_type = ETH_PHY_NOT_PRESENT; - if (((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == - PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) || - CHIP_IS_E3(bp)) - bnx2x_sfp_set_transmitter(params, phy, 0); } } @@ -8442,7 +8778,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy, bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT, MDIO_PMA_LASI_TXCTRL); - /* clear LASI indication*/ + /* Clear LASI indication*/ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); bnx2x_cl45_read(bp, phy, @@ -8460,8 +8796,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps" " link_status 0x%x\n", rx_sd, pcs_status, val2); - /* - * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status + /* Link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status * are set, or if the autoneg bit 1 is set */ link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1))); @@ -8511,7 +8846,7 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val); if (val) break; - msleep(10); + usleep_range(10000, 20000); } DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt); if ((params->feature_config_flags & @@ -8575,8 +8910,7 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy, } bnx2x_save_bcm_spirom_ver(bp, phy, params->port); - /* - * If TX Laser is controlled by GPIO_0, do not let PHY go into low + /* If TX Laser is controlled by GPIO_0, do not let PHY go into low * power mode, if TX Laser is disabled */ @@ -8641,7 +8975,7 @@ static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy, MDIO_PMA_REG_GEN_CTRL, MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); - /* wait for 150ms for microcode load */ + /* Wait for 150ms for microcode load */ msleep(150); /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */ @@ -8686,8 +9020,7 @@ static int bnx2x_8726_config_init(struct bnx2x_phy *phy, bnx2x_8726_external_rom_boot(phy, params); - /* - * Need to call module detected on initialization since the module + /* Need to call module detected on initialization since the module * detection triggered by actual module insertion might occur before * driver is loaded, and when driver is loaded, it reset all * registers, including the transmitter @@ -8724,8 +9057,7 @@ static int bnx2x_8726_config_init(struct bnx2x_phy *phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); - /* - * Enable RX-ALARM control to receive interrupt for 1G speed + /* Enable RX-ALARM control to receive interrupt for 1G speed * change */ bnx2x_cl45_write(bp, phy, @@ -8826,8 +9158,7 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy, struct link_params *params) { u32 swap_val, swap_override; u8 port; - /* - * The PHY reset is controlled by GPIO 1. Fake the port number + /* The PHY reset is controlled by GPIO 1. 
Fake the port number * to cancel the swap done in set_gpio() */ struct bnx2x *bp = params->bp; @@ -8838,80 +9169,14 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy, MISC_REGISTERS_GPIO_OUTPUT_LOW, port); } -static int bnx2x_8727_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) +static void bnx2x_8727_config_speed(struct bnx2x_phy *phy, + struct link_params *params) { - u32 tx_en_mode; - u16 tmp1, val, mod_abs, tmp2; - u16 rx_alarm_ctrl_val; - u16 lasi_ctrl_val; struct bnx2x *bp = params->bp; - /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */ - - bnx2x_wait_reset_complete(bp, phy, params); - rx_alarm_ctrl_val = (1<<2) | (1<<5) ; - /* Should be 0x6 to enable XS on Tx side. */ - lasi_ctrl_val = 0x0006; - - DP(NETIF_MSG_LINK, "Initializing BCM8727\n"); - /* enable LASI */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, - rx_alarm_ctrl_val); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL, - 0); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val); - - /* - * Initially configure MOD_ABS to interrupt when module is - * presence( bit 8) - */ - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs); - /* - * Set EDC off by setting OPTXLOS signal input to low (bit 9). - * When the EDC is off it locks onto a reference clock and avoids - * becoming 'lost' - */ - mod_abs &= ~(1<<8); - if (!(phy->flags & FLAGS_NOC)) - mod_abs &= ~(1<<9); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); - - - /* Enable/Disable PHY transmitter output */ - bnx2x_set_disable_pmd_transmit(params, phy, 0); - - /* Make MOD_ABS give interrupt on change */ - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, - &val); - val |= (1<<12); - if (phy->flags & FLAGS_NOC) - val |= (3<<5); - - /* - * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0 - * status which reflect SFP+ module over-current - */ - if (!(phy->flags & FLAGS_NOC)) - val &= 0xff8f; /* Reset bits 4-6 */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, val); - - bnx2x_8727_power_module(bp, phy, 1); - - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); - - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1); - + u16 tmp1, val; /* Set option 1G speed */ - if (phy->req_line_speed == SPEED_1000) { + if ((phy->req_line_speed == SPEED_1000) || + (phy->media_type == ETH_PHY_SFP_1G_FIBER)) { DP(NETIF_MSG_LINK, "Setting 1G force\n"); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40); @@ -8920,8 +9185,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy, bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1); DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1); - /* - * Power down the XAUI until link is up in case of dual-media + /* Power down the XAUI until link is up in case of dual-media * and 1G */ if (DUAL_MEDIA(params)) { @@ -8946,8 +9210,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300); } else { - /* - * Since the 8727 has only single reset pin, need to set the 10G + /* Since the 8727 has only single reset pin, need to set the 10G * registers although it is default */ bnx2x_cl45_write(bp, phy, @@ -8961,15 +9224,50 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0x0008); } +} - /* - * Set 2-wire transfer rate of 
SFP+ module EEPROM - to 100Khz since some DACs(direct attached cables) do - not work at 400Khz. +static int bnx2x_8727_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + u32 tx_en_mode; + u16 tmp1, mod_abs, tmp2; + struct bnx2x *bp = params->bp; + /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */ + + bnx2x_wait_reset_complete(bp, phy, params); + + DP(NETIF_MSG_LINK, "Initializing BCM8727\n"); + + bnx2x_8727_specific_func(phy, params, PHY_INIT); + /* Initially configure MOD_ABS to interrupt when module is + * present (bit 8) + */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs); + /* Set EDC off by setting OPTXLOS signal input to low (bit 9). + * When the EDC is off it locks onto a reference clock and avoids + * becoming 'lost' */ + mod_abs &= ~(1<<8); + if (!(phy->flags & FLAGS_NOC)) + mod_abs &= ~(1<<9); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR, - 0xa001); + MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); + + /* Enable/Disable PHY transmitter output */ + bnx2x_set_disable_pmd_transmit(params, phy, 0); + + bnx2x_8727_power_module(bp, phy, 1); + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1); + + bnx2x_8727_config_speed(phy, params); + /* Set TX PreEmphasis if needed */ if ((params->feature_config_flags & @@ -8986,8 +9284,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy, phy->tx_preemphasis[1]); } - /* - * If TX Laser is controlled by GPIO_0, do not let PHY go into low + /* If TX Laser is controlled by GPIO_0, do not let PHY go into low * power mode, if TX Laser is disabled */ tx_en_mode = REG_RD(bp, params->shmem_base + @@ -9004,6 +9301,12 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy, tmp2 &= 0xFFEF; bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, + &tmp2); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, + (tmp2 & 0x7fff)); } return 0; @@ -9027,8 +9330,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "MOD_ABS indication show module is absent\n"); phy->media_type = ETH_PHY_NOT_PRESENT; - /* - * 1. Set mod_abs to detect next module + /* 1. Set mod_abs to detect next module * presence event * 2. Set EDC off by setting OPTXLOS signal input to low * (bit 9). @@ -9042,8 +9344,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
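Pulling the speed programming out of bnx2x_8727_config_init() into bnx2x_8727_config_speed() is what allows the MOD_ABS handler below to re-apply speed limits after a module swap. A condensed sketch of that reuse (names from this patch, error handling elided):

/* Sketch: on plug-in, re-run the speed setup so a 1G-only SFP forces
 * the 1G configuration instead of keeping a stale 10G one.
 */
static void example_8727_on_plug_in(struct bnx2x_phy *phy,
				    struct link_params *params)
{
	if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) {
		bnx2x_sfp_module_detection(phy, params);
		bnx2x_8727_config_speed(phy, params);
	}
}
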
- /* - * Clear RX alarm since it stays up as long as + /* Clear RX alarm since it stays up as long as * the mod_abs wasn't changed */ bnx2x_cl45_read(bp, phy, @@ -9054,8 +9355,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, /* Module is present */ DP(NETIF_MSG_LINK, "MOD_ABS indication show module is present\n"); - /* - * First disable transmitter, and if the module is ok, the + /* First disable transmitter, and if the module is ok, the * module_detection will enable it * 1. Set mod_abs to detect next module absent event (bit 8) * 2. Restore the default polarity of the OPRXLOS signal and @@ -9069,8 +9369,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); - /* - * Clear RX alarm since it stays up as long as the mod_abs + /* Clear RX alarm since it stays up as long as the mod_abs * wasn't changed. This needs to be done before calling the * module detection, otherwise it will clear the link update * alarm @@ -9088,6 +9387,9 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, bnx2x_sfp_module_detection(phy, params); else DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n"); + + /* Reconfigure link speed based on module type limitations */ + bnx2x_8727_config_speed(phy, params); } DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", @@ -9131,8 +9433,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1); - /* - * If a module is present and there is need to check + /* If a module is present and there is a need to check * for over-current */ if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) { @@ -9172,6 +9473,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); + bnx2x_8727_power_module(params->bp, phy, 0); return 0; } } /* Over current check */ @@ -9184,12 +9486,11 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, ((1<<5) | (1<<2))); } - DP(NETIF_MSG_LINK, "Enabling 8727 TX laser if SFP is approved\n"); - bnx2x_8727_specific_func(phy, params, ENABLE_TX); - /* If transmitter is disabled, ignore false link up indication */ - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &val1); - if (val1 & (1<<15)) { + + if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) { + DP(NETIF_MSG_LINK, "Enabling 8727 TX laser\n"); + bnx2x_sfp_set_transmitter(params, phy, 1); + } else { DP(NETIF_MSG_LINK, "Tx is disabled\n"); return 0; } @@ -9198,8 +9499,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status); - /* - * Bits 0..2 --> speed detected, + /* Bits 0..2 --> speed detected, * Bits 13..15--> link is down */ if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) { @@ -9242,8 +9542,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_GP, &val1); - /* - * In case of dual-media board and 1G, power up the XAUI side, + /* In case of dual-media board and 1G, power up the XAUI side, * otherwise power it down. 
For 10G it is done automatically */ if (link_up) @@ -9279,21 +9578,27 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, struct bnx2x *bp, u8 port) { - u16 val, fw_ver1, fw_ver2, cnt; - - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { + u16 val, fw_ver2, cnt, i; + static struct bnx2x_reg_set reg_set[] = { + {MDIO_PMA_DEVAD, 0xA819, 0x0014}, + {MDIO_PMA_DEVAD, 0xA81A, 0xc200}, + {MDIO_PMA_DEVAD, 0xA81B, 0x0000}, + {MDIO_PMA_DEVAD, 0xA81C, 0x0300}, + {MDIO_PMA_DEVAD, 0xA817, 0x0009} + }; + u16 fw_ver1; + + if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1); - bnx2x_save_spirom_version(bp, port, - ((fw_ver1 & 0xf000)>>5) | (fw_ver1 & 0x7f), + bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff, phy->ver_addr); } else { /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */ /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009); + for (i = 0; i < ARRAY_SIZE(reg_set); i++) + bnx2x_cl45_write(bp, phy, reg_set[i].devad, + reg_set[i].reg, reg_set[i].val); for (cnt = 0; cnt < 100; cnt++) { bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); @@ -9341,8 +9646,16 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, static void bnx2x_848xx_set_led(struct bnx2x *bp, struct bnx2x_phy *phy) { - u16 val, offset; - + u16 val, offset, i; + static struct bnx2x_reg_set reg_set[] = { + {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0080}, + {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED2_MASK, 0x0018}, + {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006}, + {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_BLINK, 0x0000}, + {MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH, + MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ}, + {MDIO_AN_DEVAD, 0xFFFB, 0xFFFD} + }; /* PHYC_CTL_LED_CTL */ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, @@ -9354,49 +9667,44 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp, MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LINK_SIGNAL, val); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED1_MASK, - 0x80); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED2_MASK, - 0x18); - - /* Select activity source by Tx and Rx, as suggested by PHY AE */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED3_MASK, - 0x0006); + for (i = 0; i < ARRAY_SIZE(reg_set); i++) + bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, + reg_set[i].val); - /* Select the closest activity blink rate to that in 10/100/1000 */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED3_BLINK, - 0); - - /* Configure the blink rate to ~15.9 Hz */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH, - MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ); - - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) + if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1; else offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1; - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, offset, &val); - val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/ - 
bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, offset, val); + /* stretch_en for LED3*/ + bnx2x_cl45_read_or_write(bp, phy, + MDIO_PMA_DEVAD, offset, + MDIO_PMA_REG_84823_LED3_STRETCH_EN); +} - /* 'Interrupt Mask' */ - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, - 0xFFFB, 0xFFFD); +static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy, + struct link_params *params, + u32 action) +{ + struct bnx2x *bp = params->bp; + switch (action) { + case PHY_INIT: + if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) && + (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { + /* Save spirom version */ + bnx2x_save_848xx_spirom_version(phy, bp, params->port); + } + /* This phy uses the NIG latch mechanism since link indication + * arrives through its LED4 and not via its LASI signal, so we + * get steady signal instead of clear on read + */ + bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4, + 1 << NIG_LATCH_BC_ENABLE_MI_INT); + + bnx2x_848xx_set_led(bp, phy); + break; + } } static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, @@ -9404,30 +9712,12 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, struct link_vars *vars) { struct bnx2x *bp = params->bp; - u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val; - u16 tmp_req_line_speed; - - tmp_req_line_speed = phy->req_line_speed; - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { - if (phy->req_line_speed == SPEED_10000) - phy->req_line_speed = SPEED_AUTO_NEG; - } else { - /* Save spirom version */ - bnx2x_save_848xx_spirom_version(phy, bp, params->port); - } - /* - * This phy uses the NIG latch mechanism since link indication - * arrives through its LED4 and not via its LASI signal, so we - * get steady signal instead of clear on read - */ - bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4, - 1 << NIG_LATCH_BC_ENABLE_MI_INT); + u16 autoneg_val, an_1000_val, an_10_100_val; + bnx2x_848xx_specific_func(phy, params, PHY_INIT); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000); - bnx2x_848xx_set_led(bp, phy); - /* set 1000 speed advertisement */ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, @@ -9461,32 +9751,41 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, an_1000_val); - /* set 100 speed advertisement */ - if ((phy->req_line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & - (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | - PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))) { - an_10_100_val |= (1<<7); - /* Enable autoneg and restart autoneg for legacy speeds */ - autoneg_val |= (1<<9 | 1<<12); - - if (phy->req_duplex == DUPLEX_FULL) + /* Set 10/100 speed advertisement */ + if (phy->req_line_speed == SPEED_AUTO_NEG) { + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) { + /* Enable autoneg and restart autoneg for legacy speeds + */ + autoneg_val |= (1<<9 | 1<<12); an_10_100_val |= (1<<8); - DP(NETIF_MSG_LINK, "Advertising 100M\n"); - } - /* set 10 speed advertisement */ - if (((phy->req_line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & - (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | - PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) && - (phy->supported & - (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full)))) { - an_10_100_val |= (1<<5); - autoneg_val |= (1<<9 | 1<<12); - if (phy->req_duplex == DUPLEX_FULL) + DP(NETIF_MSG_LINK, "Advertising 100M-FD\n"); + } + + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) { + /* Enable autoneg and restart autoneg for 
legacy speeds + */ + autoneg_val |= (1<<9 | 1<<12); + an_10_100_val |= (1<<7); + DP(NETIF_MSG_LINK, "Advertising 100M-HD\n"); + } + + if ((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) && + (phy->supported & SUPPORTED_10baseT_Full)) { an_10_100_val |= (1<<6); - DP(NETIF_MSG_LINK, "Advertising 10M\n"); + autoneg_val |= (1<<9 | 1<<12); + DP(NETIF_MSG_LINK, "Advertising 10M-FD\n"); + } + + if ((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) && + (phy->supported & SUPPORTED_10baseT_Half)) { + an_10_100_val |= (1<<5); + autoneg_val |= (1<<9 | 1<<12); + DP(NETIF_MSG_LINK, "Advertising 10M-HD\n"); + } } /* Only 10/100 are allowed to work in FORCE mode */ @@ -9521,12 +9820,12 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, if (phy->req_duplex == DUPLEX_FULL) autoneg_val |= (1<<8); - /* - * Always write this if this is not 84833. - * For 84833, write it only when it's a forced speed. + /* Always write this if this is not 84833/4. + * For 84833/4, write it only when it's a forced speed. */ - if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || - ((autoneg_val & (1<<12)) == 0)) + if (((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) && + (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) || + ((autoneg_val & (1<<12)) == 0)) bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val); @@ -9538,14 +9837,11 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Advertising 10G\n"); /* Restart autoneg for 10G*/ - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, - &an_10g_val); - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, - an_10g_val | 0x1000); + bnx2x_cl45_read_or_write( + bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, + 0x1000); bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x3200); @@ -9555,8 +9851,6 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, 1); - phy->req_line_speed = tmp_req_line_speed; - return 0; } @@ -9580,11 +9874,10 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy, #define PHY84833_CMDHDLR_WAIT 300 #define PHY84833_CMDHDLR_MAX_ARGS 5 static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, - struct link_params *params, - u16 fw_cmd, - u16 cmd_args[]) + struct link_params *params, u16 fw_cmd, + u16 cmd_args[], int argc) { - u32 idx; + int idx; u16 val; struct bnx2x *bp = params->bp; /* Write CMD_OPEN_OVERRIDE to STATUS reg */ @@ -9596,7 +9889,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, MDIO_84833_CMD_HDLR_STATUS, &val); if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS) break; - msleep(1); + usleep_range(1000, 2000); } if (idx >= PHY84833_CMDHDLR_WAIT) { DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n"); @@ -9604,7 +9897,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, } /* Prepare argument(s) and issue command */ - for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) { + for (idx = 0; idx < argc; idx++) { bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, MDIO_84833_CMD_HDLR_DATA1 + idx, cmd_args[idx]); @@ -9617,7 +9910,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) || (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) break; - msleep(1); + usleep_range(1000, 2000); } if ((idx >= PHY84833_CMDHDLR_WAIT) || (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) { @@ -9625,7 +9918,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, return 
-EINVAL; } /* Gather returning data */ - for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) { + for (idx = 0; idx < argc; idx++) { bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, MDIO_84833_CMD_HDLR_DATA1 + idx, &cmd_args[idx]); @@ -9636,7 +9929,6 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, return 0; } - static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) @@ -9659,7 +9951,7 @@ static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy, data[1] = (u16)pair_swap; status = bnx2x_84833_cmd_hdlr(phy, params, - PHY84833_CMD_SET_PAIR_SWAP, data); + PHY84833_CMD_SET_PAIR_SWAP, data, PHY84833_CMDHDLR_MAX_ARGS); if (status == 0) DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]); @@ -9714,6 +10006,15 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy, other_shmem_base_addr)); u32 shmem_base_path[2]; + + /* Work around for 84833 LED failure inside RESET status */ + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_8481_LEGACY_MII_CTRL, + MDIO_AN_REG_8481_MII_CTRL_FORCE_1G); + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_8481_1G_100T_EXT_CTRL, + MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF); + shmem_base_path[0] = params->shmem_base; shmem_base_path[1] = other_shmem_base_addr; @@ -9728,6 +10029,45 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy, return 0; } +static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + int rc; + struct bnx2x *bp = params->bp; + u16 cmd_args = 0; + + DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n"); + + /* Prevent Phy from working in EEE and advertising it */ + rc = bnx2x_84833_cmd_hdlr(phy, params, + PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1); + if (rc) { + DP(NETIF_MSG_LINK, "EEE disable failed.\n"); + return rc; + } + + return bnx2x_eee_disable(phy, params, vars); +} + +static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + int rc; + struct bnx2x *bp = params->bp; + u16 cmd_args = 1; + + rc = bnx2x_84833_cmd_hdlr(phy, params, + PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1); + if (rc) { + DP(NETIF_MSG_LINK, "EEE enable failed.\n"); + return rc; + } + + return bnx2x_eee_advertise(phy, params, vars, SHMEM_EEE_10G_ADV); +} + #define PHY84833_CONSTANT_LATENCY 1193 static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, struct link_params *params, @@ -9736,13 +10076,13 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; u8 port, initialize = 1; u16 val; - u32 actual_phy_selection, cms_enable; + u32 actual_phy_selection; u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS]; int rc = 0; - msleep(1); + usleep_range(1000, 2000); - if (!(CHIP_IS_E1(bp))) + if (!(CHIP_IS_E1x(bp))) port = BP_PATH(bp); else port = params->port; @@ -9762,9 +10102,9 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, /* Wait for GPHY to come out of reset */ msleep(50); - if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { - /* - * BCM84823 requires that XGXS links up first @ 10G for normal + if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) && + (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { + /* BCM84823 requires that XGXS links up first @ 10G for normal * behavior. 
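/*
 * The reworked bnx2x_84833_cmd_hdlr() above takes an explicit argc so that
 * callers shuttle only the arguments they actually use, and it polls the
 * status register with a bounded retry loop both before issuing the command
 * and while waiting for completion. A minimal user-space sketch of that
 * poll, issue, poll-again shape; mdio_read()/mdio_write() and every register
 * and status value here are stand-ins, not the driver's API.
 */
#include <stdint.h>
#include <unistd.h>

static uint16_t regs[16];
static uint16_t mdio_read(int reg)              { return regs[reg]; }
static void     mdio_write(int reg, uint16_t v) { regs[reg] = v; }

#define REG_STATUS   0
#define REG_COMMAND  1
#define REG_DATA0    2
#define ST_READY     0x0010     /* made-up status codes */
#define ST_PASS      0x0008
#define ST_FAIL      0x0009
#define MAX_WAIT     300

static int fw_cmd(uint16_t cmd, uint16_t *args, int argc)
{
        uint16_t st = 0;
        int i;

        for (i = 0; i < MAX_WAIT; i++) {        /* wait until FW is ready */
                if (mdio_read(REG_STATUS) == ST_READY)
                        break;
                usleep(1000);                   /* ~1ms, as usleep_range() */
        }
        if (i == MAX_WAIT)
                return -1;

        for (i = 0; i < argc; i++)              /* only argc args, not max */
                mdio_write(REG_DATA0 + i, args[i]);
        mdio_write(REG_COMMAND, cmd);

        for (i = 0; i < MAX_WAIT; i++) {        /* poll for completion */
                st = mdio_read(REG_STATUS);
                if (st == ST_PASS || st == ST_FAIL)
                        break;
                usleep(1000);
        }
        if (i == MAX_WAIT || st == ST_FAIL)
                return -1;

        for (i = 0; i < argc; i++)              /* gather returned data */
                args[i] = mdio_read(REG_DATA0 + i);
        return 0;
}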
*/ u16 temp; @@ -9819,7 +10159,8 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n", params->multi_phy_config, val); - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { + if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { bnx2x_84833_pair_swap_cfg(phy, params, vars); /* Keep AutogrEEEn disabled. */ @@ -9828,8 +10169,9 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1; cmd_args[3] = PHY84833_CONSTANT_LATENCY; rc = bnx2x_84833_cmd_hdlr(phy, params, - PHY84833_CMD_SET_EEE_MODE, cmd_args); - if (rc != 0) + PHY84833_CMD_SET_EEE_MODE, cmd_args, + PHY84833_CMDHDLR_MAX_ARGS); + if (rc) DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n"); } if (initialize) @@ -9838,7 +10180,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, bnx2x_save_848xx_spirom_version(phy, bp, params->port); /* 84833 PHY has a better feature and doesn't need to support this. */ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) { - cms_enable = REG_RD(bp, params->shmem_base + + u32 cms_enable = REG_RD(bp, params->shmem_base + offsetof(struct shmem_region, dev_info.port_hw_config[params->port].default_cfg)) & PORT_HW_CFG_ENABLE_CMS_MASK; @@ -9853,15 +10195,42 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, MDIO_CTL_REG_84823_USER_CTRL_REG, val); } - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_FW_REV, &val); + + /* Configure EEE support */ + if ((val >= MDIO_84833_TOP_CFG_FW_EEE) && + (val != MDIO_84833_TOP_CFG_FW_NO_EEE) && + bnx2x_eee_has_cap(params)) { + rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_10G_ADV); + if (rc) { + DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n"); + bnx2x_8483x_disable_eee(phy, params, vars); + return rc; + } + + if ((phy->req_duplex == DUPLEX_FULL) && + (params->eee_mode & EEE_MODE_ADV_LPI) && + (bnx2x_eee_calc_timer(params) || + !(params->eee_mode & EEE_MODE_ENABLE_LPI))) + rc = bnx2x_8483x_enable_eee(phy, params, vars); + else + rc = bnx2x_8483x_disable_eee(phy, params, vars); + if (rc) { + DP(NETIF_MSG_LINK, "Failed to set EEE advertisement\n"); + return rc; + } + } else { + vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK; + } + + if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { /* Bring PHY out of super isolate mode as the final step. 
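/*
 * The 848x3 EEE hook-up above reduces to one predicate: advertise EEE only
 * when the port is full duplex, LPI advertisement was requested, and either
 * a valid LPI timer was computed or LPI assertion itself was not requested.
 * A condensed sketch of that decision; the struct and field names are
 * shorthand for the eee_mode flags in the hunk, not the driver's types.
 */
#include <stdbool.h>

struct eee_cfg {
        bool full_duplex;       /* phy->req_duplex == DUPLEX_FULL */
        bool adv_lpi;           /* EEE_MODE_ADV_LPI requested */
        bool enable_lpi;        /* EEE_MODE_ENABLE_LPI requested */
        unsigned int timer;     /* bnx2x_eee_calc_timer(); 0 = invalid */
};

static bool should_advertise_eee(const struct eee_cfg *c)
{
        return c->full_duplex && c->adv_lpi &&
               (c->timer != 0 || !c->enable_lpi);
}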
*/ - bnx2x_cl45_read(bp, phy, - MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val); - val &= ~MDIO_84833_SUPER_ISOLATE; - bnx2x_cl45_write(bp, phy, - MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_XGPHY_STRAP1, val); + bnx2x_cl45_read_and_write(bp, phy, + MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_XGPHY_STRAP1, + (u16)~MDIO_84833_SUPER_ISOLATE); } return rc; } @@ -9907,17 +10276,19 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Legacy speed status = 0x%x\n", legacy_status); link_up = ((legacy_status & (1<<11)) == (1<<11)); - if (link_up) { - legacy_speed = (legacy_status & (3<<9)); - if (legacy_speed == (0<<9)) - vars->line_speed = SPEED_10; - else if (legacy_speed == (1<<9)) - vars->line_speed = SPEED_100; - else if (legacy_speed == (2<<9)) - vars->line_speed = SPEED_1000; - else /* Should not happen */ - vars->line_speed = 0; + legacy_speed = (legacy_status & (3<<9)); + if (legacy_speed == (0<<9)) + vars->line_speed = SPEED_10; + else if (legacy_speed == (1<<9)) + vars->line_speed = SPEED_100; + else if (legacy_speed == (2<<9)) + vars->line_speed = SPEED_1000; + else { /* Should not happen: Treat as link down */ + vars->line_speed = 0; + link_up = 0; + } + if (link_up) { if (legacy_status & (1<<8)) vars->duplex = DUPLEX_FULL; else @@ -9945,15 +10316,55 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, } } if (link_up) { - DP(NETIF_MSG_LINK, "BCM84823: link speed is %d\n", + DP(NETIF_MSG_LINK, "BCM848x3: link speed is %d\n", vars->line_speed); bnx2x_ext_phy_resolve_fc(phy, params, vars); + + /* Read LP advertised speeds */ + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_CL37_FC_LP, &val); + if (val & (1<<5)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10THD_CAPABLE; + if (val & (1<<6)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE; + if (val & (1<<7)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE; + if (val & (1<<8)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE; + if (val & (1<<9)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_100T4_CAPABLE; + + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_1000T_STATUS, &val); + + if (val & (1<<10)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE; + if (val & (1<<11)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; + + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_MASTER_STATUS, &val); + + if (val & (1<<11)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + + /* Determine if EEE was negotiated */ + if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) + bnx2x_eee_an_resolve(phy, params, vars); } return link_up; } - static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len) { int status = 0; @@ -9988,7 +10399,7 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy, u8 port; u16 val16; - if (!(CHIP_IS_E1(bp))) + if (!(CHIP_IS_E1x(bp))) port = BP_PATH(bp); else port = params->port; @@ -10015,7 +10426,7 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, u16 val; u8 port; - if (!(CHIP_IS_E1(bp))) + if (!(CHIP_IS_E1x(bp))) port = BP_PATH(bp); else port = params->port; @@ -10090,6 +10501,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0); + if (phy->type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) { + /* Disable MI_INT interrupt before setting LED4 + * source to constant off. 
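/*
 * Several read-modify-write sequences in these hunks collapse into the new
 * bnx2x_cl45_read_or_write() and bnx2x_cl45_read_and_write() helpers; the
 * super-isolate exit above becomes a single call taking the complement of
 * MDIO_84833_SUPER_ISOLATE. A sketch of the helper shape, with the devad
 * argument dropped for brevity and a stand-in register file instead of
 * real MDIO:
 */
#include <stdint.h>

static uint16_t regs45[256];
static uint16_t cl45_read(int reg)              { return regs45[reg & 255]; }
static void     cl45_write(int reg, uint16_t v) { regs45[reg & 255] = v; }

/* Set bits in one call: reg |= or_val */
static void cl45_read_or_write(int reg, uint16_t or_val)
{
        cl45_write(reg, cl45_read(reg) | or_val);
}

/* Clear bits in one call: caller passes the complement, reg &= and_val */
static void cl45_read_and_write(int reg, uint16_t and_val)
{
        cl45_write(reg, cl45_read(reg) & and_val);
}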
+ */ + if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + + params->port*4) & + NIG_MASK_MI_INT) { + params->link_flags |= + LINK_FLAGS_INT_DISABLED; + + bnx2x_bits_dis( + bp, + NIG_REG_MASK_INTERRUPT_PORT0 + + params->port*4, + NIG_MASK_MI_INT); + } + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_SIGNAL_MASK, + 0x0); + } } break; case LED_MODE_ON: @@ -10136,6 +10569,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x20); + if (phy->type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) { + /* Disable MI_INT interrupt before setting LED4 + * source to constant on. + */ + if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + + params->port*4) & + NIG_MASK_MI_INT) { + params->link_flags |= + LINK_FLAGS_INT_DISABLED; + + bnx2x_bits_dis( + bp, + NIG_REG_MASK_INTERRUPT_PORT0 + + params->port*4, + NIG_MASK_MI_INT); + } + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_SIGNAL_MASK, + 0x20); + } } break; @@ -10184,10 +10639,18 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, 0x40); } else { + /* EXTPHY2 LED mode indicate that the 100M/1G/10G LED + * sources are all wired through LED1, rather than only + * 10G in other modes. + */ + val = ((params->hw_led_mode << + SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY2) ? 0x98 : 0x80; + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, - 0x80); + val); /* Tell LED3 to blink on source */ bnx2x_cl45_read(bp, phy, @@ -10200,12 +10663,27 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LINK_SIGNAL, val); + if (phy->type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) { + /* Restore LED4 source to external link, + * and re-enable interrupts. + */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_SIGNAL_MASK, + 0x40); + if (params->link_flags & + LINK_FLAGS_INT_DISABLED) { + bnx2x_link_int_enable(params); + params->link_flags &= + ~LINK_FLAGS_INT_DISABLED; + } + } } break; } - /* - * This is a workaround for E3+84833 until autoneg + /* This is a workaround for E3+84833 until autoneg * restart is fixed in f/w */ if (CHIP_IS_E3(bp)) { @@ -10217,6 +10695,35 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, /******************************************************************/ /* 54618SE PHY SECTION */ /******************************************************************/ +static void bnx2x_54618se_specific_func(struct bnx2x_phy *phy, + struct link_params *params, + u32 action) +{ + struct bnx2x *bp = params->bp; + u16 temp; + switch (action) { + case PHY_INIT: + /* Configure LED4: set to INTR (0x6). */ + /* Accessing shadow register 0xe. */ + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_LED_SEL2); + bnx2x_cl22_read(bp, phy, + MDIO_REG_GPHY_SHADOW, + &temp); + temp &= ~(0xf << 4); + temp |= (0x6 << 4); + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_WR_ENA | temp); + /* Configure INTR based on link status change. 
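/*
 * The LED4/INTR setup that moved into bnx2x_54618se_specific_func() follows
 * the GPHY shadow-register convention: write the shadow selector, read the
 * value back, modify the field, then write it with the write-enable bit set.
 * Sketched with stand-in CL22 accessors; the addresses and bit positions
 * here are illustrative only.
 */
#include <stdint.h>

static uint16_t regs22[32];
static uint16_t cl22_read(int reg)              { return regs22[reg & 31]; }
static void     cl22_write(int reg, uint16_t v) { regs22[reg & 31] = v; }

#define REG_SHADOW      0x1c
#define SHADOW_SEL_LED2 (0xe << 10)     /* select shadow register 0xe */
#define SHADOW_WR_ENA   (1 << 15)

static void set_led4_source(uint16_t src)       /* src = 0x6 selects INTR */
{
        uint16_t v;

        cl22_write(REG_SHADOW, SHADOW_SEL_LED2);
        v = cl22_read(REG_SHADOW);
        v &= ~(0xf << 4);                       /* clear the LED4 field */
        v |= (src & 0xf) << 4;
        cl22_write(REG_SHADOW, SHADOW_WR_ENA | v);
}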
*/ + bnx2x_cl22_write(bp, phy, + MDIO_REG_INTR_MASK, + ~MDIO_REG_INTR_MASK_LINK_STATUS); + break; + } +} + static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) @@ -10227,10 +10734,9 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, u32 cfg_pin; DP(NETIF_MSG_LINK, "54618SE cfg init\n"); - usleep_range(1000, 1000); + usleep_range(1000, 2000); - /* - * This works with E3 only, no need to check the chip + /* This works with E3 only, no need to check the chip * before determining the port. */ port = params->port; @@ -10252,27 +10758,11 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, MDIO_PMA_REG_CTRL, 0x8000); bnx2x_wait_reset_complete(bp, phy, params); - /*wait for GPHY to reset */ + /* Wait for GPHY to reset */ msleep(50); - /* Configure LED4: set to INTR (0x6). */ - /* Accessing shadow register 0xe. */ - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_SHADOW, - MDIO_REG_GPHY_SHADOW_LED_SEL2); - bnx2x_cl22_read(bp, phy, - MDIO_REG_GPHY_SHADOW, - &temp); - temp &= ~(0xf << 4); - temp |= (0x6 << 4); - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_SHADOW, - MDIO_REG_GPHY_SHADOW_WR_ENA | temp); - /* Configure INTR based on link status change. */ - bnx2x_cl22_write(bp, phy, - MDIO_REG_INTR_MASK, - ~MDIO_REG_INTR_MASK_LINK_STATUS); + bnx2x_54618se_specific_func(phy, params, PHY_INIT); /* Flip the signal detect polarity (set 0x1c.0x1e[8]). */ bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_SHADOW, @@ -10297,7 +10787,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE; - /* read all advertisement */ + /* Read all advertisement */ bnx2x_cl22_read(bp, phy, 0x09, &an_1000_val); @@ -10316,9 +10806,9 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, (1<<11)); if (((phy->req_line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & - PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || - (phy->req_line_speed == SPEED_1000)) { + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || + (phy->req_line_speed == SPEED_1000)) { an_1000_val |= (1<<8); autoneg_val |= (1<<9 | 1<<12); if (phy->req_duplex == DUPLEX_FULL) @@ -10334,30 +10824,32 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, 0x09, &an_1000_val); - /* set 100 speed advertisement */ - if (((phy->req_line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & - (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | - PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) { - an_10_100_val |= (1<<7); - /* Enable autoneg and restart autoneg for legacy speeds */ - autoneg_val |= (1<<9 | 1<<12); - - if (phy->req_duplex == DUPLEX_FULL) - an_10_100_val |= (1<<8); - DP(NETIF_MSG_LINK, "Advertising 100M\n"); - } - - /* set 10 speed advertisement */ - if (((phy->req_line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & - (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | - PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) { - an_10_100_val |= (1<<5); - autoneg_val |= (1<<9 | 1<<12); - if (phy->req_duplex == DUPLEX_FULL) + /* Advertise 10/100 link speed */ + if (phy->req_line_speed == SPEED_AUTO_NEG) { + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) { + an_10_100_val |= (1<<5); + autoneg_val |= (1<<9 | 1<<12); + DP(NETIF_MSG_LINK, "Advertising 10M-HD\n"); + } + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) { an_10_100_val |= (1<<6); - DP(NETIF_MSG_LINK, "Advertising 10M\n"); + autoneg_val |= (1<<9 | 1<<12); + DP(NETIF_MSG_LINK, "Advertising 10M-FD\n"); + } + if 
(phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) { + an_10_100_val |= (1<<7); + autoneg_val |= (1<<9 | 1<<12); + DP(NETIF_MSG_LINK, "Advertising 100M-HD\n"); + } + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) { + an_10_100_val |= (1<<8); + autoneg_val |= (1<<9 | 1<<12); + DP(NETIF_MSG_LINK, "Advertising 100M-FD\n"); + } } /* Only 10/100 are allowed to work in FORCE mode */ @@ -10377,28 +10869,52 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Setting 10M force\n"); } - /* Check if we should turn on Auto-GrEEEn */ - bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &temp); - if (temp == MDIO_REG_GPHY_ID_54618SE) { - if (params->feature_config_flags & - FEATURE_CONFIG_AUTOGREEEN_ENABLED) { - temp = 6; - DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n"); + if ((phy->flags & FLAGS_EEE) && bnx2x_eee_has_cap(params)) { + int rc; + + bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS, + MDIO_REG_GPHY_EXP_ACCESS_TOP | + MDIO_REG_GPHY_EXP_TOP_2K_BUF); + bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, &temp); + temp &= 0xfffe; + bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, temp); + + rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_1G_ADV); + if (rc) { + DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n"); + bnx2x_eee_disable(phy, params, vars); + } else if ((params->eee_mode & EEE_MODE_ADV_LPI) && + (phy->req_duplex == DUPLEX_FULL) && + (bnx2x_eee_calc_timer(params) || + !(params->eee_mode & EEE_MODE_ENABLE_LPI))) { + /* Need to advertise EEE only when requested, + * and either no LPI assertion was requested, + * or it was requested and a valid timer was set. + * Also notice full duplex is required for EEE. + */ + bnx2x_eee_advertise(phy, params, vars, + SHMEM_EEE_1G_ADV); } else { - temp = 0; - DP(NETIF_MSG_LINK, "Disabling Auto-GrEEEn\n"); + DP(NETIF_MSG_LINK, "Don't Advertise 1GBase-T EEE\n"); + bnx2x_eee_disable(phy, params, vars); + } + } else { + vars->eee_status &= ~SHMEM_EEE_1G_ADV << + SHMEM_EEE_SUPPORTED_SHIFT; + + if (phy->flags & FLAGS_EEE) { + /* Handle legacy auto-grEEEn */ + if (params->feature_config_flags & + FEATURE_CONFIG_AUTOGREEEN_ENABLED) { + temp = 6; + DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n"); + } else { + temp = 0; + DP(NETIF_MSG_LINK, "Don't Adv. EEE\n"); + } + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_EEE_ADV, temp); } - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_CL45_ADDR_REG, MDIO_AN_DEVAD); - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_CL45_DATA_REG, - MDIO_REG_GPHY_EEE_ADV); - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_CL45_ADDR_REG, - (0x1 << 14) | MDIO_AN_DEVAD); - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_CL45_DATA_REG, - temp); } bnx2x_cl22_write(bp, phy, @@ -10458,13 +10974,11 @@ static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy, u32 cfg_pin; u8 port; - /* - * In case of no EPIO routed to reset the GPHY, put it + /* In case of no EPIO routed to reset the GPHY, put it * in low power mode. */ bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800); - /* - * This works with E3 only, no need to check the chip + /* This works with E3 only, no need to check the chip * before determining the port. 
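/*
 * Both the 848xx and 54618SE paths now advertise each legacy speed/duplex
 * combination independently: every set capability maps to one bit of the
 * 10/100 advertisement word (bit 5 = 10M-HD through bit 8 = 100M-FD) and
 * also forces autoneg enable plus restart (bits 9 and 12 of MII control).
 * A table-driven sketch of that mapping; the CAP_* values stand in for the
 * PORT_HW_CFG_SPEED_CAPABILITY_D0_* bits and are assumptions.
 */
#include <stdint.h>

#define CAP_10_HALF     (1u << 0)
#define CAP_10_FULL     (1u << 1)
#define CAP_100_HALF    (1u << 2)
#define CAP_100_FULL    (1u << 3)

static const struct { uint32_t cap; uint16_t adv_bit; } adv_map[] = {
        { CAP_10_HALF,  1 << 5 },
        { CAP_10_FULL,  1 << 6 },
        { CAP_100_HALF, 1 << 7 },
        { CAP_100_FULL, 1 << 8 },
};

static void build_adv(uint32_t cap_mask, uint16_t *an_10_100,
                      uint16_t *mii_ctrl)
{
        unsigned int i;

        for (i = 0; i < sizeof(adv_map) / sizeof(adv_map[0]); i++) {
                if (cap_mask & adv_map[i].cap) {
                        *an_10_100 |= adv_map[i].adv_bit;
                        *mii_ctrl |= (1 << 9) | (1 << 12); /* AN en+restart */
                }
        }
}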
*/ port = params->port; @@ -10489,7 +11003,7 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy, /* Get speed operation status */ bnx2x_cl22_read(bp, phy, - 0x19, + MDIO_REG_GPHY_AUX_STATUS, &legacy_status); DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status); @@ -10547,30 +11061,40 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "BCM54618SE: link speed is %d\n", vars->line_speed); - /* Report whether EEE is resolved. */ - bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &val); - if (val == MDIO_REG_GPHY_ID_54618SE) { - if (vars->link_status & - LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) - val = 0; - else { - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_CL45_ADDR_REG, - MDIO_AN_DEVAD); - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_CL45_DATA_REG, - MDIO_REG_GPHY_EEE_RESOLVED); - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_CL45_ADDR_REG, - (0x1 << 14) | MDIO_AN_DEVAD); - bnx2x_cl22_read(bp, phy, - MDIO_REG_GPHY_CL45_DATA_REG, - &val); - } - DP(NETIF_MSG_LINK, "EEE resolution: 0x%x\n", val); - } - bnx2x_ext_phy_resolve_fc(phy, params, vars); + + if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { + /* Report LP advertised speeds */ + bnx2x_cl22_read(bp, phy, 0x5, &val); + + if (val & (1<<5)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10THD_CAPABLE; + if (val & (1<<6)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE; + if (val & (1<<7)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE; + if (val & (1<<8)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE; + if (val & (1<<9)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_100T4_CAPABLE; + + bnx2x_cl22_read(bp, phy, 0xa, &val); + if (val & (1<<10)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE; + if (val & (1<<11)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; + + if ((phy->flags & FLAGS_EEE) && + bnx2x_eee_has_cap(params)) + bnx2x_eee_an_resolve(phy, params, vars); + } } return link_up; } @@ -10609,8 +11133,7 @@ static void bnx2x_54618se_config_loopback(struct bnx2x_phy *phy, /* This register opens the gate for the UMAC despite its name */ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1); - /* - * Maximum Frame Length (RW). Defines a 14-Bit maximum frame + /* Maximum Frame Length (RW). Defines a 14-Bit maximum frame * length used by the MAC receive logic to check frames. 
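/*
 * Both read_status paths above now record what the link partner advertised:
 * bits 5..9 of the CL22 AN link-partner ability register (0x5) map to the
 * 10/100 partner-capability flags, and bits 10..11 of the 1000BASE-T status
 * register (0xa) to the 1G flags. A sketch of that decode; the LP_* values
 * stand in for the LINK_STATUS_LINK_PARTNER_* flags.
 */
#include <stdint.h>

#define LP_10THD        (1u << 0)
#define LP_10TFD        (1u << 1)
#define LP_100TXHD      (1u << 2)
#define LP_100TXFD      (1u << 3)
#define LP_100T4        (1u << 4)
#define LP_1000THD      (1u << 5)
#define LP_1000TFD      (1u << 6)

static uint32_t decode_lp(uint16_t anlpar, uint16_t gig_status)
{
        uint32_t st = 0;

        if (anlpar & (1 << 5))      st |= LP_10THD;
        if (anlpar & (1 << 6))      st |= LP_10TFD;
        if (anlpar & (1 << 7))      st |= LP_100TXHD;
        if (anlpar & (1 << 8))      st |= LP_100TXFD;
        if (anlpar & (1 << 9))      st |= LP_100T4;
        if (gig_status & (1 << 10)) st |= LP_1000THD;
        if (gig_status & (1 << 11)) st |= LP_1000TFD;
        return st;
}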
*/ REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710); @@ -10688,7 +11211,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n", val2, val1); link_up = ((val1 & 4) == 4); - /* if link is up print the AN outcome of the SFX7101 PHY */ + /* If link is up print the AN outcome of the SFX7101 PHY */ if (link_up) { bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS, @@ -10699,6 +11222,11 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy, val2, (val2 & (1<<14))); bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); bnx2x_ext_phy_resolve_fc(phy, params, vars); + + /* Read LP advertised speeds */ + if (val2 & (1<<11)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; } return link_up; } @@ -10778,7 +11306,7 @@ static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy, /* STATIC PHY DECLARATION */ /******************************************************************/ -static struct bnx2x_phy phy_null = { +static const struct bnx2x_phy phy_null = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN, .addr = 0, .def_md_devad = 0, @@ -10804,7 +11332,7 @@ static struct bnx2x_phy phy_null = { .phy_specific_func = (phy_specific_func_t)NULL }; -static struct bnx2x_phy phy_serdes = { +static const struct bnx2x_phy phy_serdes = { .type = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT, .addr = 0xff, .def_md_devad = 0, @@ -10839,7 +11367,7 @@ static struct bnx2x_phy phy_serdes = { .phy_specific_func = (phy_specific_func_t)NULL }; -static struct bnx2x_phy phy_xgxs = { +static const struct bnx2x_phy phy_xgxs = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, .addr = 0xff, .def_md_devad = 0, @@ -10872,28 +11400,28 @@ static struct bnx2x_phy phy_xgxs = { .format_fw_ver = (format_fw_ver_t)NULL, .hw_reset = (hw_reset_t)NULL, .set_link_led = (set_link_led_t)NULL, - .phy_specific_func = (phy_specific_func_t)NULL + .phy_specific_func = (phy_specific_func_t)bnx2x_xgxs_specific_func }; -static struct bnx2x_phy phy_warpcore = { +static const struct bnx2x_phy phy_warpcore = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, .addr = 0xff, .def_md_devad = 0, - .flags = FLAGS_HW_LOCK_REQUIRED, + .flags = FLAGS_TX_ERROR_CHECK, .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, .supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_10000baseT_Full | - SUPPORTED_20000baseKR2_Full | - SUPPORTED_20000baseMLD2_Full | - SUPPORTED_FIBRE | - SUPPORTED_Autoneg | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause), + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_20000baseKR2_Full | + SUPPORTED_20000baseMLD2_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), .media_type = ETH_PHY_UNSPECIFIED, .ver_addr = 0, .req_flow_ctrl = 0, @@ -10912,7 +11440,7 @@ static struct bnx2x_phy phy_warpcore = { }; -static struct bnx2x_phy phy_7101 = { +static const struct bnx2x_phy phy_7101 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101, .addr = 0xff, .def_md_devad = 0, @@ -10941,11 +11469,11 @@ static struct bnx2x_phy phy_7101 = { .set_link_led = (set_link_led_t)bnx2x_7101_set_link_led, .phy_specific_func = (phy_specific_func_t)NULL }; -static struct bnx2x_phy phy_8073 = { +static const struct bnx2x_phy phy_8073 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, .addr = 
0xff, .def_md_devad = 0, - .flags = FLAGS_HW_LOCK_REQUIRED, + .flags = 0, .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -10970,9 +11498,9 @@ static struct bnx2x_phy phy_8073 = { .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, .hw_reset = (hw_reset_t)NULL, .set_link_led = (set_link_led_t)NULL, - .phy_specific_func = (phy_specific_func_t)NULL + .phy_specific_func = (phy_specific_func_t)bnx2x_8073_specific_func }; -static struct bnx2x_phy phy_8705 = { +static const struct bnx2x_phy phy_8705 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705, .addr = 0xff, .def_md_devad = 0, @@ -11000,7 +11528,7 @@ static struct bnx2x_phy phy_8705 = { .set_link_led = (set_link_led_t)NULL, .phy_specific_func = (phy_specific_func_t)NULL }; -static struct bnx2x_phy phy_8706 = { +static const struct bnx2x_phy phy_8706 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, .addr = 0xff, .def_md_devad = 0, @@ -11013,7 +11541,7 @@ static struct bnx2x_phy phy_8706 = { SUPPORTED_FIBRE | SUPPORTED_Pause | SUPPORTED_Asym_Pause), - .media_type = ETH_PHY_SFP_FIBER, + .media_type = ETH_PHY_SFPP_10G_FIBER, .ver_addr = 0, .req_flow_ctrl = 0, .req_line_speed = 0, @@ -11030,12 +11558,12 @@ static struct bnx2x_phy phy_8706 = { .phy_specific_func = (phy_specific_func_t)NULL }; -static struct bnx2x_phy phy_8726 = { +static const struct bnx2x_phy phy_8726 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726, .addr = 0xff, .def_md_devad = 0, - .flags = (FLAGS_HW_LOCK_REQUIRED | - FLAGS_INIT_XGXS_FIRST), + .flags = (FLAGS_INIT_XGXS_FIRST | + FLAGS_TX_ERROR_CHECK), .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -11062,11 +11590,12 @@ static struct bnx2x_phy phy_8726 = { .phy_specific_func = (phy_specific_func_t)NULL }; -static struct bnx2x_phy phy_8727 = { +static const struct bnx2x_phy phy_8727 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, .addr = 0xff, .def_md_devad = 0, - .flags = FLAGS_FAN_FAILURE_DET_REQ, + .flags = (FLAGS_FAN_FAILURE_DET_REQ | + FLAGS_TX_ERROR_CHECK), .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -11091,7 +11620,7 @@ static struct bnx2x_phy phy_8727 = { .set_link_led = (set_link_led_t)bnx2x_8727_set_link_led, .phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func }; -static struct bnx2x_phy phy_8481 = { +static const struct bnx2x_phy phy_8481 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, .addr = 0xff, .def_md_devad = 0, @@ -11127,12 +11656,13 @@ static struct bnx2x_phy phy_8481 = { .phy_specific_func = (phy_specific_func_t)NULL }; -static struct bnx2x_phy phy_84823 = { +static const struct bnx2x_phy phy_84823 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823, .addr = 0xff, .def_md_devad = 0, - .flags = FLAGS_FAN_FAILURE_DET_REQ | - FLAGS_REARM_LATCH_SIGNAL, + .flags = (FLAGS_FAN_FAILURE_DET_REQ | + FLAGS_REARM_LATCH_SIGNAL | + FLAGS_TX_ERROR_CHECK), .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -11160,13 +11690,48 @@ static struct bnx2x_phy phy_84823 = { .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, .hw_reset = (hw_reset_t)NULL, .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t)NULL + .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func }; -static struct bnx2x_phy phy_84833 = { +static const struct bnx2x_phy 
phy_84833 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833, .addr = 0xff, .def_md_devad = 0, + .flags = (FLAGS_FAN_FAILURE_DET_REQ | + FLAGS_REARM_LATCH_SIGNAL | + FLAGS_TX_ERROR_CHECK), + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_TP | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_848x3_config_init, + .read_status = (read_status_t)bnx2x_848xx_read_status, + .link_reset = (link_reset_t)bnx2x_848x3_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, + .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, + .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, + .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func +}; + +static const struct bnx2x_phy phy_84834 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834, + .addr = 0xff, + .def_md_devad = 0, .flags = FLAGS_FAN_FAILURE_DET_REQ | FLAGS_REARM_LATCH_SIGNAL, .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, @@ -11194,10 +11759,10 @@ static struct bnx2x_phy phy_84833 = { .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t)NULL + .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func }; -static struct bnx2x_phy phy_54618se = { +static const struct bnx2x_phy phy_54618se = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE, .addr = 0xff, .def_md_devad = 0, @@ -11228,7 +11793,7 @@ static struct bnx2x_phy phy_54618se = { .format_fw_ver = (format_fw_ver_t)NULL, .hw_reset = (hw_reset_t)NULL, .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led, - .phy_specific_func = (phy_specific_func_t)NULL + .phy_specific_func = (phy_specific_func_t)bnx2x_54618se_specific_func }; /*****************************************************************/ /* */ @@ -11243,9 +11808,8 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base, /* Get the 4 lanes xgxs config rx and tx */ u32 rx = 0, tx = 0, i; for (i = 0; i < 2; i++) { - /* - * INT_PHY and EXT_PHY1 share the same value location in the - * shmem. When num_phys is greater than 1, than this value + /* INT_PHY and EXT_PHY1 share the same value location in + * the shmem. When num_phys is greater than 1, than this value * applies only to EXT_PHY1 */ if (phy_index == INT_PHY || phy_index == EXT_PHY1) { @@ -11323,8 +11887,7 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, offsetof(struct shmem_region, dev_info. 
port_hw_config[port].default_cfg)) & PORT_HW_CFG_NET_SERDES_IF_MASK); - /* - * Set the appropriate supported and flags indications per + /* Set the appropriate supported and flags indications per * interface type of the chip */ switch (serdes_net_if) { @@ -11341,6 +11904,11 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, phy->media_type = ETH_PHY_BASE_T; break; case PORT_HW_CFG_NET_SERDES_IF_XFI: + phy->supported &= (SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); phy->media_type = ETH_PHY_XFP_FIBER; break; case PORT_HW_CFG_NET_SERDES_IF_SFI: @@ -11349,7 +11917,7 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, SUPPORTED_FIBRE | SUPPORTED_Pause | SUPPORTED_Asym_Pause); - phy->media_type = ETH_PHY_SFP_FIBER; + phy->media_type = ETH_PHY_SFPP_10G_FIBER; break; case PORT_HW_CFG_NET_SERDES_IF_KR: phy->media_type = ETH_PHY_KR; @@ -11372,9 +11940,13 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, phy->media_type = ETH_PHY_KR; phy->flags |= FLAGS_WC_DUAL_MODE; phy->supported &= (SUPPORTED_20000baseKR2_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | SUPPORTED_FIBRE | SUPPORTED_Pause | SUPPORTED_Asym_Pause); + phy->flags &= ~FLAGS_TX_ERROR_CHECK; break; default: DP(NETIF_MSG_LINK, "Unknown WC interface type 0x%x\n", @@ -11382,8 +11954,7 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, break; } - /* - * Enable MDC/MDIO work-around for E3 A0 since free running MDC + /* Enable MDC/MDIO work-around for E3 A0 since free running MDC * was not set as expected. For B0, ECO will be enabled so there * won't be an issue there */ @@ -11474,9 +12045,14 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp, case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: *phy = phy_84833; break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834: + *phy = phy_84834; + break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616: case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE: *phy = phy_54618se; + if (phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) + phy->flags |= FLAGS_EEE; break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: *phy = phy_7101; @@ -11496,8 +12072,7 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp, phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config); bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index); - /* - * The shmem address of the phy version is located on different + /* The shmem address of the phy version is located on different * structures. In case this structure is too old, do not set * the address */ @@ -11529,10 +12104,10 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp, } phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port); - if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) && + if (((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) && (phy->ver_addr)) { - /* - * Remove 100Mb link supported for BCM84833 when phy fw + /* Remove 100Mb link supported for BCM84833/4 when phy fw * version lower than or equal to 1.39 */ u32 raw_ver = REG_RD(bp, phy->ver_addr); @@ -11542,13 +12117,6 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp, SUPPORTED_100baseT_Full); } - /* - * In case mdc/mdio_access of the external phy is different than the - * mdc/mdio access of the XGXS, a HW lock must be taken in each access - * to prevent one port interfere with another port's CL45 operations. 
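/*
 * bnx2x_populate_ext_phy() above trims 100M from the 84833/4 capability mask
 * when the PHY firmware is at or below 1.39. The comparison itself is just a
 * threshold test on the packed version word; the (major << 8) | minor layout
 * used below is an assumption for illustration, not the shmem encoding.
 */
#include <stdint.h>

#define SUP_100_HALF    (1u << 0)       /* stand-ins for SUPPORTED_100baseT_* */
#define SUP_100_FULL    (1u << 1)

static uint32_t trim_100m_if_old_fw(uint32_t supported, uint32_t raw_ver)
{
        /* 0x0127 == version 1.39 under the assumed packing */
        if (raw_ver <= 0x0127)
                supported &= ~(SUP_100_HALF | SUP_100_FULL);
        return supported;
}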
- */ - if (mdc_mdio_access != SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH) - phy->flags |= FLAGS_HW_LOCK_REQUIRED; DP(NETIF_MSG_LINK, "phy_type 0x%x port %d found in index %d\n", phy_type, port, phy_index); DP(NETIF_MSG_LINK, " addr=0x%x, mdio_ctl=0x%x\n", @@ -11673,7 +12241,6 @@ u32 bnx2x_phy_selection(struct link_params *params) return return_cfg; } - int bnx2x_phy_probe(struct link_params *params) { u8 phy_index, actual_phy_idx; @@ -11713,13 +12280,20 @@ int bnx2x_phy_probe(struct link_params *params) if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) break; + if (params->feature_config_flags & + FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET) + phy->flags &= ~FLAGS_TX_ERROR_CHECK; + + if (!(params->feature_config_flags & + FEATURE_CONFIG_MT_SUPPORT)) + phy->flags |= FLAGS_MDC_MDIO_WA_G; + sync_offset = params->shmem_base + offsetof(struct shmem_region, dev_info.port_hw_config[params->port].media_type); media_types = REG_RD(bp, sync_offset); - /* - * Update media type for non-PMF sync only for the first time + /* Update media type for non-PMF sync only for the first time * In case the media type changes afterwards, it will be updated * using the update_status function */ @@ -11741,8 +12315,8 @@ int bnx2x_phy_probe(struct link_params *params) return 0; } -void bnx2x_init_bmac_loopback(struct link_params *params, - struct link_vars *vars) +static void bnx2x_init_bmac_loopback(struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; vars->link_up = 1; @@ -11755,14 +12329,14 @@ void bnx2x_init_bmac_loopback(struct link_params *params, bnx2x_xgxs_deassert(params); - /* set bmac loopback */ - bnx2x_bmac_enable(params, vars, 1); + /* Set bmac loopback */ + bnx2x_bmac_enable(params, vars, 1, 1); REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); } -void bnx2x_init_emac_loopback(struct link_params *params, - struct link_vars *vars) +static void bnx2x_init_emac_loopback(struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; vars->link_up = 1; @@ -11774,14 +12348,14 @@ void bnx2x_init_emac_loopback(struct link_params *params, vars->phy_flags = PHY_XGXS_FLAG; bnx2x_xgxs_deassert(params); - /* set bmac loopback */ + /* Set bmac loopback */ bnx2x_emac_enable(params, vars, 1); bnx2x_emac_program(params, vars); REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); } -void bnx2x_init_xmac_loopback(struct link_params *params, - struct link_vars *vars) +static void bnx2x_init_xmac_loopback(struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; vars->link_up = 1; @@ -11793,8 +12367,7 @@ void bnx2x_init_xmac_loopback(struct link_params *params, vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; vars->mac_type = MAC_TYPE_XMAC; vars->phy_flags = PHY_XGXS_FLAG; - /* - * Set WC to loopback mode since link is required to provide clock + /* Set WC to loopback mode since link is required to provide clock * to the XMAC in 20G mode */ bnx2x_set_aer_mmd(params, ¶ms->phy[0]); @@ -11807,8 +12380,8 @@ void bnx2x_init_xmac_loopback(struct link_params *params, REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); } -void bnx2x_init_umac_loopback(struct link_params *params, - struct link_vars *vars) +static void bnx2x_init_umac_loopback(struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; vars->link_up = 1; @@ -11822,17 +12395,21 @@ void bnx2x_init_umac_loopback(struct link_params *params, REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); } -void bnx2x_init_xgxs_loopback(struct 
link_params *params, - struct link_vars *vars) +static void bnx2x_init_xgxs_loopback(struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; - vars->link_up = 1; - vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; - vars->duplex = DUPLEX_FULL; + struct bnx2x_phy *int_phy = ¶ms->phy[INT_PHY]; + vars->link_up = 1; + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + vars->duplex = DUPLEX_FULL; if (params->req_line_speed[0] == SPEED_1000) - vars->line_speed = SPEED_1000; + vars->line_speed = SPEED_1000; + else if ((params->req_line_speed[0] == SPEED_20000) || + (int_phy->flags & FLAGS_WC_DUAL_MODE)) + vars->line_speed = SPEED_20000; else - vars->line_speed = SPEED_10000; + vars->line_speed = SPEED_10000; if (!USES_WARPCORE(bp)) bnx2x_xgxs_deassert(params); @@ -11849,39 +12426,190 @@ void bnx2x_init_xgxs_loopback(struct link_params *params, if (USES_WARPCORE(bp)) bnx2x_xmac_enable(params, vars, 0); else - bnx2x_bmac_enable(params, vars, 0); + bnx2x_bmac_enable(params, vars, 0, 1); } - if (params->loopback_mode == LOOPBACK_XGXS) { - /* set 10G XGXS loopback */ - params->phy[INT_PHY].config_loopback( - ¶ms->phy[INT_PHY], - params); + if (params->loopback_mode == LOOPBACK_XGXS) { + /* Set 10G XGXS loopback */ + int_phy->config_loopback(int_phy, params); + } else { + /* Set external phy loopback */ + u8 phy_index; + for (phy_index = EXT_PHY1; + phy_index < params->num_phys; phy_index++) + if (params->phy[phy_index].config_loopback) + params->phy[phy_index].config_loopback( + ¶ms->phy[phy_index], + params); + } + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); - } else { - /* set external phy loopback */ - u8 phy_index; - for (phy_index = EXT_PHY1; - phy_index < params->num_phys; phy_index++) { - if (params->phy[phy_index].config_loopback) - params->phy[phy_index].config_loopback( - ¶ms->phy[phy_index], - params); - } + bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); +} + +void bnx2x_set_rx_filter(struct link_params *params, u8 en) +{ + struct bnx2x *bp = params->bp; + u8 val = en * 0x1F; + + /* Open / close the gate between the NIG and the BRB */ + if (!CHIP_IS_E1x(bp)) + val |= en * 0x20; + REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + params->port*4, val); + + if (!CHIP_IS_E1(bp)) { + REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + params->port*4, + en*0x3); + } + + REG_WR(bp, (params->port ? 
NIG_REG_LLH1_BRB1_NOT_MCP : + NIG_REG_LLH0_BRB1_NOT_MCP), en); +} +static int bnx2x_avoid_link_flap(struct link_params *params, + struct link_vars *vars) +{ + u32 phy_idx; + u32 dont_clear_stat, lfa_sts; + struct bnx2x *bp = params->bp; + + bnx2x_set_mdio_emac_per_phy(bp, params); + /* Sync the link parameters */ + bnx2x_link_status_update(params, vars); + + /* + * The module verification was already done by previous link owner, + * so this call is meant only to get warning message + */ + + for (phy_idx = INT_PHY; phy_idx < params->num_phys; phy_idx++) { + struct bnx2x_phy *phy = ¶ms->phy[phy_idx]; + if (phy->phy_specific_func) { + DP(NETIF_MSG_LINK, "Calling PHY specific func\n"); + phy->phy_specific_func(phy, params, PHY_INIT); } - REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); + if ((phy->media_type == ETH_PHY_SFPP_10G_FIBER) || + (phy->media_type == ETH_PHY_SFP_1G_FIBER) || + (phy->media_type == ETH_PHY_DA_TWINAX)) + bnx2x_verify_sfp_module(phy, params); + } + lfa_sts = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, + lfa_sts)); - bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); + dont_clear_stat = lfa_sts & SHMEM_LFA_DONT_CLEAR_STAT; + + /* Re-enable the NIG/MAC */ + if (CHIP_IS_E3(bp)) { + if (!dont_clear_stat) { + REG_WR(bp, GRCBASE_MISC + + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_MSTAT0 << + params->port)); + REG_WR(bp, GRCBASE_MISC + + MISC_REGISTERS_RESET_REG_2_SET, + (MISC_REGISTERS_RESET_REG_2_MSTAT0 << + params->port)); + } + if (vars->line_speed < SPEED_10000) + bnx2x_umac_enable(params, vars, 0); + else + bnx2x_xmac_enable(params, vars, 0); + } else { + if (vars->line_speed < SPEED_10000) + bnx2x_emac_enable(params, vars, 0); + else + bnx2x_bmac_enable(params, vars, 0, !dont_clear_stat); + } + + /* Increment LFA count */ + lfa_sts = ((lfa_sts & ~LINK_FLAP_AVOIDANCE_COUNT_MASK) | + (((((lfa_sts & LINK_FLAP_AVOIDANCE_COUNT_MASK) >> + LINK_FLAP_AVOIDANCE_COUNT_OFFSET) + 1) & 0xff) + << LINK_FLAP_AVOIDANCE_COUNT_OFFSET)); + /* Clear link flap reason */ + lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK; + + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, lfa_sts), lfa_sts); + + /* Disable NIG DRAIN */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); + + /* Enable interrupts */ + bnx2x_link_int_enable(params); + return 0; +} + +static void bnx2x_cannot_avoid_link_flap(struct link_params *params, + struct link_vars *vars, + int lfa_status) +{ + u32 lfa_sts, cfg_idx, tmp_val; + struct bnx2x *bp = params->bp; + + bnx2x_link_reset(params, vars, 1); + + if (!params->lfa_base) + return; + /* Store the new link parameters */ + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, req_duplex), + params->req_duplex[0] | (params->req_duplex[1] << 16)); + + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, req_flow_ctrl), + params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16)); + + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, req_line_speed), + params->req_line_speed[0] | (params->req_line_speed[1] << 16)); + + for (cfg_idx = 0; cfg_idx < SHMEM_LINK_CONFIG_SIZE; cfg_idx++) { + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, + speed_cap_mask[cfg_idx]), + params->speed_cap_mask[cfg_idx]); + } + + tmp_val = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, additional_config)); + tmp_val &= ~REQ_FC_AUTO_ADV_MASK; + tmp_val |= params->req_fc_auto_adv; + + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, additional_config), tmp_val); + + lfa_sts = REG_RD(bp, 
params->lfa_base + + offsetof(struct shmem_lfa, lfa_sts)); + + /* Clear the "Don't Clear Statistics" bit, and set reason */ + lfa_sts &= ~SHMEM_LFA_DONT_CLEAR_STAT; + + /* Set link flap reason */ + lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK; + lfa_sts |= ((lfa_status & LFA_LINK_FLAP_REASON_MASK) << + LFA_LINK_FLAP_REASON_OFFSET); + + /* Increment link flap counter */ + lfa_sts = ((lfa_sts & ~LINK_FLAP_COUNT_MASK) | + (((((lfa_sts & LINK_FLAP_COUNT_MASK) >> + LINK_FLAP_COUNT_OFFSET) + 1) & 0xff) + << LINK_FLAP_COUNT_OFFSET)); + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, lfa_sts), lfa_sts); + /* Proceed with regular link initialization */ } int bnx2x_phy_init(struct link_params *params, struct link_vars *vars) { + int lfa_status; struct bnx2x *bp = params->bp; DP(NETIF_MSG_LINK, "Phy Initialization started\n"); DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n", params->req_line_speed[0], params->req_flow_ctrl[0]); DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n", params->req_line_speed[1], params->req_flow_ctrl[1]); + DP(NETIF_MSG_LINK, "req_adv_flow_ctrl 0x%x\n", params->req_fc_auto_adv); vars->link_status = 0; vars->phy_link_up = 0; vars->link_up = 0; @@ -11890,8 +12618,23 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars) vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; vars->mac_type = MAC_TYPE_NONE; vars->phy_flags = 0; + vars->check_kr2_recovery_cnt = 0; + params->link_flags = PHY_INITIALIZED; + /* Driver opens NIG-BRB filters */ + bnx2x_set_rx_filter(params, 1); + /* Check if link flap can be avoided */ + lfa_status = bnx2x_check_lfa(params); - /* disable attentions */ + if (lfa_status == 0) { + DP(NETIF_MSG_LINK, "Link Flap Avoidance in progress\n"); + return bnx2x_avoid_link_flap(params, vars); + } + + DP(NETIF_MSG_LINK, "Cannot avoid link flap lfa_sta=0x%x\n", + lfa_status); + bnx2x_cannot_avoid_link_flap(params, vars, lfa_status); + + /* Disable attentions */ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, (NIG_MASK_XGXS0_LINK_STATUS | NIG_MASK_XGXS0_LINK10G | @@ -11900,6 +12643,9 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars) bnx2x_emac_init(params, vars); + if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) + vars->link_status |= LINK_STATUS_PFC_ENABLED; + if (params->num_phys == 0) { DP(NETIF_MSG_LINK, "No phy found for initialization !!\n"); return -EINVAL; @@ -11936,6 +12682,9 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars) bnx2x_link_int_enable(params); break; } + bnx2x_update_mng(params, vars->link_status); + + bnx2x_update_mng_eee(params, vars->eee_status); return 0; } @@ -11945,44 +12694,46 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, struct bnx2x *bp = params->bp; u8 phy_index, port = params->port, clear_latch_ind = 0; DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port); - /* disable attentions */ + /* Disable attentions */ vars->link_status = 0; bnx2x_update_mng(params, vars->link_status); + vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK | + SHMEM_EEE_ACTIVE_BIT); + bnx2x_update_mng_eee(params, vars->eee_status); bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, (NIG_MASK_XGXS0_LINK_STATUS | NIG_MASK_XGXS0_LINK10G | NIG_MASK_SERDES0_LINK_STATUS | NIG_MASK_MI_INT)); - /* activate nig drain */ + /* Activate nig drain */ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); - /* disable nig egress interface */ + /* Disable nig egress interface */ if (!CHIP_IS_E3(bp)) { REG_WR(bp, NIG_REG_BMAC0_OUT_EN + 
port*4, 0); REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0); } - /* Stop BigMac rx */ - if (!CHIP_IS_E3(bp)) - bnx2x_bmac_rx_disable(bp, port); - else { - bnx2x_xmac_disable(params); - bnx2x_umac_disable(params); - } - /* disable emac */ + if (!CHIP_IS_E3(bp)) { + bnx2x_set_bmac_rx(bp, params->chip_id, port, 0); + } else { + bnx2x_set_xmac_rxtx(params, 0); + bnx2x_set_umac_rxtx(params, 0); + } + /* Disable emac */ if (!CHIP_IS_E3(bp)) REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); - msleep(10); + usleep_range(10000, 20000); /* The PHY reset is controlled by GPIO 1 * Hold it as vars low */ - /* clear link led */ + /* Clear link led */ + bnx2x_set_mdio_emac_per_phy(bp, params); bnx2x_set_led(params, vars, LED_MODE_OFF, 0); if (reset_ext_phy) { - bnx2x_set_mdio_clk(bp, params->chip_id, port); for (phy_index = EXT_PHY1; phy_index < params->num_phys; phy_index++) { if (params->phy[phy_index].link_reset) { @@ -12008,9 +12759,9 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, params->phy[INT_PHY].link_reset( ¶ms->phy[INT_PHY], params); - /* disable nig ingress interface */ + /* Disable nig ingress interface */ if (!CHIP_IS_E3(bp)) { - /* reset BigMac */ + /* Reset BigMac */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0); @@ -12027,6 +12778,57 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, vars->phy_flags = 0; return 0; } +int bnx2x_lfa_reset(struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + vars->link_up = 0; + vars->phy_flags = 0; + params->link_flags &= ~PHY_INITIALIZED; + if (!params->lfa_base) + return bnx2x_link_reset(params, vars, 1); + /* + * Activate NIG drain so that during this time the device won't send + * anything while it is unable to response. + */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1); + + /* + * Close gracefully the gate from BMAC to NIG such that no half packets + * are passed. + */ + if (!CHIP_IS_E3(bp)) + bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0); + + if (CHIP_IS_E3(bp)) { + bnx2x_set_xmac_rxtx(params, 0); + bnx2x_set_umac_rxtx(params, 0); + } + /* Wait 10ms for the pipe to clean up*/ + usleep_range(10000, 20000); + + /* Clean the NIG-BRB using the network filters in a way that will + * not cut a packet in the middle. + */ + bnx2x_set_rx_filter(params, 0); + + /* + * Re-open the gate between the BMAC and the NIG, after verifying the + * gate to the BRB is closed, otherwise packets may arrive to the + * firmware before driver had initialized it. The target is to achieve + * minimum management protocol down time. 
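/*
 * bnx2x_lfa_reset() orders its teardown so no packet is ever cut in half:
 * raise the NIG drain, close the MAC rx gate, give the pipe ~10ms to empty,
 * close the NIG-BRB filters, and only then reopen MAC rx, since the BRB gate
 * is already verified shut. The sequence as a stub sketch; the three helpers
 * stand in for the REG_WR/helper calls in the hunk above.
 */
#include <stdio.h>
#include <unistd.h>

static void nig_drain(int on)      { printf("drain=%d\n", on); }
static void mac_rx(int on)         { printf("mac rx=%d\n", on); }
static void nig_brb_filter(int on) { printf("filters=%d\n", on); }

static void lfa_quiesce(void)
{
        nig_drain(1);           /* stop sending while unable to respond */
        mac_rx(0);              /* close MAC->NIG gracefully */
        usleep(10000);          /* wait for the pipe to clean up */
        nig_brb_filter(0);      /* close NIG-BRB without cutting a packet */
        mac_rx(1);              /* reopen; BRB gate is already closed */
}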
+ */ + if (!CHIP_IS_E3(bp)) + bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 1); + + if (CHIP_IS_E3(bp)) { + bnx2x_set_xmac_rxtx(params, 1); + bnx2x_set_umac_rxtx(params, 1); + } + /* Disable NIG drain */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); + return 0; +} /****************************************************************************/ /* Common function */ @@ -12067,7 +12869,7 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp, DP(NETIF_MSG_LINK, "populate_phy failed\n"); return -EINVAL; } - /* disable attentions */ + /* Disable attentions */ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port_of_path*4, (NIG_MASK_XGXS0_LINK_STATUS | @@ -12076,7 +12878,8 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp, NIG_MASK_MI_INT)); /* Need to take the phy out of low power mode in order - to write to access its registers */ + * to write to access its registers + */ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); @@ -12124,8 +12927,7 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp, (val | 1<<10)); } - /* - * Toggle Transmitter: Power down and then up with 600ms delay + /* Toggle Transmitter: Power down and then up with 600ms delay * between */ msleep(600); @@ -12141,7 +12943,7 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp, bnx2x_cl45_write(bp, phy_blk[port], MDIO_PMA_DEVAD, MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10)))); - msleep(15); + usleep_range(15000, 30000); /* Read modify write the SPI-ROM version select register */ bnx2x_cl45_read(bp, phy_blk[port], @@ -12173,7 +12975,7 @@ static int bnx2x_8726_common_init_phy(struct bnx2x *bp, REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val); bnx2x_ext_phy_hw_reset(bp, 0); - msleep(5); + usleep_range(5000, 10000); for (port = 0; port < PORT_MAX; port++) { u32 shmem_base, shmem2_base; @@ -12268,8 +13070,7 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp, reset_gpio = MISC_REGISTERS_GPIO_1; port = 1; - /* - * Retrieve the reset gpio/port which control the reset. + /* Retrieve the reset gpio/port which control the reset. * Default is GPIO1, PORT1 */ bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0], @@ -12281,11 +13082,11 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp, /* Initiate PHY reset*/ bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW, port); - msleep(1); + usleep_range(1000, 2000); bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); - msleep(5); + usleep_range(5000, 10000); /* PART1 - Reset both phys */ for (port = PORT_MAX - 1; port >= PORT_0; port--) { @@ -12368,61 +13169,6 @@ static int bnx2x_84833_common_init_phy(struct bnx2x *bp, return 0; } -static int bnx2x_84833_pre_init_phy(struct bnx2x *bp, - struct bnx2x_phy *phy) -{ - u16 val, cnt; - /* Wait for FW completing its initialization. */ - for (cnt = 0; cnt < 1500; cnt++) { - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_CTRL, &val); - if (!(val & (1<<15))) - break; - msleep(1); - } - if (cnt >= 1500) { - DP(NETIF_MSG_LINK, "84833 reset timeout\n"); - return -EINVAL; - } - - /* Put the port in super isolate mode. 
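/*
 * The removed pre-init path above waited for PHY firmware by polling the PMA
 * control register until the reset bit (bit 15) cleared, giving up after
 * 1500 tries of ~1ms each. That wait-for-bit-clear idiom generalizes to a
 * small helper; read_ctrl() is a stand-in that clears the bit after a few
 * polls so the sketch can run.
 */
#include <stdint.h>
#include <unistd.h>

static uint16_t read_ctrl(void)
{
        static int calls;
        return (++calls < 3) ? (uint16_t)(1 << 15) : 0;
}

static int wait_bit_clear(uint16_t mask, int tries)
{
        int i;

        for (i = 0; i < tries; i++) {
                if (!(read_ctrl() & mask))
                        return 0;       /* bit cleared, FW ready */
                usleep(1000);
        }
        return -1;                      /* timeout */
}

/* wait_bit_clear(1 << 15, 1500) mirrors the removed loop */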
*/ - bnx2x_cl45_read(bp, phy, - MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val); - val |= MDIO_84833_SUPER_ISOLATE; - bnx2x_cl45_write(bp, phy, - MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_XGPHY_STRAP1, val); - - /* Save spirom version */ - bnx2x_save_848xx_spirom_version(phy, bp, PORT_0); - return 0; -} - -int bnx2x_pre_init_phy(struct bnx2x *bp, - u32 shmem_base, - u32 shmem2_base, - u32 chip_id) -{ - int rc = 0; - struct bnx2x_phy phy; - bnx2x_set_mdio_clk(bp, chip_id, PORT_0); - if (bnx2x_populate_phy(bp, EXT_PHY1, shmem_base, shmem2_base, - PORT_0, &phy)) { - DP(NETIF_MSG_LINK, "populate_phy failed\n"); - return -EINVAL; - } - switch (phy.type) { - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: - rc = bnx2x_84833_pre_init_phy(bp, &phy); - break; - default: - break; - } - return rc; -} - static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], u32 shmem2_base_path[], u8 phy_index, u32 ext_phy_type, u32 chip_id) @@ -12444,8 +13190,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: - /* - * GPIO1 affects both ports, so there's need to pull + /* GPIO1 affects both ports, so there's need to pull * it for single port alone */ rc = bnx2x_8726_common_init_phy(bp, shmem_base_path, @@ -12453,8 +13198,8 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], phy_index, chip_id); break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: - /* - * GPIO3's are linked, and so both need to be toggled + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834: + /* GPIO3's are linked, and so both need to be toggled * to obtain required 2us pulse. */ rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, @@ -12471,7 +13216,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], break; } - if (rc != 0) + if (rc) netdev_err(bp->dev, "Warning: PHY was not initialized," " Port %d\n", 0); @@ -12485,8 +13230,9 @@ int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[], u32 phy_ver, val; u8 phy_index = 0; u32 ext_phy_type, ext_phy_config; - bnx2x_set_mdio_clk(bp, chip_id, PORT_0); - bnx2x_set_mdio_clk(bp, chip_id, PORT_1); + + bnx2x_set_mdio_clk(bp, chip_id, GRCBASE_EMAC0); + bnx2x_set_mdio_clk(bp, chip_id, GRCBASE_EMAC1); DP(NETIF_MSG_LINK, "Begin common phy init\n"); if (CHIP_IS_E3(bp)) { /* Enable EPIO */ @@ -12547,47 +13293,69 @@ static void bnx2x_check_over_curr(struct link_params *params, " error.\n", params->port); vars->phy_flags |= PHY_OVER_CURRENT_FLAG; + bnx2x_warpcore_power_module(params, 0); } } else vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG; } -static void bnx2x_analyze_link_error(struct link_params *params, - struct link_vars *vars, u32 lss_status) +/* Returns 0 if no change occured since last check; 1 otherwise. */ +static u8 bnx2x_analyze_link_error(struct link_params *params, + struct link_vars *vars, u32 status, + u32 phy_flag, u32 link_flag, u8 notify) { struct bnx2x *bp = params->bp; /* Compare new value with previous value */ u8 led_mode; - u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0; + u32 old_status = (vars->phy_flags & phy_flag) ? 
1 : 0; - if ((lss_status ^ half_open_conn) == 0) - return; + if ((status ^ old_status) == 0) + return 0; /* If values differ */ - DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up, - half_open_conn, lss_status); + switch (phy_flag) { + case PHY_HALF_OPEN_CONN_FLAG: + DP(NETIF_MSG_LINK, "Analyze Remote Fault\n"); + break; + case PHY_SFP_TX_FAULT_FLAG: + DP(NETIF_MSG_LINK, "Analyze TX Fault\n"); + break; + default: + DP(NETIF_MSG_LINK, "Analyze UNKNOWN\n"); + } + DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up, + old_status, status); - /* - * a. Update shmem->link_status accordingly + /* Do not touch the link in case physical link down */ + if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) + return 1; + + /* a. Update shmem->link_status accordingly * b. Update link_vars->link_up */ - if (lss_status) { - DP(NETIF_MSG_LINK, "Remote Fault detected !!!\n"); + if (status) { vars->link_status &= ~LINK_STATUS_LINK_UP; + vars->link_status |= link_flag; vars->link_up = 0; - vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; - /* - * Set LED mode to off since the PHY doesn't know about these + vars->phy_flags |= phy_flag; + + /* activate nig drain */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1); + /* Set LED mode to off since the PHY doesn't know about these * errors */ led_mode = LED_MODE_OFF; } else { - DP(NETIF_MSG_LINK, "Remote Fault cleared\n"); vars->link_status |= LINK_STATUS_LINK_UP; + vars->link_status &= ~link_flag; vars->link_up = 1; - vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; + vars->phy_flags &= ~phy_flag; led_mode = LED_MODE_OPER; + + /* Clear nig drain */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); } + bnx2x_sync_link(params, vars); /* Update the LED according to the link state */ bnx2x_set_led(params, vars, led_mode, SPEED_10000); @@ -12596,7 +13364,10 @@ static void bnx2x_analyze_link_error(struct link_params *params, /* C. Trigger General Attention */ vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT; - bnx2x_notify_link_changed(bp); + if (notify) + bnx2x_notify_link_changed(bp); + + return 1; } /****************************************************************************** @@ -12608,22 +13379,23 @@ static void bnx2x_analyze_link_error(struct link_params *params, * a fault, for example, due to break in the TX side of fiber. * ******************************************************************************/ -static void bnx2x_check_half_open_conn(struct link_params *params, - struct link_vars *vars) +static int bnx2x_check_half_open_conn(struct link_params *params, + struct link_vars *vars, + u8 notify) { struct bnx2x *bp = params->bp; u32 lss_status = 0; u32 mac_base; /* In case link status is physically up @ 10G do */ - if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) - return; + if (((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) || + (REG_RD(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4))) + return 0; if (CHIP_IS_E3(bp) && (REG_RD(bp, MISC_REG_RESET_REG_2) & (MISC_REGISTERS_RESET_REG_2_XMAC))) { /* Check E3 XMAC */ - /* - * Note that link speed cannot be queried here, since it may be + /* Note that link speed cannot be queried here, since it may be * zero while link is down. 
In case UMAC is active, LSS will
 	 * simply not be set
 	 */
@@ -12637,7 +13409,9 @@ static void bnx2x_check_half_open_conn(struct link_params *params,
 		if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
 			lss_status = 1;
 
-		bnx2x_analyze_link_error(params, vars, lss_status);
+		bnx2x_analyze_link_error(params, vars, lss_status,
+					 PHY_HALF_OPEN_CONN_FLAG,
+					 LINK_STATUS_NONE, notify);
 	} else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
 		   (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 <<
 		    params->port)) {
 		/* Check E1X / E2 BMAC */
@@ -12654,18 +13428,148 @@ static void bnx2x_check_half_open_conn(struct link_params *params,
 		REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
 		lss_status = (wb_data[0] > 0);
 
-		bnx2x_analyze_link_error(params, vars, lss_status);
+		bnx2x_analyze_link_error(params, vars, lss_status,
+					 PHY_HALF_OPEN_CONN_FLAG,
+					 LINK_STATUS_NONE, notify);
 	}
+	return 0;
 }
+static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy,
+					 struct link_params *params,
+					 struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u32 cfg_pin, value = 0;
+	u8 led_change, port = params->port;
 
-void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
+	/* Get The SFP+ TX_Fault controlling pin ([eg]pio) */
+	cfg_pin = (REG_RD(bp, params->shmem_base + offsetof(struct shmem_region,
+			  dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
+		   PORT_HW_CFG_E3_TX_FAULT_MASK) >>
+		  PORT_HW_CFG_E3_TX_FAULT_SHIFT;
+
+	if (bnx2x_get_cfg_pin(bp, cfg_pin, &value)) {
+		DP(NETIF_MSG_LINK, "Failed to read pin 0x%02x\n", cfg_pin);
+		return;
+	}
+
+	led_change = bnx2x_analyze_link_error(params, vars, value,
+					      PHY_SFP_TX_FAULT_FLAG,
+					      LINK_STATUS_SFP_TX_FAULT, 1);
+
+	if (led_change) {
+		/* Change TX_Fault led, set link status for further syncs */
+		u8 led_mode;
+
+		if (vars->phy_flags & PHY_SFP_TX_FAULT_FLAG) {
+			led_mode = MISC_REGISTERS_GPIO_HIGH;
+			vars->link_status |= LINK_STATUS_SFP_TX_FAULT;
+		} else {
+			led_mode = MISC_REGISTERS_GPIO_LOW;
+			vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT;
+		}
+
+		/* If module is unapproved, led should be on regardless */
+		if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) {
+			DP(NETIF_MSG_LINK, "Change TX_Fault LED: ->%x\n",
+			   led_mode);
+			bnx2x_set_e3_module_fault_led(params, led_mode);
+		}
+	}
+}
+static void bnx2x_kr2_recovery(struct link_params *params,
+			       struct link_vars *vars,
+			       struct bnx2x_phy *phy)
 {
 	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "KR2 recovery\n");
+	bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
+	bnx2x_warpcore_restart_AN_KR(phy, params);
+}
+
+static void bnx2x_check_kr2_wa(struct link_params *params,
+			       struct link_vars *vars,
+			       struct bnx2x_phy *phy)
+{
+	struct bnx2x *bp = params->bp;
+	u16 base_page, next_page, not_kr2_device, lane;
+	int sigdet;
+
+	/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
+	 * Since some switches tend to reinit the AN process and clear
+	 * the advertised BP/NP after ~2 seconds causing the KR2 to be disabled
+	 * and recovered many times
+	 */
+	if (vars->check_kr2_recovery_cnt > 0) {
+		vars->check_kr2_recovery_cnt--;
+		return;
+	}
+
+	sigdet = bnx2x_warpcore_get_sigdet(phy, params);
+	if (!sigdet) {
+		if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+			bnx2x_kr2_recovery(params, vars, phy);
+			DP(NETIF_MSG_LINK, "No sigdet\n");
+		}
+		return;
+	}
+
+	lane = bnx2x_get_warpcore_lane(phy, params);
+	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+			  MDIO_AER_BLOCK_AER_REG, lane);
+	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+			MDIO_AN_REG_LP_AUTO_NEG, &base_page);
+	bnx2x_cl45_read(bp, phy, 
MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG2, &next_page); + bnx2x_set_aer_mmd(params, phy); + + /* CL73 has not begun yet */ + if (base_page == 0) { + if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { + bnx2x_kr2_recovery(params, vars, phy); + DP(NETIF_MSG_LINK, "No BP\n"); + } + return; + } + + /* In case NP bit is not set in the BasePage, or it is set, + * but only KX is advertised, declare this link partner as non-KR2 + * device. + */ + not_kr2_device = (((base_page & 0x8000) == 0) || + (((base_page & 0x8000) && + ((next_page & 0xe0) == 0x20)))); + + /* In case KR2 is already disabled, check if we need to re-enable it */ + if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { + if (!not_kr2_device) { + DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, + next_page); + bnx2x_kr2_recovery(params, vars, phy); + } + return; + } + /* KR2 is enabled, but not KR2 device */ + if (not_kr2_device) { + /* Disable KR2 on both lanes */ + DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page); + bnx2x_disable_kr2(params, vars, phy); + /* Restart AN on leading lane */ + bnx2x_warpcore_restart_AN_KR(phy, params); + return; + } +} + +void bnx2x_period_func(struct link_params *params, struct link_vars *vars) +{ u16 phy_idx; + struct bnx2x *bp = params->bp; for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) { if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) { bnx2x_set_aer_mmd(params, ¶ms->phy[phy_idx]); - bnx2x_check_half_open_conn(params, vars); + if (bnx2x_check_half_open_conn(params, vars, 1) != + 0) + DP(NETIF_MSG_LINK, "Fault detection failed\n"); break; } } @@ -12673,28 +13577,30 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars) if (CHIP_IS_E3(bp)) { struct bnx2x_phy *phy = ¶ms->phy[INT_PHY]; bnx2x_set_aer_mmd(params, phy); + if ((phy->supported & SUPPORTED_20000baseKR2_Full) && + (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) + bnx2x_check_kr2_wa(params, vars, phy); bnx2x_check_over_curr(params, vars); - bnx2x_warpcore_config_runtime(phy, params, vars); - } - -} - -u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base) -{ - u8 phy_index; - struct bnx2x_phy phy; - for (phy_index = INT_PHY; phy_index < MAX_PHYS; - phy_index++) { - if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, - 0, &phy) != 0) { - DP(NETIF_MSG_LINK, "populate phy failed\n"); - return 0; + if (vars->rx_tx_asic_rst) + bnx2x_warpcore_config_runtime(phy, params, vars); + + if ((REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. 
+ port_hw_config[params->port].default_cfg)) + & PORT_HW_CFG_NET_SERDES_IF_MASK) == + PORT_HW_CFG_NET_SERDES_IF_SFI) { + if (bnx2x_is_sfp_module_plugged(phy, params)) { + bnx2x_sfp_tx_fault_detection(phy, params, vars); + } else if (vars->link_status & + LINK_STATUS_SFP_TX_FAULT) { + /* Clean trail, interrupt corrects the leds */ + vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT; + vars->phy_flags &= ~PHY_SFP_TX_FAULT_FLAG; + /* Update link status in the shared memory */ + bnx2x_update_mng(params, vars->link_status); + } } - - if (phy.flags & FLAGS_HW_LOCK_REQUIRED) - return 1; } - return 0; } u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index e02a68a7fb8..389f5f8cb0a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -1,4 +1,4 @@ -/* Copyright 2008-2011 Broadcom Corporation +/* Copyright 2008-2013 Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -41,6 +41,10 @@ #define SPEED_AUTO_NEG 0 #define SPEED_20000 20000 +#define I2C_DEV_ADDR_A0 0xa0 +#define I2C_DEV_ADDR_A2 0xa2 + +#define SFP_EEPROM_PAGE_SIZE 16 #define SFP_EEPROM_VENDOR_NAME_ADDR 0x14 #define SFP_EEPROM_VENDOR_NAME_SIZE 16 #define SFP_EEPROM_VENDOR_OUI_ADDR 0x25 @@ -53,6 +57,15 @@ #define SFP_EEPROM_SERIAL_SIZE 16 #define SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */ #define SFP_EEPROM_DATE_SIZE 6 +#define SFP_EEPROM_DIAG_TYPE_ADDR 0x5c +#define SFP_EEPROM_DIAG_TYPE_SIZE 1 +#define SFP_EEPROM_DIAG_ADDR_CHANGE_REQ (1<<2) +#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e +#define SFP_EEPROM_SFF_8472_COMP_SIZE 1 + +#define SFP_EEPROM_A2_CHECKSUM_RANGE 0x5e +#define SFP_EEPROM_A2_CC_DMI_ADDR 0x5f + #define PWR_FLT_ERR_MSG_LEN 250 #define XGXS_EXT_PHY_TYPE(ext_phy_config) \ @@ -89,6 +102,8 @@ #define PFC_BRB_FULL_LB_XON_THRESHOLD 250 #define MAXVAL(a, b) (((a) > (b)) ? 
(a) : (b)) + +#define BMAC_CONTROL_RX_ENABLE 2 /***********************************************************/ /* Structs */ /***********************************************************/ @@ -123,6 +138,11 @@ typedef void (*set_link_led_t)(struct bnx2x_phy *phy, struct link_params *params, u8 mode); typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy, struct link_params *params, u32 action); +struct bnx2x_reg_set { + u8 devad; + u16 reg; + u16 val; +}; struct bnx2x_phy { u32 type; @@ -131,8 +151,6 @@ struct bnx2x_phy { u8 addr; u8 def_md_devad; u16 flags; - /* Require HW lock */ -#define FLAGS_HW_LOCK_REQUIRED (1<<0) /* No Over-Current detection */ #define FLAGS_NOC (1<<1) /* Fan failure detection required */ @@ -147,6 +165,8 @@ struct bnx2x_phy { #define FLAGS_DUMMY_READ (1<<9) #define FLAGS_MDC_MDIO_WA_B0 (1<<10) #define FLAGS_TX_ERROR_CHECK (1<<12) +#define FLAGS_EEE (1<<13) +#define FLAGS_MDC_MDIO_WA_G (1<<15) /* preemphasis values for the rx side */ u16 rx_preemphasis[4]; @@ -160,14 +180,15 @@ struct bnx2x_phy { u32 supported; u32 media_type; -#define ETH_PHY_UNSPECIFIED 0x0 -#define ETH_PHY_SFP_FIBER 0x1 -#define ETH_PHY_XFP_FIBER 0x2 -#define ETH_PHY_DA_TWINAX 0x3 -#define ETH_PHY_BASE_T 0x4 -#define ETH_PHY_KR 0xf0 -#define ETH_PHY_CX4 0xf1 -#define ETH_PHY_NOT_PRESENT 0xff +#define ETH_PHY_UNSPECIFIED 0x0 +#define ETH_PHY_SFPP_10G_FIBER 0x1 +#define ETH_PHY_XFP_FIBER 0x2 +#define ETH_PHY_DA_TWINAX 0x3 +#define ETH_PHY_BASE_T 0x4 +#define ETH_PHY_SFP_1G_FIBER 0x5 +#define ETH_PHY_KR 0xf0 +#define ETH_PHY_CX4 0xf1 +#define ETH_PHY_NOT_PRESENT 0xff /* The address in which version is located*/ u32 ver_addr; @@ -206,6 +227,7 @@ struct bnx2x_phy { phy_specific_func_t phy_specific_func; #define DISABLE_TX 1 #define ENABLE_TX 2 +#define PHY_INIT 3 }; /* Inputs parameters to the CLC */ @@ -252,8 +274,13 @@ struct link_params { #define FEATURE_CONFIG_PFC_ENABLED (1<<1) #define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2) #define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3) +#define FEATURE_CONFIG_BC_SUPPORTS_AFEX (1<<8) #define FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9) #define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10) +#define FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET (1<<11) +#define FEATURE_CONFIG_MT_SUPPORT (1<<13) +#define FEATURE_CONFIG_BOOT_FROM_SAN (1<<14) + /* Will be populated during common init */ struct bnx2x_phy phy[MAX_PHYS]; @@ -261,6 +288,30 @@ struct link_params { u8 num_phys; u8 rsrv; + + /* Used to configure the EEE Tx LPI timer, has several modes of + * operation, according to bits 29:28 - + * 2'b00: Timer will be configured by nvram, output will be the value + * from nvram. + * 2'b01: Timer will be configured by nvram, output will be in + * microseconds. + * 2'b10: bits 1:0 contain an nvram value which will be used instead + * of the one located in the nvram. Output will be that value. + * 2'b11: bits 19:0 contain the idle timer in microseconds; output + * will be in microseconds. + * Bits 31:30 should be 2'b11 in order for EEE to be enabled. 
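+	 * (Illustrative example, not part of the original patch: a value
+	 * of 0xf0001388 sets bits 31:30 so EEE is enabled, bits 29:28 to
+	 * 2'b11, and an idle timer of 0x1388 = 5000 microseconds in
+	 * bits 19:0.)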
+ */ + u32 eee_mode; +#define EEE_MODE_NVRAM_BALANCED_TIME (0xa00) +#define EEE_MODE_NVRAM_AGGRESSIVE_TIME (0x100) +#define EEE_MODE_NVRAM_LATENCY_TIME (0x6000) +#define EEE_MODE_NVRAM_MASK (0x3) +#define EEE_MODE_TIMER_MASK (0xfffff) +#define EEE_MODE_OUTPUT_TIME (1<<28) +#define EEE_MODE_OVERRIDE_NVRAM (1<<29) +#define EEE_MODE_ENABLE_LPI (1<<30) +#define EEE_MODE_ADV_LPI (1<<31) + u16 hw_led_mode; /* part of the hw_config read from the shmem */ u32 multi_phy_config; @@ -268,6 +319,10 @@ struct link_params { struct bnx2x *bp; u16 req_fc_auto_adv; /* Should be set to TX / BOTH when req_flow_ctrl is set to AUTO */ + u16 link_flags; +#define LINK_FLAGS_INT_DISABLED (1<<0) +#define PHY_INITIALIZED (1<<1) + u32 lfa_base; }; /* Output parameters */ @@ -278,6 +333,7 @@ struct link_vars { #define PHY_PHYSICAL_LINK_FLAG (1<<2) #define PHY_HALF_OPEN_CONN_FLAG (1<<3) #define PHY_OVER_CURRENT_FLAG (1<<4) +#define PHY_SFP_TX_FAULT_FLAG (1<<5) u8 mac_type; #define MAC_TYPE_NONE 0 @@ -297,8 +353,10 @@ struct link_vars { /* The same definitions as the shmem parameter */ u32 link_status; + u32 eee_status; u8 fault_detected; - u8 rsrv1; + u8 check_kr2_recovery_cnt; +#define CHECK_KR2_RECOVERY_CNT 5 u16 periodic_flags; #define PERIODIC_FLAGS_LINK_EVENT 0x0001 @@ -306,6 +364,8 @@ struct link_vars { u8 rx_tx_asic_rst; u8 turn_to_run_wc_rt; u16 rsrv2; + /* The same definitions as the shmem2 parameter */ + u32 link_attr_sync; }; /***********************************************************/ @@ -318,7 +378,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars); to 0 */ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, u8 reset_ext_phy); - +int bnx2x_lfa_reset(struct link_params *params, struct link_vars *vars); /* bnx2x_link_update should be called upon link interrupt */ int bnx2x_link_update(struct link_params *params, struct link_vars *vars); @@ -337,8 +397,8 @@ int bnx2x_phy_write(struct link_params *params, u8 phy_addr, void bnx2x_link_status_update(struct link_params *input, struct link_vars *output); /* returns string representing the fw_version of the external phy */ -int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, - u8 *version, u16 len); +int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version, + u16 len); /* Set/Unset the led Basically, the CLC takes care of the led for the link, but in case one needs @@ -372,15 +432,11 @@ void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy); /* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */ int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, - struct link_params *params, u16 addr, - u8 byte_cnt, u8 *o_buf); + struct link_params *params, u8 dev_addr, + u16 addr, u16 byte_cnt, u8 *o_buf); void bnx2x_hw_reset_phy(struct link_params *params); -/* Checks if HW lock is required for this phy/board type */ -u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, - u32 shmem2_base); - /* Check swap bit and adjust PHY order */ u32 bnx2x_phy_selection(struct link_params *params); @@ -391,7 +447,8 @@ int bnx2x_phy_probe(struct link_params *params); u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base, u8 port); - +/* Open / close the gate between the NIG and the BRB */ +void bnx2x_set_rx_filter(struct link_params *params, u8 en); /* DCBX structs */ @@ -418,9 +475,6 @@ struct bnx2x_nig_brb_pfc_port_params { u32 rx_cos_priority_mask[DCBX_MAX_NUM_COS]; u32 llfc_high_priority_classes; u32 llfc_low_priority_classes; - /* 
BRB */ - u32 cos0_pauseable; - u32 cos1_pauseable; }; @@ -455,8 +509,7 @@ struct bnx2x_ets_params { struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS]; }; -/** - * Used to update the PFC attributes in EMAC, BMAC, NIG and BRB +/* Used to update the PFC attributes in EMAC, BMAC, NIG and BRB * when link is already up */ int bnx2x_update_pfc(struct link_params *params, @@ -480,17 +533,11 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos); int bnx2x_ets_e3b0_config(const struct link_params *params, const struct link_vars *vars, struct bnx2x_ets_params *ets_params); -/* Read pfc statistic*/ -void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars, - u32 pfc_frames_sent[2], - u32 pfc_frames_received[2]); + void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars, u32 chip_id, u32 shmem_base, u32 shmem2_base, u8 port); -int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, - struct link_params *params); - void bnx2x_period_func(struct link_params *params, struct link_vars *vars); #endif /* BNX2X_LINK_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 25452131915..6a8b1453a1b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -1,12 +1,12 @@ /* bnx2x_main.c: Broadcom Everest network driver. * - * Copyright (c) 2007-2011 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * - * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com> * Written by: Eliezer Tamir * Based on code from Michael Chan's bnx2 driver * UDP CSUM errata workaround by Arik Gendelman @@ -27,6 +27,7 @@ #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/pci.h> +#include <linux/aer.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -39,7 +40,6 @@ #include <linux/time.h> #include <linux/ethtool.h> #include <linux/mii.h> -#include <linux/if.h> #include <linux/if_vlan.h> #include <net/ip.h> #include <net/ipv6.h> @@ -52,6 +52,7 @@ #include <linux/prefetch.h> #include <linux/zlib.h> #include <linux/io.h> +#include <linux/semaphore.h> #include <linux/stringify.h> #include <linux/vmalloc.h> @@ -59,6 +60,7 @@ #include "bnx2x_init.h" #include "bnx2x_init_ops.h" #include "bnx2x_cmn.h" +#include "bnx2x_vfpf.h" #include "bnx2x_dcb.h" #include "bnx2x_sp.h" @@ -77,7 +79,7 @@ /* Time in jiffies before concluding the transmitter is hung */ #define TX_TIMEOUT (5*HZ) -static char version[] __devinitdata = +static char version[] = "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver " DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; @@ -92,42 +94,45 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1); MODULE_FIRMWARE(FW_FILE_NAME_E1H); MODULE_FIRMWARE(FW_FILE_NAME_E2); -static int multi_mode = 1; -module_param(multi_mode, int, 0); -MODULE_PARM_DESC(multi_mode, " Multi queue mode " - "(0 Disable; 1 Enable (default))"); - -int num_queues; -module_param(num_queues, int, 0); -MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1" - " (default is as a number of CPUs)"); +int bnx2x_num_queues; +module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO); +MODULE_PARM_DESC(num_queues, + " Set number of queues (default is as a number 
of CPUs)"); static int disable_tpa; -module_param(disable_tpa, int, 0); +module_param(disable_tpa, int, S_IRUGO); MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature"); -#define INT_MODE_INTx 1 -#define INT_MODE_MSI 2 static int int_mode; -module_param(int_mode, int, 0); +module_param(int_mode, int, S_IRUGO); MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X " "(1 INT#x; 2 MSI)"); static int dropless_fc; -module_param(dropless_fc, int, 0); +module_param(dropless_fc, int, S_IRUGO); MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring"); static int mrrs = -1; -module_param(mrrs, int, 0); +module_param(mrrs, int, S_IRUGO); MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)"); static int debug; -module_param(debug, int, 0); +module_param(debug, int, S_IRUGO); MODULE_PARM_DESC(debug, " Default debug msglevel"); - - -struct workqueue_struct *bnx2x_wq; +static struct workqueue_struct *bnx2x_wq; +struct workqueue_struct *bnx2x_iov_wq; + +struct bnx2x_mac_vals { + u32 xmac_addr; + u32 xmac_val; + u32 emac_addr; + u32 emac_val; + u32 umac_addr; + u32 umac_val; + u32 bmac_addr; + u32 bmac_val[2]; +}; enum bnx2x_board_type { BCM57710 = 0, @@ -135,30 +140,49 @@ enum bnx2x_board_type { BCM57711E, BCM57712, BCM57712_MF, + BCM57712_VF, BCM57800, BCM57800_MF, + BCM57800_VF, BCM57810, BCM57810_MF, - BCM57840, - BCM57840_MF + BCM57810_VF, + BCM57840_4_10, + BCM57840_2_20, + BCM57840_MF, + BCM57840_VF, + BCM57811, + BCM57811_MF, + BCM57840_O, + BCM57840_MFO, + BCM57811_VF }; /* indexed by board_type, above */ static struct { char *name; -} board_info[] __devinitdata = { - { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" }, - { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" }, - { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" }, - { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" }, - { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" }, - { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" }, - { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" }, - { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" }, - { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" }, - { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" }, - { "Broadcom NetXtreme II BCM57840 10/20 Gigabit " - "Ethernet Multi Function"} +} board_info[] = { + [BCM57710] = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" }, + [BCM57711] = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" }, + [BCM57711E] = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" }, + [BCM57712] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" }, + [BCM57712_MF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" }, + [BCM57712_VF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" }, + [BCM57800] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" }, + [BCM57800_MF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" }, + [BCM57800_VF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" }, + [BCM57810] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" }, + [BCM57810_MF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" }, + [BCM57810_VF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" }, + [BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" }, + [BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" }, + [BCM57840_MF] = { "Broadcom 
NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" }, + [BCM57840_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" }, + [BCM57811] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" }, + [BCM57811_MF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" }, + [BCM57840_O] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" }, + [BCM57840_MFO] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" }, + [BCM57811_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" } }; #ifndef PCI_DEVICE_ID_NX2_57710 @@ -176,54 +200,105 @@ static struct { #ifndef PCI_DEVICE_ID_NX2_57712_MF #define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF #endif +#ifndef PCI_DEVICE_ID_NX2_57712_VF +#define PCI_DEVICE_ID_NX2_57712_VF CHIP_NUM_57712_VF +#endif #ifndef PCI_DEVICE_ID_NX2_57800 #define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800 #endif #ifndef PCI_DEVICE_ID_NX2_57800_MF #define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF #endif +#ifndef PCI_DEVICE_ID_NX2_57800_VF +#define PCI_DEVICE_ID_NX2_57800_VF CHIP_NUM_57800_VF +#endif #ifndef PCI_DEVICE_ID_NX2_57810 #define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810 #endif #ifndef PCI_DEVICE_ID_NX2_57810_MF #define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF #endif -#ifndef PCI_DEVICE_ID_NX2_57840 -#define PCI_DEVICE_ID_NX2_57840 CHIP_NUM_57840 +#ifndef PCI_DEVICE_ID_NX2_57840_O +#define PCI_DEVICE_ID_NX2_57840_O CHIP_NUM_57840_OBSOLETE +#endif +#ifndef PCI_DEVICE_ID_NX2_57810_VF +#define PCI_DEVICE_ID_NX2_57810_VF CHIP_NUM_57810_VF +#endif +#ifndef PCI_DEVICE_ID_NX2_57840_4_10 +#define PCI_DEVICE_ID_NX2_57840_4_10 CHIP_NUM_57840_4_10 +#endif +#ifndef PCI_DEVICE_ID_NX2_57840_2_20 +#define PCI_DEVICE_ID_NX2_57840_2_20 CHIP_NUM_57840_2_20 +#endif +#ifndef PCI_DEVICE_ID_NX2_57840_MFO +#define PCI_DEVICE_ID_NX2_57840_MFO CHIP_NUM_57840_MF_OBSOLETE #endif #ifndef PCI_DEVICE_ID_NX2_57840_MF #define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF #endif +#ifndef PCI_DEVICE_ID_NX2_57840_VF +#define PCI_DEVICE_ID_NX2_57840_VF CHIP_NUM_57840_VF +#endif +#ifndef PCI_DEVICE_ID_NX2_57811 +#define PCI_DEVICE_ID_NX2_57811 CHIP_NUM_57811 +#endif +#ifndef PCI_DEVICE_ID_NX2_57811_MF +#define PCI_DEVICE_ID_NX2_57811_MF CHIP_NUM_57811_MF +#endif +#ifndef PCI_DEVICE_ID_NX2_57811_VF +#define PCI_DEVICE_ID_NX2_57811_VF CHIP_NUM_57811_VF +#endif + static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = { { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF }, - { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF }, + { PCI_VDEVICE(BROADCOM, 
PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF }, { 0 } }; MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl); +/* Global resources for unloading a previously loaded device */ +#define BNX2X_PREV_WAIT_NEEDED 1 +static DEFINE_SEMAPHORE(bnx2x_prev_sem); +static LIST_HEAD(bnx2x_prev_list); + +/* Forward declaration */ +static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev); +static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp); +static int bnx2x_set_storm_rx_mode(struct bnx2x *bp); + /**************************************************************************** * General service functions ****************************************************************************/ -static inline void __storm_memset_dma_mapping(struct bnx2x *bp, +static void __storm_memset_dma_mapping(struct bnx2x *bp, u32 addr, dma_addr_t mapping) { REG_WR(bp, addr, U64_LO(mapping)); REG_WR(bp, addr + 4, U64_HI(mapping)); } -static inline void storm_memset_spq_addr(struct bnx2x *bp, - dma_addr_t mapping, u16 abs_fid) +static void storm_memset_spq_addr(struct bnx2x *bp, + dma_addr_t mapping, u16 abs_fid) { u32 addr = XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid); @@ -231,8 +306,8 @@ static inline void storm_memset_spq_addr(struct bnx2x *bp, __storm_memset_dma_mapping(bp, addr, mapping); } -static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, - u16 pf_id) +static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, + u16 pf_id) { REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), pf_id); @@ -244,8 +319,8 @@ static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, pf_id); } -static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, - u8 enable) +static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, + u8 enable) { REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), enable); @@ -257,8 +332,8 @@ static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, enable); } -static inline void storm_memset_eq_data(struct bnx2x *bp, - struct event_ring_data *eq_data, +static void storm_memset_eq_data(struct bnx2x *bp, + struct event_ring_data *eq_data, u16 pfid) { size_t size = sizeof(struct event_ring_data); @@ -268,8 +343,8 @@ static inline void storm_memset_eq_data(struct bnx2x *bp, __storm_memset_struct(bp, addr, size, (u32 *)eq_data); } -static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod, - u16 pfid) +static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod, + u16 pfid) { u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid); REG_WR16(bp, addr, eq_prod); @@ -304,10 +379,11 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr) #define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]" #define DMAE_DP_DST_NONE "dst_addr [none]" -static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, - int msglvl) +static void bnx2x_dp_dmae(struct bnx2x *bp, + struct dmae_command *dmae, int msglvl) { u32 src_type = dmae->opcode & DMAE_COMMAND_SRC; + int i; switch (dmae->opcode & DMAE_COMMAND_DST) { case DMAE_CMD_DST_PCI: @@ -364,6 +440,9 @@ static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, break; } + for (i = 0; i < 
(sizeof(struct dmae_command)/4); i++) + DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n", + i, *(((u32 *)dmae) + i)); } /* copy command into DMAE command memory and set DMAE command go */ @@ -375,9 +454,6 @@ void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx) cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx); for (i = 0; i < (sizeof(struct dmae_command)/4); i++) { REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i)); - - DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n", - idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i)); } REG_WR(bp, dmae_reg_go_c[idx], 1); } @@ -418,7 +494,7 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, return opcode; } -static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, +void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, u8 src_type, u8 dst_type) { @@ -434,37 +510,34 @@ static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, dmae->comp_val = DMAE_COMP_VAL; } -/* issue a dmae command over the init-channel and wailt for completion */ -static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, - struct dmae_command *dmae) +/* issue a dmae command over the init-channel and wait for completion */ +int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, + u32 *comp) { - u32 *wb_comp = bnx2x_sp(bp, wb_comp); int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000; int rc = 0; - DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n", - bp->slowpath->wb_data[0], bp->slowpath->wb_data[1], - bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); + bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE); - /* - * Lock the dmae channel. Disable BHs to prevent a dead-lock + /* Lock the dmae channel. Disable BHs to prevent a dead-lock * as long as this code is called both from syscall context and * from ndo_set_rx_mode() flow that may be called from BH. 
*/ spin_lock_bh(&bp->dmae_lock); /* reset completion */ - *wb_comp = 0; + *comp = 0; /* post the command on the channel used for initializations */ bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); /* wait for completion */ udelay(5); - while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { - DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp); + while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { - if (!cnt) { + if (!cnt || + (bp->recovery_state != BNX2X_RECOVERY_DONE && + bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) { BNX2X_ERR("DMAE timeout!\n"); rc = DMAE_TIMEOUT; goto unlock; @@ -472,15 +545,11 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, cnt--; udelay(50); } - if (*wb_comp & DMAE_PCI_ERR_FLAG) { + if (*comp & DMAE_PCI_ERR_FLAG) { BNX2X_ERR("DMAE PCI error!\n"); rc = DMAE_PCI_ERROR; } - DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n", - bp->slowpath->wb_data[0], bp->slowpath->wb_data[1], - bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); - unlock: spin_unlock_bh(&bp->dmae_lock); return rc; @@ -489,14 +558,16 @@ unlock: void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, u32 len32) { + int rc; struct dmae_command dmae; if (!bp->dmae_ready) { u32 *data = bnx2x_sp(bp, wb_data[0]); - DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)" - " using indirect\n", dst_addr, len32); - bnx2x_init_ind_wr(bp, dst_addr, data, len32); + if (CHIP_IS_E1(bp)) + bnx2x_init_ind_wr(bp, dst_addr, data, len32); + else + bnx2x_init_str_wr(bp, dst_addr, data, len32); return; } @@ -510,24 +581,32 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, dmae.dst_addr_hi = 0; dmae.len = len32; - bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF); - /* issue the command and wait for completion */ - bnx2x_issue_dmae_with_comp(bp, &dmae); + rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); + if (rc) { + BNX2X_ERR("DMAE returned failure %d\n", rc); +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); +#endif + } } void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) { + int rc; struct dmae_command dmae; if (!bp->dmae_ready) { u32 *data = bnx2x_sp(bp, wb_data[0]); int i; - DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)" - " using indirect\n", src_addr, len32); - for (i = 0; i < len32; i++) - data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4); + if (CHIP_IS_E1(bp)) + for (i = 0; i < len32; i++) + data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4); + else + for (i = 0; i < len32; i++) + data[i] = REG_RD(bp, src_addr + i*4); + return; } @@ -541,10 +620,14 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data)); dmae.len = len32; - bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF); - /* issue the command and wait for completion */ - bnx2x_issue_dmae_with_comp(bp, &dmae); + rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); + if (rc) { + BNX2X_ERR("DMAE returned failure %d\n", rc); +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); +#endif + } } static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, @@ -563,27 +646,6 @@ static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len); } -/* used only for slowpath so not inlined */ -static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo) -{ - u32 wb_write[2]; - - wb_write[0] = val_hi; - wb_write[1] = val_lo; - REG_WR_DMAE(bp, reg, wb_write, 2); -} - -#ifdef USE_WB_RD -static u64 
bnx2x_wb_rd(struct bnx2x *bp, int reg) -{ - u32 wb_data[2]; - - REG_RD_DMAE(bp, reg, wb_data, 2); - - return HILO_U64(wb_data[0], wb_data[1]); -} -#endif - static int bnx2x_mc_assert(struct bnx2x *bp) { char last_idx; @@ -609,8 +671,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp) XSTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { - BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x" - " 0x%08x 0x%08x 0x%08x\n", + BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { @@ -637,8 +698,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp) TSTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { - BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x" - " 0x%08x 0x%08x 0x%08x\n", + BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { @@ -665,8 +725,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp) CSTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { - BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x" - " 0x%08x 0x%08x 0x%08x\n", + BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { @@ -693,8 +752,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp) USTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { - BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x" - " 0x%08x 0x%08x 0x%08x\n", + BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { @@ -705,6 +763,10 @@ static int bnx2x_mc_assert(struct bnx2x *bp) return rc; } +#define MCPR_TRACE_BUFFER_SIZE (0x800) +#define SCRATCH_BUFFER_SIZE(bp) \ + (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000)) + void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) { u32 addr, val; @@ -723,25 +785,52 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER); if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER)) - printk("%s" "MCP PC at 0x%x\n", lvl, val); + BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val); if (BP_PATH(bp) == 0) trace_shmem_base = bp->common.shmem_base; else trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr); - addr = trace_shmem_base - 0x0800 + 4; + + /* sanity */ + if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE || + trace_shmem_base >= MCPR_SCRATCH_BASE(bp) + + SCRATCH_BUFFER_SIZE(bp)) { + BNX2X_ERR("Unable to dump trace buffer (mark %x)\n", + trace_shmem_base); + return; + } + + addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE; + + /* validate TRCB signature */ + mark = REG_RD(bp, addr); + if (mark != MFW_TRACE_SIGNATURE) { + BNX2X_ERR("Trace buffer signature is missing."); + return ; + } + + /* read cyclic buffer pointer */ + addr += 4; mark = REG_RD(bp, addr); - mark = (CHIP_IS_E1x(bp) ? 
MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) - + ((mark + 0x3) & ~0x3) - 0x08000000; + mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000; + if (mark >= trace_shmem_base || mark < addr + 4) { + BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n"); + return; + } printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark); printk("%s", lvl); - for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) { + + /* dump buffer after the mark */ + for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) { for (word = 0; word < 8; word++) data[word] = htonl(REG_RD(bp, offset + 4*word)); data[8] = 0x0; pr_cont("%s", (char *)data); } + + /* dump buffer before the mark */ for (offset = addr + 4; offset <= mark; offset += 0x8*4) { for (word = 0; word < 8; word++) data[word] = htonl(REG_RD(bp, offset + 4*word)); @@ -751,12 +840,76 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) printk("%s" "end of fw dump\n", lvl); } -static inline void bnx2x_fw_dump(struct bnx2x *bp) +static void bnx2x_fw_dump(struct bnx2x *bp) { bnx2x_fw_dump_lvl(bp, KERN_ERR); } -void bnx2x_panic_dump(struct bnx2x *bp) +static void bnx2x_hc_int_disable(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; + u32 val = REG_RD(bp, addr); + + /* in E1 we must use only PCI configuration space to disable + * MSI/MSIX capability + * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block + */ + if (CHIP_IS_E1(bp)) { + /* Since IGU_PF_CONF_MSI_MSIX_EN still always on + * Use mask register to prevent from HC sending interrupts + * after we exit the function + */ + REG_WR(bp, HC_REG_INT_MASK + port*4, 0); + + val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_INT_LINE_EN_0 | + HC_CONFIG_0_REG_ATTN_BIT_EN_0); + } else + val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | + HC_CONFIG_0_REG_INT_LINE_EN_0 | + HC_CONFIG_0_REG_ATTN_BIT_EN_0); + + DP(NETIF_MSG_IFDOWN, + "write %x to HC %d (addr 0x%x)\n", + val, port, addr); + + /* flush all outstanding writes */ + mmiowb(); + + REG_WR(bp, addr, val); + if (REG_RD(bp, addr) != val) + BNX2X_ERR("BUG! Proper val not read from IGU!\n"); +} + +static void bnx2x_igu_int_disable(struct bnx2x *bp) +{ + u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); + + val &= ~(IGU_PF_CONF_MSI_MSIX_EN | + IGU_PF_CONF_INT_LINE_EN | + IGU_PF_CONF_ATTN_BIT_EN); + + DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val); + + /* flush all outstanding writes */ + mmiowb(); + + REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); + if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val) + BNX2X_ERR("BUG! 
Proper val not read from IGU!\n");
+}
+
+static void bnx2x_int_disable(struct bnx2x *bp)
+{
+	if (bp->common.int_block == INT_BLOCK_HC)
+		bnx2x_hc_int_disable(bp);
+	else
+		bnx2x_igu_int_disable(bp);
+}
+
+void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
 {
 	int i;
 	u16 j;
@@ -766,43 +919,52 @@ void bnx2x_panic_dump(struct bnx2x *bp)
 	u16 start = 0, end = 0;
 	u8 cos;
 #endif
+	if (IS_PF(bp) && disable_int)
+		bnx2x_int_disable(bp);
 
 	bp->stats_state = STATS_STATE_DISABLED;
+	bp->eth_stats.unrecoverable_error++;
 	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
 
 	BNX2X_ERR("begin crash dump -----------------\n");
 
 	/* Indices */
 	/* Common */
-	BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
-		  "  spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
-		  bp->def_idx, bp->def_att_idx, bp->attn_state,
-		  bp->spq_prod_idx, bp->stats_counter);
-	BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
-		  bp->def_status_blk->atten_status_block.attn_bits,
-		  bp->def_status_blk->atten_status_block.attn_bits_ack,
-		  bp->def_status_blk->atten_status_block.status_block_id,
-		  bp->def_status_blk->atten_status_block.attn_bits_index);
-	BNX2X_ERR("     def (");
-	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
-		pr_cont("0x%x%s",
-			bp->def_status_blk->sp_sb.index_values[i],
-			(i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");
-
-	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
-		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
-			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
-			i*sizeof(u32));
-
-	pr_cont("igu_sb_id(0x%x)  igu_seg_id(0x%x) pf_id(0x%x)  vnic_id(0x%x)  vf_id(0x%x)  vf_valid (0x%x) state(0x%x)\n",
-	       sp_sb_data.igu_sb_id,
-	       sp_sb_data.igu_seg_id,
-	       sp_sb_data.p_func.pf_id,
-	       sp_sb_data.p_func.vnic_id,
-	       sp_sb_data.p_func.vf_id,
-	       sp_sb_data.p_func.vf_valid,
-	       sp_sb_data.state);
-
+	if (IS_PF(bp)) {
+		struct host_sp_status_block *def_sb = bp->def_status_blk;
+		int data_size, cstorm_offset;
+
+		BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)  spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
+			  bp->def_idx, bp->def_att_idx, bp->attn_state,
+			  bp->spq_prod_idx, bp->stats_counter);
+		BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
+			  def_sb->atten_status_block.attn_bits,
+			  def_sb->atten_status_block.attn_bits_ack,
+			  def_sb->atten_status_block.status_block_id,
+			  def_sb->atten_status_block.attn_bits_index);
+		BNX2X_ERR("     def (");
+		for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
+			pr_cont("0x%x%s",
+				def_sb->sp_sb.index_values[i],
+				(i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");
+
+		data_size = sizeof(struct hc_sp_status_block_data) /
+			    sizeof(u32);
+		cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
+		for (i = 0; i < data_size; i++)
+			*((u32 *)&sp_sb_data + i) =
+				REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
+					   i * sizeof(u32));
+
+		pr_cont("igu_sb_id(0x%x)  igu_seg_id(0x%x) pf_id(0x%x)  vnic_id(0x%x)  vf_id(0x%x)  vf_valid (0x%x) state(0x%x)\n",
+			sp_sb_data.igu_sb_id,
+			sp_sb_data.igu_seg_id,
+			sp_sb_data.p_func.pf_id,
+			sp_sb_data.p_func.vnic_id,
+			sp_sb_data.p_func.vf_id,
+			sp_sb_data.p_func.vf_valid,
+			sp_sb_data.state);
+	}
 
 	for_each_eth_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -822,24 +984,19 @@ void bnx2x_panic_dump(struct bnx2x *bp)
 		struct bnx2x_fp_txdata txdata;
 
 		/* Rx */
-		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
-			  "  rx_comp_prod(0x%x)"
-			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
+		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)  rx_comp_prod(0x%x)  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
 			  i, fp->rx_bd_prod, fp->rx_bd_cons,
 			  fp->rx_comp_prod,
 			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
-		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
-			  "  fp_hc_idx(0x%x)\n",
+		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)  fp_hc_idx(0x%x)\n",
 			  fp->rx_sge_prod, fp->last_max_sge,
 			  le16_to_cpu(fp->fp_hc_idx));
 
 		/* Tx */
 		for_each_cos_in_tx_queue(fp, cos)
 		{
-			txdata = fp->txdata[cos];
-			BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
-				  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
-				  "  *tx_cons_sb(0x%x)\n",
+			txdata = *fp->txdata_ptr[cos];
+			BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)  *tx_cons_sb(0x%x)\n",
 				  i, txdata.tx_pkt_prod,
 				  txdata.tx_pkt_cons, txdata.tx_bd_prod,
 				  txdata.tx_bd_cons,
@@ -851,10 +1008,9 @@ void bnx2x_panic_dump(struct bnx2x *bp)
 
 		/* host sb data */
 
-#ifdef BCM_CNIC
 		if (IS_FCOE_FP(fp))
 			continue;
-#endif
+
 		BNX2X_ERR("     run indexes (");
 		for (j = 0; j < HC_SB_MAX_SM; j++)
 			pr_cont("0x%x%s",
@@ -866,6 +1022,11 @@ void bnx2x_panic_dump(struct bnx2x *bp)
 			pr_cont("0x%x%s",
 			       fp->sb_index_values[j],
 			       (j == loop - 1) ? ")" : " ");
+
+		/* VF cannot access FW reflection for status block */
+		if (IS_VF(bp))
+			continue;
+
 		/* fw sb data */
 		data_size = CHIP_IS_E1x(bp) ? 
sizeof(struct hc_status_block_data_e1x) : @@ -881,9 +1042,7 @@ void bnx2x_panic_dump(struct bnx2x *bp) j * sizeof(u32)); if (!CHIP_IS_E1x(bp)) { - pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) " - "vnic_id(0x%x) same_igu_sb_1b(0x%x) " - "state(0x%x)\n", + pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n", sb_data_e2.common.p_func.pf_id, sb_data_e2.common.p_func.vf_id, sb_data_e2.common.p_func.vf_valid, @@ -891,9 +1050,7 @@ void bnx2x_panic_dump(struct bnx2x *bp) sb_data_e2.common.same_igu_sb_1b, sb_data_e2.common.state); } else { - pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) " - "vnic_id(0x%x) same_igu_sb_1b(0x%x) " - "state(0x%x)\n", + pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n", sb_data_e1x.common.p_func.pf_id, sb_data_e1x.common.p_func.vf_id, sb_data_e1x.common.p_func.vf_valid, @@ -904,30 +1061,40 @@ void bnx2x_panic_dump(struct bnx2x *bp) /* SB_SMs data */ for (j = 0; j < HC_SB_MAX_SM; j++) { - pr_cont("SM[%d] __flags (0x%x) " - "igu_sb_id (0x%x) igu_seg_id(0x%x) " - "time_to_expire (0x%x) " - "timer_value(0x%x)\n", j, - hc_sm_p[j].__flags, - hc_sm_p[j].igu_sb_id, - hc_sm_p[j].igu_seg_id, - hc_sm_p[j].time_to_expire, - hc_sm_p[j].timer_value); + pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n", + j, hc_sm_p[j].__flags, + hc_sm_p[j].igu_sb_id, + hc_sm_p[j].igu_seg_id, + hc_sm_p[j].time_to_expire, + hc_sm_p[j].timer_value); } - /* Indecies data */ + /* Indices data */ for (j = 0; j < loop; j++) { - pr_cont("INDEX[%d] flags (0x%x) " - "timeout (0x%x)\n", j, + pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j, hc_index_p[j].flags, hc_index_p[j].timeout); } } #ifdef BNX2X_STOP_ON_ERROR + if (IS_PF(bp)) { + /* event queue */ + BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod); + for (i = 0; i < NUM_EQ_DESC; i++) { + u32 *data = (u32 *)&bp->eq_ring[i].message.data; + + BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n", + i, bp->eq_ring[i].message.opcode, + bp->eq_ring[i].message.error); + BNX2X_ERR("data: %x %x %x\n", + data[0], data[1], data[2]); + } + } + /* Rings */ /* Rx */ - for_each_rx_queue(bp, i) { + for_each_valid_rx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); @@ -961,10 +1128,10 @@ void bnx2x_panic_dump(struct bnx2x *bp) } /* Tx */ - for_each_tx_queue(bp, i) { + for_each_valid_tx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_cos_in_tx_queue(fp, cos) { - struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; + struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10); end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245); @@ -972,8 +1139,7 @@ void bnx2x_panic_dump(struct bnx2x *bp) struct sw_tx_bd *sw_bd = &txdata->tx_buf_ring[j]; - BNX2X_ERR("fp%d: txdata %d, " - "packet[%x]=[%p,%x]\n", + BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n", i, cos, j, sw_bd->skb, sw_bd->first_bd); } @@ -983,16 +1149,17 @@ void bnx2x_panic_dump(struct bnx2x *bp) for (j = start; j != end; j = TX_BD(j + 1)) { u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j]; - BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=" - "[%x:%x:%x:%x]\n", + BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n", i, cos, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]); } } } #endif - bnx2x_fw_dump(bp); - bnx2x_mc_assert(bp); + if (IS_PF(bp)) { + bnx2x_fw_dump(bp); + bnx2x_mc_assert(bp); + } BNX2X_ERR("end crash dump -----------------\n"); } @@ 
-1002,9 +1169,9 @@ void bnx2x_panic_dump(struct bnx2x *bp) * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW * initialization. */ -#define FLR_WAIT_USEC 10000 /* 10 miliseconds */ -#define FLR_WAIT_INTERAVAL 50 /* usec */ -#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERAVAL) /* 200 */ +#define FLR_WAIT_USEC 10000 /* 10 milliseconds */ +#define FLR_WAIT_INTERVAL 50 /* usec */ +#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */ struct pbf_pN_buf_regs { int pN; @@ -1037,7 +1204,7 @@ static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp, while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) < (init_crd - crd_start))) { if (cur_cnt--) { - udelay(FLR_WAIT_INTERAVAL); + udelay(FLR_WAIT_INTERVAL); crd = REG_RD(bp, regs->crd); crd_freed = REG_RD(bp, regs->crd_freed); } else { @@ -1051,7 +1218,7 @@ static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp, } } DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n", - poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN); + poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); } static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp, @@ -1069,7 +1236,7 @@ static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp, while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) { if (cur_cnt--) { - udelay(FLR_WAIT_INTERAVAL); + udelay(FLR_WAIT_INTERVAL); occup = REG_RD(bp, regs->lines_occup); freed = REG_RD(bp, regs->lines_freed); } else { @@ -1083,23 +1250,23 @@ static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp, } } DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n", - poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN); + poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); } -static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg, - u32 expected, u32 poll_count) +static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg, + u32 expected, u32 poll_count) { u32 cur_cnt = poll_count; u32 val; while ((val = REG_RD(bp, reg)) != expected && cur_cnt--) - udelay(FLR_WAIT_INTERAVAL); + udelay(FLR_WAIT_INTERVAL); return val; } -static inline int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, - char *msg, u32 poll_cnt) +int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, + char *msg, u32 poll_cnt) { u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt); if (val != 0) { @@ -1109,7 +1276,8 @@ static inline int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, return 0; } -static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp) +/* Common routines with VF FLR cleanup */ +u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp) { /* adjust polling timeout */ if (CHIP_REV_IS_EMUL(bp)) @@ -1121,7 +1289,7 @@ static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp) return FLR_POLL_CNT; } -static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) +void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) { struct pbf_pN_cmd_regs cmd_regs[] = { {0, (CHIP_IS_E3B0(bp)) ? 
@@ -1180,7 +1348,6 @@ static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count); - /* Verify the transmission buffers are flushed P0, P1, P4 */ for (i = 0; i < ARRAY_SIZE(buf_regs); i++) bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count); @@ -1195,49 +1362,44 @@ static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) #define OP_GEN_AGG_VECT(index) \ (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) - -static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, - u32 poll_cnt) +int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt) { - struct sdm_op_gen op_gen = {0}; - + u32 op_gen_command = 0; u32 comp_addr = BAR_CSTRORM_INTMEM + CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func); int ret = 0; if (REG_RD(bp, comp_addr)) { - BNX2X_ERR("Cleanup complete is not 0\n"); + BNX2X_ERR("Cleanup complete was not 0 before sending\n"); return 1; } - op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); - op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); - op_gen.command |= OP_GEN_AGG_VECT(clnup_func); - op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; + op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); + op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); + op_gen_command |= OP_GEN_AGG_VECT(clnup_func); + op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; - DP(BNX2X_MSG_SP, "FW Final cleanup\n"); - REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command); + DP(BNX2X_MSG_SP, "sending FW Final cleanup\n"); + REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command); if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) { BNX2X_ERR("FW final cleanup did not succeed\n"); - ret = 1; + DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n", + (REG_RD(bp, comp_addr))); + bnx2x_panic(); + return 1; } - /* Zero completion for nxt FLR */ + /* Zero completion for next FLR */ REG_WR(bp, comp_addr, 0); return ret; } -static inline u8 bnx2x_is_pcie_pending(struct pci_dev *dev) +u8 bnx2x_is_pcie_pending(struct pci_dev *dev) { - int pos; u16 status; - pos = pci_pcie_cap(dev); - if (!pos) - return false; - - pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status); + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); return status & PCI_EXP_DEVSTA_TRPND; } @@ -1245,7 +1407,6 @@ static inline u8 bnx2x_is_pcie_pending(struct pci_dev *dev) */ static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt) { - /* wait for CFC PF usage-counter to zero (includes all the VFs) */ if (bnx2x_flr_clnup_poll_hw_counter(bp, CFC_REG_NUM_LCIDS_INSIDE_PF, @@ -1253,7 +1414,6 @@ static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt) poll_cnt)) return 1; - /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ if (bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_PF_USAGE_CNT, @@ -1283,7 +1443,7 @@ static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt) /* Wait DMAE PF usage counter to zero */ if (bnx2x_flr_clnup_poll_hw_counter(bp, dmae_reg_go_c[INIT_DMAE_C(bp)], - "DMAE dommand register timed out", + "DMAE command register timed out", poll_cnt)) return 1; @@ -1330,6 +1490,7 @@ static int bnx2x_pf_flr_clnup(struct bnx2x *bp) REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); /* Poll HW usage counters */ + DP(BNX2X_MSG_SP, "Polling usage counters\n"); if (bnx2x_poll_hw_usage_counters(bp, poll_cnt)) return -EBUSY; 
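The FLR hunks above all lean on one polling primitive: re-read a register until it holds an expected value or a fixed budget of 50-microsecond waits runs out, then let the caller decide what the final value means. A minimal stand-alone sketch of that pattern (hypothetical helper names; REG_RD, REG_WR, udelay and FLR_WAIT_INTERVAL behave as in the driver code shown above):

	/* Re-read 'reg' until it reads 'expected' or 'budget' polls elapse;
	 * return the last value read so the caller can tell success from
	 * timeout (mirrors bnx2x_flr_clnup_reg_poll above).
	 */
	static u32 poll_reg(struct bnx2x *bp, u32 reg, u32 expected, u32 budget)
	{
		u32 val;

		while ((val = REG_RD(bp, reg)) != expected && budget--)
			udelay(FLR_WAIT_INTERVAL);	/* 50 usec per poll */

		return val;
	}

	/* Caller side, shaped like the tail of bnx2x_send_final_clnup(): a
	 * value other than 1 after the budget is spent means the FW final
	 * cleanup never completed; on success the completion word is zeroed
	 * to re-arm it for the next FLR.
	 */
	static int wait_final_cleanup(struct bnx2x *bp, u32 comp_addr, u32 budget)
	{
		if (poll_reg(bp, comp_addr, 1, budget) != 1)
			return 1;
		REG_WR(bp, comp_addr, 0);
		return 0;
	}
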
@@ -1368,14 +1529,17 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp) int port = BP_PORT(bp); u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; u32 val = REG_RD(bp, addr); - int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; - int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0; + bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false; + bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false; + bool msi = (bp->flags & USING_MSI_FLAG) ? true : false; if (msix) { val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | HC_CONFIG_0_REG_INT_LINE_EN_0); val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); + if (single_msix) + val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0; } else if (msi) { val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | @@ -1388,8 +1552,8 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp) HC_CONFIG_0_REG_ATTN_BIT_EN_0); if (!CHIP_IS_E1(bp)) { - DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n", - val, port, addr); + DP(NETIF_MSG_IFUP, + "write %x to HC %d (addr 0x%x)\n", val, port, addr); REG_WR(bp, addr, val); @@ -1400,8 +1564,9 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp) if (CHIP_IS_E1(bp)) REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF); - DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n", - val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx"))); + DP(NETIF_MSG_IFUP, + "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr, + (msix ? "MSI-X" : (msi ? "MSI" : "INTx"))); REG_WR(bp, addr, val); /* @@ -1431,36 +1596,48 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp) static void bnx2x_igu_int_enable(struct bnx2x *bp) { u32 val; - int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; - int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0; + bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false; + bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false; + bool msi = (bp->flags & USING_MSI_FLAG) ? true : false; val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); if (msix) { val &= ~(IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_SINGLE_ISR_EN); - val |= (IGU_PF_CONF_FUNC_EN | - IGU_PF_CONF_MSI_MSIX_EN | + val |= (IGU_PF_CONF_MSI_MSIX_EN | IGU_PF_CONF_ATTN_BIT_EN); + + if (single_msix) + val |= IGU_PF_CONF_SINGLE_ISR_EN; } else if (msi) { val &= ~IGU_PF_CONF_INT_LINE_EN; - val |= (IGU_PF_CONF_FUNC_EN | - IGU_PF_CONF_MSI_MSIX_EN | + val |= (IGU_PF_CONF_MSI_MSIX_EN | IGU_PF_CONF_ATTN_BIT_EN | IGU_PF_CONF_SINGLE_ISR_EN); } else { val &= ~IGU_PF_CONF_MSI_MSIX_EN; - val |= (IGU_PF_CONF_FUNC_EN | - IGU_PF_CONF_INT_LINE_EN | + val |= (IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_ATTN_BIT_EN | IGU_PF_CONF_SINGLE_ISR_EN); } - DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n", + /* Clean previous status - need to configure igu prior to ack*/ + if ((!msix) || single_msix) { + REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); + bnx2x_ack_int(bp); + } + + val |= IGU_PF_CONF_FUNC_EN; + + DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n", val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx"))); REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); + if (val & IGU_PF_CONF_INT_LINE_EN) + pci_intx(bp->pdev, true); + barrier(); /* init leading/trailing edge */ @@ -1487,70 +1664,6 @@ void bnx2x_int_enable(struct bnx2x *bp) bnx2x_igu_int_enable(bp); } -static void bnx2x_hc_int_disable(struct bnx2x *bp) -{ - int port = BP_PORT(bp); - u32 addr = port ? 
HC_REG_CONFIG_1 : HC_REG_CONFIG_0; - u32 val = REG_RD(bp, addr); - - /* - * in E1 we must use only PCI configuration space to disable - * MSI/MSIX capablility - * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block - */ - if (CHIP_IS_E1(bp)) { - /* Since IGU_PF_CONF_MSI_MSIX_EN still always on - * Use mask register to prevent from HC sending interrupts - * after we exit the function - */ - REG_WR(bp, HC_REG_INT_MASK + port*4, 0); - - val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | - HC_CONFIG_0_REG_INT_LINE_EN_0 | - HC_CONFIG_0_REG_ATTN_BIT_EN_0); - } else - val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | - HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | - HC_CONFIG_0_REG_INT_LINE_EN_0 | - HC_CONFIG_0_REG_ATTN_BIT_EN_0); - - DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n", - val, port, addr); - - /* flush all outstanding writes */ - mmiowb(); - - REG_WR(bp, addr, val); - if (REG_RD(bp, addr) != val) - BNX2X_ERR("BUG! proper val not read from IGU!\n"); -} - -static void bnx2x_igu_int_disable(struct bnx2x *bp) -{ - u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); - - val &= ~(IGU_PF_CONF_MSI_MSIX_EN | - IGU_PF_CONF_INT_LINE_EN | - IGU_PF_CONF_ATTN_BIT_EN); - - DP(NETIF_MSG_INTR, "write %x to IGU\n", val); - - /* flush all outstanding writes */ - mmiowb(); - - REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); - if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val) - BNX2X_ERR("BUG! proper val not read from IGU!\n"); -} - -void bnx2x_int_disable(struct bnx2x *bp) -{ - if (bp->common.int_block == INT_BLOCK_HC) - bnx2x_hc_int_disable(bp); - else - bnx2x_igu_int_disable(bp); -} - void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) { int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; @@ -1564,9 +1677,8 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) if (msix) { synchronize_irq(bp->msix_table[0].vector); offset = 1; -#ifdef BCM_CNIC - offset++; -#endif + if (CNIC_SUPPORT(bp)) + offset++; for_each_eth_queue(bp, i) synchronize_irq(bp->msix_table[offset++].vector); } else @@ -1592,11 +1704,12 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource) int func = BP_FUNC(bp); u32 hw_lock_control_reg; - DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource); + DP(NETIF_MSG_HW | NETIF_MSG_IFUP, + "Trying to take a lock on resource %d\n", resource); /* Validating that the resource is within range */ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { - DP(NETIF_MSG_HW, + DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", resource, HW_LOCK_MAX_RESOURCE_VALUE); return false; @@ -1614,7 +1727,8 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource) if (lock_status & resource_bit) return true; - DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource); + DP(NETIF_MSG_HW | NETIF_MSG_IFUP, + "Failed to get a lock on resource %d\n", resource); return false; } @@ -1626,7 +1740,7 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource) * Returns the recovery leader resource id according to the engine this function * belongs to. Currently only only 2 engines is supported. */ -static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp) +static int bnx2x_get_leader_lock_resource(struct bnx2x *bp) { if (BP_PATH(bp)) return HW_LOCK_RESOURCE_RECOVERY_LEADER_1; @@ -1635,20 +1749,37 @@ static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp) } /** - * bnx2x_trylock_leader_lock- try to aquire a leader lock. + * bnx2x_trylock_leader_lock- try to acquire a leader lock. 
* * @bp: driver handle * - * Tries to aquire a leader lock for cuurent engine. + * Tries to acquire a leader lock for current engine. */ -static inline bool bnx2x_trylock_leader_lock(struct bnx2x *bp) +static bool bnx2x_trylock_leader_lock(struct bnx2x *bp) { return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); } -#ifdef BCM_CNIC static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err); -#endif + +/* schedule the sp task and mark that interrupt occurred (runs from ISR) */ +static int bnx2x_schedule_sp_task(struct bnx2x *bp) +{ + /* Set the interrupt occurred bit for the sp-task to recognize it + * must ack the interrupt and transition according to the IGU + * state machine. + */ + atomic_set(&bp->interrupt_occurred, 1); + + /* The sp_task must execute only after this bit + * is set, otherwise we will get out of sync and miss all + * further interrupts. Hence, the barrier. + */ + smp_wmb(); + + /* schedule sp_task to workqueue */ + return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); +} void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) { @@ -1656,13 +1787,20 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX; - struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj; + struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj; DP(BNX2X_MSG_SP, "fp %d cid %d got ramrod #%d state is %x type is %d\n", fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.ramrod_type); + /* If cid is within VF range, replace the slowpath object with the + * one corresponding to this VF + */ + if (cid >= BNX2X_FIRST_VF_CID && + cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS) + bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj); + switch (command) { case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): DP(BNX2X_MSG_SP, "got UPDATE ramrod. 
CID %d\n", cid); @@ -1675,7 +1813,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) break; case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP): - DP(NETIF_MSG_IFUP, "got MULTI[%d] tx-only setup ramrod\n", cid); + DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid); drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY; break; @@ -1685,7 +1823,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) break; case (RAMROD_CMD_ID_ETH_TERMINATE): - DP(BNX2X_MSG_SP, "got MULTI[%d] teminate ramrod\n", cid); + DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid); drv_cmd = BNX2X_Q_CMD_TERMINATE; break; @@ -1694,6 +1832,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) drv_cmd = BNX2X_Q_CMD_EMPTY; break; + case (RAMROD_CMD_ID_ETH_TPA_UPDATE): + DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid); + drv_cmd = BNX2X_Q_CMD_UPDATE_TPA; + break; + default: BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n", command, fp->index); @@ -1715,23 +1858,35 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) return; #endif - smp_mb__before_atomic_inc(); + smp_mb__before_atomic(); atomic_inc(&bp->cq_spq_left); /* push the change in bp->spq_left and towards the memory */ - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left)); - return; -} + if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) && + (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) { + /* if Q update ramrod is completed for last Q in AFEX vif set + * flow, then ACK MCP at the end + * + * mark pending ACK to MCP bit. + * prevent case that both bits are cleared. + * At the end of load/unload driver checks that + * sp_state is cleared, and this order prevents + * races + */ + smp_mb__before_atomic(); + set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state); + wmb(); + clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); + smp_mb__after_atomic(); -void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, - u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod) -{ - u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset; + /* schedule the sp task as mcp ack is required */ + bnx2x_schedule_sp_task(bp); + } - bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod, - start); + return; } irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) @@ -1757,37 +1912,39 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) for_each_eth_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; - mask = 0x2 << (fp->index + CNIC_PRESENT); + mask = 0x2 << (fp->index + CNIC_SUPPORT(bp)); if (status & mask) { /* Handle Rx or Tx according to SB id */ - prefetch(fp->rx_cons_sb); for_each_cos_in_tx_queue(fp, cos) - prefetch(fp->txdata[cos].tx_cons_sb); + prefetch(fp->txdata_ptr[cos]->tx_cons_sb); prefetch(&fp->sb_running_index[SM_RX_ID]); napi_schedule(&bnx2x_fp(bp, fp->index, napi)); status &= ~mask; } } -#ifdef BCM_CNIC - mask = 0x2; - if (status & (mask | 0x1)) { - struct cnic_ops *c_ops = NULL; + if (CNIC_SUPPORT(bp)) { + mask = 0x2; + if (status & (mask | 0x1)) { + struct cnic_ops *c_ops = NULL; - if (likely(bp->state == BNX2X_STATE_OPEN)) { rcu_read_lock(); c_ops = rcu_dereference(bp->cnic_ops); - if (c_ops) + if (c_ops && (bp->cnic_eth_dev.drv_state & + CNIC_DRV_STATE_HANDLES_IRQ)) c_ops->cnic_handler(bp->cnic_data, NULL); rcu_read_unlock(); - } - status &= ~mask; + status &= ~mask; + } } -#endif if (unlikely(status & 0x1)) { - queue_delayed_work(bnx2x_wq, 
&bp->sp_task, 0); + + /* schedule sp task to perform default status block work, ack + * attentions and enable interrupts. + */ + bnx2x_schedule_sp_task(bp); status &= ~0x1; if (!status) @@ -1817,8 +1974,7 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) /* Validating that the resource is within range */ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { - DP(NETIF_MSG_HW, - "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", + BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", resource, HW_LOCK_MAX_RESOURCE_VALUE); return -EINVAL; } @@ -1833,7 +1989,7 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) /* Validating that the resource is not already taken */ lock_status = REG_RD(bp, hw_lock_control_reg); if (lock_status & resource_bit) { - DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", + BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n", lock_status, resource_bit); return -EEXIST; } @@ -1846,9 +2002,9 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) if (lock_status & resource_bit) return 0; - msleep(5); + usleep_range(5000, 10000); } - DP(NETIF_MSG_HW, "Timeout\n"); + BNX2X_ERR("Timeout\n"); return -EAGAIN; } @@ -1864,12 +2020,9 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) int func = BP_FUNC(bp); u32 hw_lock_control_reg; - DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource); - /* Validating that the resource is within range */ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { - DP(NETIF_MSG_HW, - "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", + BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", resource, HW_LOCK_MAX_RESOURCE_VALUE); return -EINVAL; } @@ -1884,8 +2037,8 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) /* Validating that the resource is currently taken */ lock_status = REG_RD(bp, hw_lock_control_reg); if (!(lock_status & resource_bit)) { - DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", - lock_status, resource_bit); + BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. 
Unlock was called but lock wasn't taken!\n", + lock_status, resource_bit); return -EFAULT; } @@ -1893,7 +2046,6 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) return 0; } - int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port) { /* The GPIO should be swapped if swap register is set and active */ @@ -1945,7 +2097,8 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) switch (mode) { case MISC_REGISTERS_GPIO_OUTPUT_LOW: - DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n", + DP(NETIF_MSG_LINK, + "Set GPIO %d (shift %d) -> output low\n", gpio_num, gpio_shift); /* clear FLOAT and set CLR */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); @@ -1953,7 +2106,8 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) break; case MISC_REGISTERS_GPIO_OUTPUT_HIGH: - DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n", + DP(NETIF_MSG_LINK, + "Set GPIO %d (shift %d) -> output high\n", gpio_num, gpio_shift); /* clear FLOAT and set SET */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); @@ -1961,7 +2115,8 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) break; case MISC_REGISTERS_GPIO_INPUT_HI_Z: - DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n", + DP(NETIF_MSG_LINK, + "Set GPIO %d (shift %d) -> input\n", gpio_num, gpio_shift); /* set FLOAT */ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); @@ -2045,16 +2200,18 @@ int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) switch (mode) { case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: - DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> " - "output low\n", gpio_num, gpio_shift); + DP(NETIF_MSG_LINK, + "Clear GPIO INT %d (shift %d) -> output low\n", + gpio_num, gpio_shift); /* clear SET and set CLR */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); break; case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: - DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> " - "output high\n", gpio_num, gpio_shift); + DP(NETIF_MSG_LINK, + "Set GPIO INT %d (shift %d) -> output high\n", + gpio_num, gpio_shift); /* clear CLR and set SET */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); @@ -2070,40 +2227,39 @@ int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) return 0; } -static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode) +static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode) { - u32 spio_mask = (1 << spio_num); u32 spio_reg; - if ((spio_num < MISC_REGISTERS_SPIO_4) || - (spio_num > MISC_REGISTERS_SPIO_7)) { - BNX2X_ERR("Invalid SPIO %d\n", spio_num); + /* Only 2 SPIOs are configurable */ + if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) { + BNX2X_ERR("Invalid SPIO 0x%x\n", spio); return -EINVAL; } bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); /* read SPIO and mask except the float bits */ - spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT); + spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT); switch (mode) { - case MISC_REGISTERS_SPIO_OUTPUT_LOW: - DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num); + case MISC_SPIO_OUTPUT_LOW: + DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio); /* clear FLOAT and set CLR */ - spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); - spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS); + spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); + spio_reg |= (spio << 
MISC_SPIO_CLR_POS); break; - case MISC_REGISTERS_SPIO_OUTPUT_HIGH: - DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num); + case MISC_SPIO_OUTPUT_HIGH: + DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio); /* clear FLOAT and set SET */ - spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); - spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS); + spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); + spio_reg |= (spio << MISC_SPIO_SET_POS); break; - case MISC_REGISTERS_SPIO_INPUT_HI_Z: - DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num); + case MISC_SPIO_INPUT_HI_Z: + DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio); /* set FLOAT */ - spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); + spio_reg |= (spio << MISC_SPIO_FLOAT_POS); break; default: @@ -2142,22 +2298,42 @@ void bnx2x_calc_fc_adv(struct bnx2x *bp) } } -u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) +static void bnx2x_set_requested_fc(struct bnx2x *bp) { - if (!BP_NOMCP(bp)) { - u8 rc; - int cfx_idx = bnx2x_get_link_cfg_idx(bp); - u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx]; - /* - * Initialize link parameters structure variables - * It is recommended to turn off RX FC for jumbo frames - * for better performance - */ - if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000)) - bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; - else - bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; + /* Initialize link parameters structure variables + * It is recommended to turn off RX FC for jumbo frames + * for better performance + */ + if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000)) + bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; + else + bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; +} + +static void bnx2x_init_dropless_fc(struct bnx2x *bp) +{ + u32 pause_enabled = 0; + if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) { + if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) + pause_enabled = 1; + + REG_WR(bp, BAR_USTRORM_INTMEM + + USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)), + pause_enabled); + } + + DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n", + pause_enabled ? 
"enabled" : "disabled"); +} + +int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) +{ + int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp); + u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx]; + + if (!BP_NOMCP(bp)) { + bnx2x_set_requested_fc(bp); bnx2x_acquire_phy_lock(bp); if (load_mode == LOAD_DIAG) { @@ -2175,17 +2351,24 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) } } + if (load_mode == LOAD_LOOPBACK_EXT) { + struct link_params *lp = &bp->link_params; + lp->loopback_mode = LOOPBACK_EXT; + } + rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); bnx2x_release_phy_lock(bp); + bnx2x_init_dropless_fc(bp); + bnx2x_calc_fc_adv(bp); - if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) { + if (bp->link_vars.link_up) { bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); bnx2x_link_report(bp); - } else - queue_delayed_work(bnx2x_wq, &bp->period_task, 0); + } + queue_delayed_work(bnx2x_wq, &bp->period_task, 0); bp->link_params.req_line_speed[cfx_idx] = req_line_speed; return rc; } @@ -2197,10 +2380,11 @@ void bnx2x_link_set(struct bnx2x *bp) { if (!BP_NOMCP(bp)) { bnx2x_acquire_phy_lock(bp); - bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); bnx2x_phy_init(&bp->link_params, &bp->link_vars); bnx2x_release_phy_lock(bp); + bnx2x_init_dropless_fc(bp); + bnx2x_calc_fc_adv(bp); } else BNX2X_ERR("Bootcode is missing - can not set link\n"); @@ -2210,12 +2394,19 @@ static void bnx2x__link_reset(struct bnx2x *bp) { if (!BP_NOMCP(bp)) { bnx2x_acquire_phy_lock(bp); - bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); + bnx2x_lfa_reset(&bp->link_params, &bp->link_vars); bnx2x_release_phy_lock(bp); } else BNX2X_ERR("Bootcode is missing - can not reset link\n"); } +void bnx2x_force_link_reset(struct bnx2x *bp) +{ + bnx2x_acquire_phy_lock(bp); + bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); + bnx2x_release_phy_lock(bp); +} + u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes) { u8 rc = 0; @@ -2231,56 +2422,21 @@ u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes) return rc; } -static void bnx2x_init_port_minmax(struct bnx2x *bp) -{ - u32 r_param = bp->link_vars.line_speed / 8; - u32 fair_periodic_timeout_usec; - u32 t_fair; - - memset(&(bp->cmng.rs_vars), 0, - sizeof(struct rate_shaping_vars_per_port)); - memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port)); - - /* 100 usec in SDM ticks = 25 since each tick is 4 usec */ - bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4; - - /* this is the threshold below which no timer arming will occur - 1.25 coefficient is for the threshold to be a little bigger - than the real time, to compensate for timer in-accuracy */ - bp->cmng.rs_vars.rs_threshold = - (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4; - - /* resolution of fairness timer */ - fair_periodic_timeout_usec = QM_ARB_BYTES / r_param; - /* for 10G it is 1000usec. for 1G it is 10000usec. */ - t_fair = T_FAIR_COEF / bp->link_vars.line_speed; - - /* this is the threshold below which we won't arm the timer anymore */ - bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES; - - /* we multiply by 1e3/8 to get bytes/msec. - We don't want the credits to pass a credit - of the t_fair*FAIR_MEM (algorithm resolution) */ - bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM; - /* since each tick is 4 usec */ - bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4; -} - /* Calculates the sum of vn_min_rates. It's needed for further normalizing of the min_rates. Returns: sum of vn_min_rates. or 0 - if all the min_rates are 0. 
- In the later case fainess algorithm should be deactivated. + In the later case fairness algorithm should be deactivated. If not all min_rates are zero then those that are zeroes will be set to 1. */ -static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) +static void bnx2x_calc_vn_min(struct bnx2x *bp, + struct cmng_init_input *input) { int all_zero = 1; int vn; - bp->vn_weight_sum = 0; for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { u32 vn_cfg = bp->mf_config[vn]; u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> @@ -2288,104 +2444,53 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) /* Skip hidden vns */ if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) - continue; - + vn_min_rate = 0; /* If min rate is zero - set it to 1 */ - if (!vn_min_rate) + else if (!vn_min_rate) vn_min_rate = DEF_MIN_RATE; else all_zero = 0; - bp->vn_weight_sum += vn_min_rate; + input->vnic_min_rate[vn] = vn_min_rate; } /* if ETS or all min rates are zeros - disable fairness */ if (BNX2X_IS_ETS_ENABLED(bp)) { - bp->cmng.flags.cmng_enables &= + input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n"); } else if (all_zero) { - bp->cmng.flags.cmng_enables &= + input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; - DP(NETIF_MSG_IFUP, "All MIN values are zeroes" - " fairness will be disabled\n"); + DP(NETIF_MSG_IFUP, + "All MIN values are zeroes fairness will be disabled\n"); } else - bp->cmng.flags.cmng_enables |= + input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN; } -static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) +static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn, + struct cmng_init_input *input) { - struct rate_shaping_vars_per_vn m_rs_vn; - struct fairness_vars_per_vn m_fair_vn; + u16 vn_max_rate; u32 vn_cfg = bp->mf_config[vn]; - int func = func_by_vn(bp, vn); - u16 vn_min_rate, vn_max_rate; - int i; - /* If function is hidden - set min and max to zeroes */ - if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { - vn_min_rate = 0; + if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) vn_max_rate = 0; - - } else { + else { u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg); - vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> - FUNC_MF_CFG_MIN_BW_SHIFT) * 100; - /* If fairness is enabled (not all min rates are zeroes) and - if current min rate is zero - set it to 1. - This is a requirement of the algorithm. */ - if (bp->vn_weight_sum && (vn_min_rate == 0)) - vn_min_rate = DEF_MIN_RATE; - - if (IS_MF_SI(bp)) + if (IS_MF_SI(bp)) { /* maxCfg in percents of linkspeed */ vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; - else + } else /* SD modes */ /* maxCfg is absolute in 100Mb units */ vn_max_rate = maxCfg * 100; } - DP(NETIF_MSG_IFUP, - "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n", - func, vn_min_rate, vn_max_rate, bp->vn_weight_sum); - - memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn)); - memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn)); - - /* global vn counter - maximal Mbps for this vn */ - m_rs_vn.vn_counter.rate = vn_max_rate; - - /* quota - number of bytes transmitted in this period */ - m_rs_vn.vn_counter.quota = - (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8; - - if (bp->vn_weight_sum) { - /* credit for each period of the fairness algorithm: - number of bytes in T_FAIR (the vn share the port rate). 
- vn_weight_sum should not be larger than 10000, thus - T_FAIR_COEF / (8 * vn_weight_sum) will always be greater - than zero */ - m_fair_vn.vn_credit_delta = - max_t(u32, (vn_min_rate * (T_FAIR_COEF / - (8 * bp->vn_weight_sum))), - (bp->cmng.fair_vars.fair_threshold + - MIN_ABOVE_THRESH)); - DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", - m_fair_vn.vn_credit_delta); - } - - /* Store it to internal memory */ - for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++) - REG_WR(bp, BAR_XSTRORM_INTMEM + - XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4, - ((u32 *)(&m_rs_vn))[i]); - - for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++) - REG_WR(bp, BAR_XSTRORM_INTMEM + - XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4, - ((u32 *)(&m_fair_vn))[i]); + DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate); + + input->vnic_max_rate[vn] = vn_max_rate; } static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp) @@ -2403,7 +2508,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp) int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1); if (BP_NOMCP(bp)) - return; /* what should be the default bvalue in this case */ + return; /* what should be the default value in this case */ /* For 2 port configuration the absolute function number formula * is: @@ -2425,38 +2530,42 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp) bp->mf_config[vn] = MF_CFG_RD(bp, func_mf_config[func].config); } + if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { + DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n"); + bp->flags |= MF_FUNC_DIS; + } else { + DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n"); + bp->flags &= ~MF_FUNC_DIS; + } } static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) { + struct cmng_init_input input; + memset(&input, 0, sizeof(struct cmng_init_input)); - if (cmng_type == CMNG_FNS_MINMAX) { - int vn; + input.port_rate = bp->link_vars.line_speed; - /* clear cmng_enables */ - bp->cmng.flags.cmng_enables = 0; + if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) { + int vn; /* read mf conf from shmem */ if (read_cfg) bnx2x_read_mf_cfg(bp); - /* Init rate shaping and fairness contexts */ - bnx2x_init_port_minmax(bp); - /* vn_weight_sum and enable fairness if not 0 */ - bnx2x_calc_vn_weight_sum(bp); + bnx2x_calc_vn_min(bp, &input); /* calculate and set min-max rate for each vn */ if (bp->port.pmf) for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) - bnx2x_init_vn_minmax(bp, vn); + bnx2x_calc_vn_max(bp, vn, &input); /* always enable rate shaping and fairness */ - bp->cmng.flags.cmng_enables |= + input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; - if (!bp->vn_weight_sum) - DP(NETIF_MSG_IFUP, "All MIN values are zeroes" - " fairness will be disabled\n"); + + bnx2x_init_cmng(&input, &bp->cmng); return; } @@ -2465,6 +2574,50 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) "rate shaping and fairness are disabled\n"); } +static void storm_memset_cmng(struct bnx2x *bp, + struct cmng_init *cmng, + u8 port) +{ + int vn; + size_t size = sizeof(struct cmng_struct_per_port); + + u32 addr = BAR_XSTRORM_INTMEM + + XSTORM_CMNG_PER_PORT_VARS_OFFSET(port); + + __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port); + + for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { + int func = func_by_vn(bp, vn); + + addr = BAR_XSTRORM_INTMEM + + XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func); + size = sizeof(struct rate_shaping_vars_per_vn); + __storm_memset_struct(bp, addr, size, + (u32 *)&cmng->vnic.vnic_max_rate[vn]); + + addr = BAR_XSTRORM_INTMEM + + 
XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func); + size = sizeof(struct fairness_vars_per_vn); + __storm_memset_struct(bp, addr, size, + (u32 *)&cmng->vnic.vnic_min_rate[vn]); + } +} + +/* init cmng mode in HW according to local configuration */ +void bnx2x_set_local_cmng(struct bnx2x *bp) +{ + int cmng_fns = bnx2x_get_cmng_fns_mode(bp); + + if (cmng_fns != CMNG_FNS_NONE) { + bnx2x_cmng_fns_init(bp, false, cmng_fns); + storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); + } else { + /* rate shaping and fairness are disabled */ + DP(NETIF_MSG_IFUP, + "single function mode without fairness\n"); + } +} + /* This function is called upon link interrupt */ static void bnx2x_link_attn(struct bnx2x *bp) { @@ -2473,20 +2626,9 @@ static void bnx2x_link_attn(struct bnx2x *bp) bnx2x_link_update(&bp->link_params, &bp->link_vars); - if (bp->link_vars.link_up) { - - /* dropless flow control */ - if (!CHIP_IS_E1(bp) && bp->dropless_fc) { - int port = BP_PORT(bp); - u32 pause_enabled = 0; - - if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) - pause_enabled = 1; + bnx2x_init_dropless_fc(bp); - REG_WR(bp, BAR_USTRORM_INTMEM + - USTORM_ETH_PAUSE_ENABLED_OFFSET(port), - pause_enabled); - } + if (bp->link_vars.link_up) { if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { struct host_port_stats *pstats; @@ -2500,17 +2642,8 @@ static void bnx2x_link_attn(struct bnx2x *bp) bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); } - if (bp->link_vars.link_up && bp->link_vars.line_speed) { - int cmng_fns = bnx2x_get_cmng_fns_mode(bp); - - if (cmng_fns != CMNG_FNS_NONE) { - bnx2x_cmng_fns_init(bp, false, cmng_fns); - storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); - } else - /* rate shaping and fairness are disabled */ - DP(NETIF_MSG_IFUP, - "single function mode without fairness\n"); - } + if (bp->link_vars.link_up && bp->link_vars.line_speed) + bnx2x_set_local_cmng(bp); __bnx2x_link_report(bp); @@ -2524,17 +2657,233 @@ void bnx2x__link_status_update(struct bnx2x *bp) return; /* read updated dcb configuration */ - bnx2x_dcbx_pmf_update(bp); + if (IS_PF(bp)) { + bnx2x_dcbx_pmf_update(bp); + bnx2x_link_status_update(&bp->link_params, &bp->link_vars); + if (bp->link_vars.link_up) + bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); + else + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + /* indicate link status */ + bnx2x_link_report(bp); + + } else { /* VF */ + bp->port.supported[0] |= (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_2500baseX_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_TP | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + bp->port.advertising[0] = bp->port.supported[0]; + + bp->link_params.bp = bp; + bp->link_params.port = BP_PORT(bp); + bp->link_params.req_duplex[0] = DUPLEX_FULL; + bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE; + bp->link_params.req_line_speed[0] = SPEED_10000; + bp->link_params.speed_cap_mask[0] = 0x7f0000; + bp->link_params.switch_cfg = SWITCH_CFG_10G; + bp->link_vars.mac_type = MAC_TYPE_BMAC; + bp->link_vars.line_speed = SPEED_10000; + bp->link_vars.link_status = + (LINK_STATUS_LINK_UP | + LINK_STATUS_SPEED_AND_DUPLEX_10GTFD); + bp->link_vars.link_up = 1; + bp->link_vars.duplex = DUPLEX_FULL; + bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE; + __bnx2x_link_report(bp); + bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); + } +} + +static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid, + u16 vlan_val, u8 allowed_prio) +{ + struct bnx2x_func_state_params 
func_params = {NULL}; + struct bnx2x_func_afex_update_params *f_update_params = + &func_params.params.afex_update; - bnx2x_link_status_update(&bp->link_params, &bp->link_vars); + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE; - if (bp->link_vars.link_up) - bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); - else - bnx2x_stats_handle(bp, STATS_EVENT_STOP); + /* no need to wait for RAMROD completion, so don't + * set RAMROD_COMP_WAIT flag + */ + + f_update_params->vif_id = vifid; + f_update_params->afex_default_vlan = vlan_val; + f_update_params->allowed_priorities = allowed_prio; - /* indicate link status */ - bnx2x_link_report(bp); + /* if ramrod can not be sent, response to MCP immediately */ + if (bnx2x_func_state_change(bp, &func_params) < 0) + bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); + + return 0; +} + +static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type, + u16 vif_index, u8 func_bit_map) +{ + struct bnx2x_func_state_params func_params = {NULL}; + struct bnx2x_func_afex_viflists_params *update_params = + &func_params.params.afex_viflists; + int rc; + u32 drv_msg_code; + + /* validate only LIST_SET and LIST_GET are received from switch */ + if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET)) + BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n", + cmd_type); + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS; + + /* set parameters according to cmd_type */ + update_params->afex_vif_list_command = cmd_type; + update_params->vif_list_index = vif_index; + update_params->func_bit_map = + (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map; + update_params->func_to_clear = 0; + drv_msg_code = + (cmd_type == VIF_LIST_RULE_GET) ? + DRV_MSG_CODE_AFEX_LISTGET_ACK : + DRV_MSG_CODE_AFEX_LISTSET_ACK; + + /* if ramrod can not be sent, respond to MCP immediately for + * SET and GET requests (other are not triggered from MCP) + */ + rc = bnx2x_func_state_change(bp, &func_params); + if (rc < 0) + bnx2x_fw_command(bp, drv_msg_code, 0); + + return 0; +} + +static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd) +{ + struct afex_stats afex_stats; + u32 func = BP_ABS_FUNC(bp); + u32 mf_config; + u16 vlan_val; + u32 vlan_prio; + u16 vif_id; + u8 allowed_prio; + u8 vlan_mode; + u32 addr_to_write, vifid, addrs, stats_type, i; + + if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) { + vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]); + DP(BNX2X_MSG_MCP, + "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid); + bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0); + } + + if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) { + vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]); + addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]); + DP(BNX2X_MSG_MCP, + "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n", + vifid, addrs); + bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid, + addrs); + } + + if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) { + addr_to_write = SHMEM2_RD(bp, + afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]); + stats_type = SHMEM2_RD(bp, + afex_param1_to_driver[BP_FW_MB_IDX(bp)]); + + DP(BNX2X_MSG_MCP, + "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n", + addr_to_write); + + bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type); + + /* write response to scratchpad, for MCP */ + for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++) + REG_WR(bp, addr_to_write + i*sizeof(u32), + *(((u32 *)(&afex_stats))+i)); + + /* 
send ack message to MCP */ + bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0); + } + + if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) { + mf_config = MF_CFG_RD(bp, func_mf_config[func].config); + bp->mf_config[BP_VN(bp)] = mf_config; + DP(BNX2X_MSG_MCP, + "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n", + mf_config); + + /* if VIF_SET is "enabled" */ + if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) { + /* set rate limit directly to internal RAM */ + struct cmng_init_input cmng_input; + struct rate_shaping_vars_per_vn m_rs_vn; + size_t size = sizeof(struct rate_shaping_vars_per_vn); + u32 addr = BAR_XSTRORM_INTMEM + + XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp)); + + bp->mf_config[BP_VN(bp)] = mf_config; + + bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input); + m_rs_vn.vn_counter.rate = + cmng_input.vnic_max_rate[BP_VN(bp)]; + m_rs_vn.vn_counter.quota = + (m_rs_vn.vn_counter.rate * + RS_PERIODIC_TIMEOUT_USEC) / 8; + + __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn); + + /* read relevant values from mf_cfg struct in shmem */ + vif_id = + (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & + FUNC_MF_CFG_E1HOV_TAG_MASK) >> + FUNC_MF_CFG_E1HOV_TAG_SHIFT; + vlan_val = + (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & + FUNC_MF_CFG_AFEX_VLAN_MASK) >> + FUNC_MF_CFG_AFEX_VLAN_SHIFT; + vlan_prio = (mf_config & + FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> + FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT; + vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT); + vlan_mode = + (MF_CFG_RD(bp, + func_mf_config[func].afex_config) & + FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >> + FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT; + allowed_prio = + (MF_CFG_RD(bp, + func_mf_config[func].afex_config) & + FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >> + FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT; + + /* send ramrod to FW, return in case of failure */ + if (bnx2x_afex_func_update(bp, vif_id, vlan_val, + allowed_prio)) + return; + + bp->afex_def_vlan_tag = vlan_val; + bp->afex_vlan_mode = vlan_mode; + } else { + /* notify link down because BP->flags is disabled */ + bnx2x_link_report(bp); + + /* send INVALID VIF ramrod to FW */ + bnx2x_afex_func_update(bp, 0xFFFF, 0, 0); + + /* Reset the default afex VLAN */ + bp->afex_def_vlan_tag = -1; + } + } } static void bnx2x_pmf_update(struct bnx2x *bp) @@ -2543,7 +2892,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp) u32 val; bp->port.pmf = 1; - DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); + DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf); /* * We need the mb() to ensure the ordering between the writing to @@ -2620,6 +2969,17 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) return rc; } +static void storm_memset_func_cfg(struct bnx2x *bp, + struct tstorm_eth_function_common_config *tcfg, + u16 abs_fid) +{ + size_t size = sizeof(struct tstorm_eth_function_common_config); + + u32 addr = BAR_TSTRORM_INTMEM + + TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid); + + __storm_memset_struct(bp, addr, size, (u32 *)tcfg); +} void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) { @@ -2642,7 +3002,7 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) } /** - * bnx2x_get_tx_only_flags - Return common flags + * bnx2x_get_common_flags - Return common flags * * @bp device handle * @fp queue handle @@ -2650,9 +3010,9 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) * * Return the flags that are common for the Tx-only and not normal connections. 
*/ -static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp, - struct bnx2x_fastpath *fp, - bool zero_stats) +static unsigned long bnx2x_get_common_flags(struct bnx2x *bp, + struct bnx2x_fastpath *fp, + bool zero_stats) { unsigned long flags = 0; @@ -2660,21 +3020,30 @@ static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp, __set_bit(BNX2X_Q_FLG_ACTIVE, &flags); /* tx only connections collect statistics (on the same index as the - * parent connection). The statistics are zeroed when the parent - * connection is initialized. + * parent connection). The statistics are zeroed when the parent + * connection is initialized. */ __set_bit(BNX2X_Q_FLG_STATS, &flags); if (zero_stats) __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags); + if (bp->flags & TX_SWITCHING) + __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags); + + __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags); + __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags); + +#ifdef BNX2X_STOP_ON_ERROR + __set_bit(BNX2X_Q_FLG_TX_SEC, &flags); +#endif return flags; } -static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp, - struct bnx2x_fastpath *fp, - bool leading) +static unsigned long bnx2x_get_q_flags(struct bnx2x *bp, + struct bnx2x_fastpath *fp, + bool leading) { unsigned long flags = 0; @@ -2682,12 +3051,17 @@ static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp, if (IS_MF_SD(bp)) __set_bit(BNX2X_Q_FLG_OV, &flags); - if (IS_FCOE_FP(fp)) + if (IS_FCOE_FP(fp)) { __set_bit(BNX2X_Q_FLG_FCOE, &flags); + /* For FCoE - force usage of default priority (for afex) */ + __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags); + } if (!fp->disable_tpa) { __set_bit(BNX2X_Q_FLG_TPA, &flags); __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags); + if (fp->mode == TPA_MODE_GRO) + __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags); } if (leading) { @@ -2698,6 +3072,9 @@ static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp, /* Always set HW VLAN stripping */ __set_bit(BNX2X_Q_FLG_VLAN, &flags); + /* configure silent vlan removal */ + if (IS_MF_AFEX(bp)) + __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags); return flags | bnx2x_get_common_flags(bp, fp, true); } @@ -2735,15 +3112,12 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, pause->sge_th_hi + FW_PREFETCH_CNT > MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES); - tpa_agg_size = min_t(u32, - (min_t(u32, 8, MAX_SKB_FRAGS) * - SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); + tpa_agg_size = TPA_AGG_SIZE; max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT; max_sge = ((max_sge + PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT; - sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE, - 0xffff); + sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff); } /* pause - not for e1 */ @@ -2777,17 +3151,18 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, * placed on the BD (not including paddings). */ rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START - - BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING; + BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING; rxq_init->cl_qzone_id = fp->cl_qzone_id; rxq_init->tpa_agg_sz = tpa_agg_size; rxq_init->sge_buf_sz = sge_sz; rxq_init->max_sges_pkt = max_sge; rxq_init->rss_engine_id = BP_FUNC(bp); + rxq_init->mcast_engine_id = BP_FUNC(bp); /* Maximum number or simultaneous TPA aggregation for this Queue. * - * For PF Clients it should be the maximum avaliable number. + * For PF Clients it should be the maximum available number. * VF driver(s) may want to define it to a smaller value. 
*/ rxq_init->max_tpa_queues = MAX_AGG_QS(bp); @@ -2799,19 +3174,26 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS; else rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; + /* configure silent vlan removal + * if multi function mode is afex, then mask default vlan + */ + if (IS_MF_AFEX(bp)) { + rxq_init->silent_removal_value = bp->afex_def_vlan_tag; + rxq_init->silent_removal_mask = VLAN_VID_MASK; + } } static void bnx2x_pf_tx_q_prep(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init, u8 cos) { - txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping; + txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping; txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; txq_init->fw_sb_id = fp->fw_sb_id; /* - * set the tss leading client id for TX classfication == + * set the tss leading client id for TX classification == * leading RSS client id */ txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id); @@ -2874,7 +3256,7 @@ static void bnx2x_pf_init(struct bnx2x *bp) if (bp->port.pmf) storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); - /* init Event Queue */ + /* init Event Queue - PCI bus guarantees correct endianity*/ eq_data.base_addr.hi = U64_HI(bp->eq_mapping); eq_data.base_addr.lo = U64_LO(bp->eq_mapping); eq_data.producer = bp->eq_prod; @@ -2883,7 +3265,6 @@ static void bnx2x_pf_init(struct bnx2x *bp) storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp)); } - static void bnx2x_e1h_disable(struct bnx2x *bp) { int port = BP_PORT(bp); @@ -2899,7 +3280,7 @@ static void bnx2x_e1h_enable(struct bnx2x *bp) REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); - /* Tx queue should be only reenabled */ + /* Tx queue should be only re-enabled */ netif_tx_wake_all_queues(bp->dev); /* @@ -2914,17 +3295,29 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp) { struct eth_stats_info *ether_stat = &bp->slowpath->drv_info_to_mcp.ether_stat; + struct bnx2x_vlan_mac_obj *mac_obj = + &bp->sp_objs->mac_obj; + int i; - /* leave last char as NULL */ - memcpy(ether_stat->version, DRV_MODULE_VERSION, - ETH_STAT_INFO_VERSION_LEN - 1); - - bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj, - DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, - ether_stat->mac_local); + strlcpy(ether_stat->version, DRV_MODULE_VERSION, + ETH_STAT_INFO_VERSION_LEN); + /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the + * mac_local field in ether_stat struct. The base address is offset by 2 + * bytes to account for the field being 8 bytes but a mac address is + * only 6 bytes. Likewise, the stride for the get_n_elements function is + * 2 bytes to compensate from the 6 bytes of a mac to the 8 bytes + * allocated by the ether_stat struct, so the macs will land in their + * proper positions. + */ + for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++) + memset(ether_stat->mac_local + i, 0, + sizeof(ether_stat->mac_local[0])); + mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj, + DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, + ether_stat->mac_local + MAC_PAD, MAC_PAD, + ETH_ALEN); ether_stat->mtu_size = bp->dev->mtu; - if (bp->dev->features & NETIF_F_RXCSUM) ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; if (bp->dev->features & NETIF_F_TSO) @@ -2935,16 +3328,22 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp) ether_stat->txq_size = bp->tx_ring_size; ether_stat->rxq_size = bp->rx_ring_size; + +#ifdef CONFIG_BNX2X_SRIOV + ether_stat->vf_cnt = IS_SRIOV(bp) ? 
bp->vfdb->sriov.nr_virtfn : 0; +#endif } static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp) { -#ifdef BCM_CNIC struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; struct fcoe_stats_info *fcoe_stat = &bp->slowpath->drv_info_to_mcp.fcoe_stat; - memcpy(fcoe_stat->mac_local, bp->fip_mac, ETH_ALEN); + if (!CNIC_LOADED(bp)) + return; + + memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN); fcoe_stat->qos_priority = app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE]; @@ -2952,97 +3351,108 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp) /* insert FCoE stats from ramrod response */ if (!NO_FCOE(bp)) { struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = - &bp->fw_stats_data->queue_stats[FCOE_IDX]. + &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. tstorm_queue_statistics; struct xstorm_per_queue_stats *fcoe_q_xstorm_stats = - &bp->fw_stats_data->queue_stats[FCOE_IDX]. + &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. xstorm_queue_statistics; struct fcoe_statistics_params *fw_fcoe_stat = &bp->fw_stats_data->fcoe; - ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo, - fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt); + ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0, + fcoe_stat->rx_bytes_lo, + fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt); - ADD_64(fcoe_stat->rx_bytes_hi, - fcoe_q_tstorm_stats->rcv_ucast_bytes.hi, - fcoe_stat->rx_bytes_lo, - fcoe_q_tstorm_stats->rcv_ucast_bytes.lo); + ADD_64_LE(fcoe_stat->rx_bytes_hi, + fcoe_q_tstorm_stats->rcv_ucast_bytes.hi, + fcoe_stat->rx_bytes_lo, + fcoe_q_tstorm_stats->rcv_ucast_bytes.lo); - ADD_64(fcoe_stat->rx_bytes_hi, - fcoe_q_tstorm_stats->rcv_bcast_bytes.hi, - fcoe_stat->rx_bytes_lo, - fcoe_q_tstorm_stats->rcv_bcast_bytes.lo); + ADD_64_LE(fcoe_stat->rx_bytes_hi, + fcoe_q_tstorm_stats->rcv_bcast_bytes.hi, + fcoe_stat->rx_bytes_lo, + fcoe_q_tstorm_stats->rcv_bcast_bytes.lo); - ADD_64(fcoe_stat->rx_bytes_hi, - fcoe_q_tstorm_stats->rcv_mcast_bytes.hi, - fcoe_stat->rx_bytes_lo, - fcoe_q_tstorm_stats->rcv_mcast_bytes.lo); + ADD_64_LE(fcoe_stat->rx_bytes_hi, + fcoe_q_tstorm_stats->rcv_mcast_bytes.hi, + fcoe_stat->rx_bytes_lo, + fcoe_q_tstorm_stats->rcv_mcast_bytes.lo); - ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo, - fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt); + ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, + fcoe_stat->rx_frames_lo, + fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt); - ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo, - fcoe_q_tstorm_stats->rcv_ucast_pkts); + ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, + fcoe_stat->rx_frames_lo, + fcoe_q_tstorm_stats->rcv_ucast_pkts); - ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo, - fcoe_q_tstorm_stats->rcv_bcast_pkts); + ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, + fcoe_stat->rx_frames_lo, + fcoe_q_tstorm_stats->rcv_bcast_pkts); - ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo, - fcoe_q_tstorm_stats->rcv_mcast_pkts); + ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, + fcoe_stat->rx_frames_lo, + fcoe_q_tstorm_stats->rcv_mcast_pkts); - ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo, - fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt); + ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0, + fcoe_stat->tx_bytes_lo, + fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt); - ADD_64(fcoe_stat->tx_bytes_hi, - fcoe_q_xstorm_stats->ucast_bytes_sent.hi, - fcoe_stat->tx_bytes_lo, - fcoe_q_xstorm_stats->ucast_bytes_sent.lo); + ADD_64_LE(fcoe_stat->tx_bytes_hi, + fcoe_q_xstorm_stats->ucast_bytes_sent.hi, + fcoe_stat->tx_bytes_lo, + fcoe_q_xstorm_stats->ucast_bytes_sent.lo); - 
ADD_64(fcoe_stat->tx_bytes_hi, - fcoe_q_xstorm_stats->bcast_bytes_sent.hi, - fcoe_stat->tx_bytes_lo, - fcoe_q_xstorm_stats->bcast_bytes_sent.lo); + ADD_64_LE(fcoe_stat->tx_bytes_hi, + fcoe_q_xstorm_stats->bcast_bytes_sent.hi, + fcoe_stat->tx_bytes_lo, + fcoe_q_xstorm_stats->bcast_bytes_sent.lo); - ADD_64(fcoe_stat->tx_bytes_hi, - fcoe_q_xstorm_stats->mcast_bytes_sent.hi, - fcoe_stat->tx_bytes_lo, - fcoe_q_xstorm_stats->mcast_bytes_sent.lo); + ADD_64_LE(fcoe_stat->tx_bytes_hi, + fcoe_q_xstorm_stats->mcast_bytes_sent.hi, + fcoe_stat->tx_bytes_lo, + fcoe_q_xstorm_stats->mcast_bytes_sent.lo); - ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo, - fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt); + ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, + fcoe_stat->tx_frames_lo, + fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt); - ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo, - fcoe_q_xstorm_stats->ucast_pkts_sent); + ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, + fcoe_stat->tx_frames_lo, + fcoe_q_xstorm_stats->ucast_pkts_sent); - ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo, - fcoe_q_xstorm_stats->bcast_pkts_sent); + ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, + fcoe_stat->tx_frames_lo, + fcoe_q_xstorm_stats->bcast_pkts_sent); - ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo, - fcoe_q_xstorm_stats->mcast_pkts_sent); + ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, + fcoe_stat->tx_frames_lo, + fcoe_q_xstorm_stats->mcast_pkts_sent); } /* ask L5 driver to add data to the struct */ bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD); -#endif } static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp) { -#ifdef BCM_CNIC struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; struct iscsi_stats_info *iscsi_stat = &bp->slowpath->drv_info_to_mcp.iscsi_stat; - memcpy(iscsi_stat->mac_local, bp->cnic_eth_dev.iscsi_mac, ETH_ALEN); + if (!CNIC_LOADED(bp)) + return; + + memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac, + ETH_ALEN); iscsi_stat->qos_priority = app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI]; /* ask L5 driver to add data to the struct */ bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD); -#endif } /* called due to MCP event (on pmf): @@ -3050,7 +3460,7 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp) * configure FW * notify others function about the change */ -static inline void bnx2x_config_mf_bw(struct bnx2x *bp) +static void bnx2x_config_mf_bw(struct bnx2x *bp) { if (bp->link_vars.link_up) { bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX); @@ -3059,16 +3469,27 @@ static inline void bnx2x_config_mf_bw(struct bnx2x *bp) storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); } -static inline void bnx2x_set_mf_bw(struct bnx2x *bp) +static void bnx2x_set_mf_bw(struct bnx2x *bp) { bnx2x_config_mf_bw(bp); bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0); } +static void bnx2x_handle_eee_event(struct bnx2x *bp) +{ + DP(BNX2X_MSG_MCP, "EEE - LLDP event\n"); + bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); +} + +#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH (20) +#define BNX2X_UPDATE_DRV_INFO_IND_COUNT (25) + static void bnx2x_handle_drv_info_req(struct bnx2x *bp) { enum drv_info_opcode op_code; u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control); + bool release = false; + int wait; /* if drv_info version supported by MFW doesn't match - send NACK */ if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { @@ -3079,6 +3500,9 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp) op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> 
DRV_INFO_CONTROL_OP_CODE_SHIFT; + /* Must prevent other flows from accessing drv_info_to_mcp */ + mutex_lock(&bp->drv_info_mutex); + memset(&bp->slowpath->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp)); @@ -3095,7 +3519,7 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp) default: /* if op code isn't supported - send NACK */ bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); - return; + goto out; } /* if we got drv_info attn from MFW then these fields are defined in @@ -3107,6 +3531,106 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp) U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp))); bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0); + + /* Since possible management wants both this and get_driver_version + * need to wait until management notifies us it finished utilizing + * the buffer. + */ + if (!SHMEM2_HAS(bp, mfw_drv_indication)) { + DP(BNX2X_MSG_MCP, "Management does not support indication\n"); + } else if (!bp->drv_info_mng_owner) { + u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1)); + + for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) { + u32 indication = SHMEM2_RD(bp, mfw_drv_indication); + + /* Management is done; need to clear indication */ + if (indication & bit) { + SHMEM2_WR(bp, mfw_drv_indication, + indication & ~bit); + release = true; + break; + } + + msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH); + } + } + if (!release) { + DP(BNX2X_MSG_MCP, "Management did not release indication\n"); + bp->drv_info_mng_owner = true; + } + +out: + mutex_unlock(&bp->drv_info_mutex); +} + +static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format) +{ + u8 vals[4]; + int i = 0; + + if (bnx2x_format) { + i = sscanf(version, "1.%c%hhd.%hhd.%hhd", + &vals[0], &vals[1], &vals[2], &vals[3]); + if (i > 0) + vals[0] -= '0'; + } else { + i = sscanf(version, "%hhd.%hhd.%hhd.%hhd", + &vals[0], &vals[1], &vals[2], &vals[3]); + } + + while (i < 4) + vals[i++] = 0; + + return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3]; +} + +void bnx2x_update_mng_version(struct bnx2x *bp) +{ + u32 iscsiver = DRV_VER_NOT_LOADED; + u32 fcoever = DRV_VER_NOT_LOADED; + u32 ethver = DRV_VER_NOT_LOADED; + int idx = BP_FW_MB_IDX(bp); + u8 *version; + + if (!SHMEM2_HAS(bp, func_os_drv_ver)) + return; + + mutex_lock(&bp->drv_info_mutex); + /* Must not proceed when `bnx2x_handle_drv_info_req' is feasible */ + if (bp->drv_info_mng_owner) + goto out; + + if (bp->state != BNX2X_STATE_OPEN) + goto out; + + /* Parse ethernet driver version */ + ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true); + if (!CNIC_LOADED(bp)) + goto out; + + /* Try getting storage driver version via cnic */ + memset(&bp->slowpath->drv_info_to_mcp, 0, + sizeof(union drv_info_to_mcp)); + bnx2x_drv_info_iscsi_stat(bp); + version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version; + iscsiver = bnx2x_update_mng_version_utility(version, false); + + memset(&bp->slowpath->drv_info_to_mcp, 0, + sizeof(union drv_info_to_mcp)); + bnx2x_drv_info_fcoe_stat(bp); + version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version; + fcoever = bnx2x_update_mng_version_utility(version, false); + +out: + SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver); + SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver); + SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever); + + mutex_unlock(&bp->drv_info_mutex); + + DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n", + ethver, iscsiver, fcoever); } static void 
bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) @@ -3121,12 +3645,12 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) * locks */ if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { - DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n"); + DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n"); bp->flags |= MF_FUNC_DIS; bnx2x_e1h_disable(bp); } else { - DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n"); + DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n"); bp->flags &= ~MF_FUNC_DIS; bnx2x_e1h_enable(bp); @@ -3146,14 +3670,14 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) } /* must be called under the spq lock */ -static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) +static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) { struct eth_spe *next_spe = bp->spq_prod_bd; if (bp->spq_prod_bd == bp->spq_last_bd) { bp->spq_prod_bd = bp->spq; bp->spq_prod_idx = 0; - DP(NETIF_MSG_TIMER, "end of spq\n"); + DP(BNX2X_MSG_SP, "end of spq\n"); } else { bp->spq_prod_bd++; bp->spq_prod_idx++; @@ -3162,7 +3686,7 @@ static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) } /* must be called under the spq lock */ -static inline void bnx2x_sp_prod_update(struct bnx2x *bp) +static void bnx2x_sp_prod_update(struct bnx2x *bp) { int func = BP_FUNC(bp); @@ -3184,7 +3708,7 @@ static inline void bnx2x_sp_prod_update(struct bnx2x *bp) * @cmd: command to check * @cmd_type: command type */ -static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type) +static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type) { if ((cmd_type == NONE_CONNECTION_TYPE) || (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || @@ -3196,10 +3720,8 @@ static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type) return true; else return false; - } - /** * bnx2x_sp_post - place a single command on an SP ring * @@ -3222,8 +3744,10 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, bool common = bnx2x_is_contextless_ramrod(command, cmd_type); #ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) + if (unlikely(bp->panic)) { + BNX2X_ERR("Can't post SP when there is panic\n"); return -EIO; + } #endif spin_lock_bh(&bp->spq_lock); @@ -3249,10 +3773,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(bp, cid)); - type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; - - type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & - SPE_HDR_FUNCTION_ID); + /* In some cases, type may already contain the func-id + * mainly in SRIOV related use cases, so we add it here only + * if it's not already set. + */ + if (!(cmd_type & SPE_HDR_FUNCTION_ID)) { + type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & + SPE_HDR_CONN_TYPE; + type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & + SPE_HDR_FUNCTION_ID); + } else { + type = cmd_type; + } spe->hdr.type = cpu_to_le16(type); @@ -3262,17 +3794,15 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, /* * It's ok if the actual decrement is issued towards the memory * somewhere between the spin_lock and spin_unlock. Thus no - * more explict memory barrier is needed. + * more explicit memory barrier is needed. */ if (common) atomic_dec(&bp->eq_spq_left); else atomic_dec(&bp->cq_spq_left); - - DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/, - "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) " - "type(0x%x) left (CQ, EQ) (%x,%x)\n", + DP(BNX2X_MSG_SP, + "SPQE[%x] (%x:%x) (cmd, common?) 
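The bnx2x_sp_post() change above lets SRIOV callers pre-encode a function id in cmd_type instead of always stamping in the PF's own id. A compact sketch of that header packing, with illustrative mask values (the real layout comes from the firmware HSI headers):

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout only. */
#define SPE_HDR_CONN_TYPE         0x00ffu
#define SPE_HDR_CONN_TYPE_SHIFT   0
#define SPE_HDR_FUNCTION_ID       0xff00u
#define SPE_HDR_FUNCTION_ID_SHIFT 8

static uint16_t spe_hdr_type(uint16_t cmd_type, uint8_t func)
{
        /* SRIOV callers may pre-encode a (VF) function id; keep it as-is */
        if (cmd_type & SPE_HDR_FUNCTION_ID)
                return cmd_type;

        return ((cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE) |
               (((uint16_t)func << SPE_HDR_FUNCTION_ID_SHIFT) &
                SPE_HDR_FUNCTION_ID);
}

int main(void)
{
        printf("pf: 0x%04x\n", spe_hdr_type(0x0003, 2)); /* 0x0203 */
        printf("vf: 0x%04x\n", spe_hdr_type(0x1503, 2)); /* kept: 0x1503 */
        return 0;
}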
(%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n", bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) + (void *)bp->spq_prod_bd - (void *)bp->spq), command, common, @@ -3292,15 +3822,14 @@ static int bnx2x_acquire_alr(struct bnx2x *bp) might_sleep(); for (j = 0; j < 1000; j++) { - val = (1UL << 31); - REG_WR(bp, GRCBASE_MCP + 0x9c, val); - val = REG_RD(bp, GRCBASE_MCP + 0x9c); - if (val & (1L << 31)) + REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK); + val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK); + if (val & MCPR_ACCESS_LOCK_LOCK) break; - msleep(5); + usleep_range(5000, 10000); } - if (!(val & (1L << 31))) { + if (!(val & MCPR_ACCESS_LOCK_LOCK)) { BNX2X_ERR("Cannot acquire MCP access lock register\n"); rc = -EBUSY; } @@ -3311,13 +3840,13 @@ static int bnx2x_acquire_alr(struct bnx2x *bp) /* release split MCP access lock register */ static void bnx2x_release_alr(struct bnx2x *bp) { - REG_WR(bp, GRCBASE_MCP + 0x9c, 0); + REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0); } #define BNX2X_DEF_SB_ATT_IDX 0x0001 #define BNX2X_DEF_SB_IDX 0x0002 -static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp) +static u16 bnx2x_update_dsb_idx(struct bnx2x *bp) { struct host_sp_status_block *def_sb = bp->def_status_blk; u16 rc = 0; @@ -3333,7 +3862,7 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp) rc |= BNX2X_DEF_SB_IDX; } - /* Do not reorder: indecies reading should complete before handling */ + /* Do not reorder: indices reading should complete before handling */ barrier(); return rc; } @@ -3444,12 +3973,27 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) /* now set back the mask */ if (asserted & ATTN_NIG_FOR_FUNC) { + /* Verify that IGU ack through BAR was written before restoring + * NIG mask. This loop should exit after 2-3 iterations max. + */ + if (bp->common.int_block != INT_BLOCK_HC) { + u32 cnt = 0, igu_acked; + do { + igu_acked = REG_RD(bp, + IGU_REG_ATTENTION_ACK_BITS); + } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) && + (++cnt < MAX_IGU_ATTN_ACK_TO)); + if (!igu_acked) + DP(NETIF_MSG_HW, + "Failed to verify IGU ack on time\n"); + barrier(); + } REG_WR(bp, nig_int_mask_addr, nig_mask); bnx2x_release_phy_lock(bp); } } -static inline void bnx2x_fan_failure(struct bnx2x *bp) +static void bnx2x_fan_failure(struct bnx2x *bp) { int port = BP_PORT(bp); u32 ext_phy_config; @@ -3464,23 +4008,17 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp) ext_phy_config); /* log the failure */ - netdev_err(bp->dev, "Fan Failure on Network Controller has caused" - " the driver to shutdown the card to prevent permanent" - " damage. Please contact OEM Support for assistance\n"); + netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n" + "Please contact OEM Support for assistance\n"); - /* - * Scheudle device reset (unload) + /* Schedule device reset (unload) * This is due to some boards consuming sufficient power when driver is * up to overheat if fan fails. 
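The new IGU verification above is a bounded poll: re-read the attention-ack bits until the NIG bit shows up or the retry budget (MAX_IGU_ATTN_ACK_TO) runs out, and only then restore the NIG mask. A self-contained sketch of the pattern with a simulated register (names hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simulated status register that raises the ack bit after a few reads,
 * standing in for REG_RD(bp, IGU_REG_ATTENTION_ACK_BITS). */
static unsigned int fake_reads;

static uint32_t reg_rd(void)
{
        return (++fake_reads >= 3) ? 0x4 : 0x0;
}

/* Re-read until the bit appears or the retry budget is exhausted. */
static bool wait_for_ack(uint32_t bit, unsigned int budget)
{
        uint32_t val;
        unsigned int cnt = 0;

        do {
                val = reg_rd();
        } while (!(val & bit) && ++cnt < budget);

        return (val & bit) != 0;
}

int main(void)
{
        printf("acked: %d\n", wait_for_ack(0x4, 10));   /* 1 */
        return 0;
}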
*/ - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - schedule_delayed_work(&bp->sp_rtnl_task, 0); - + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0); } -static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) +static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) { int port = BP_PORT(bp); int reg_offset; @@ -3520,7 +4058,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) } } -static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) +static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) { u32 val; @@ -3551,7 +4089,7 @@ static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) } } -static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) +static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) { u32 val; @@ -3595,7 +4133,7 @@ static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) } } -static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) +static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) { u32 val; @@ -3605,6 +4143,7 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) int func = BP_FUNC(bp); REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); + bnx2x_read_mf_cfg(bp); bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp, func_mf_config[BP_ABS_FUNC(bp)].config); val = SHMEM_RD(bp, @@ -3618,6 +4157,11 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) if (val & DRV_STATUS_DRV_INFO_REQ) bnx2x_handle_drv_info_req(bp); + + if (val & DRV_STATUS_VF_DISABLED) + bnx2x_schedule_iov_task(bp, + BNX2X_IOV_HANDLE_FLR); + if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) bnx2x_pmf_update(bp); @@ -3627,6 +4171,11 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) /* start dcbx state machine */ bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_NEG_RECEIVED); + if (val & DRV_STATUS_AFEX_EVENT_MASK) + bnx2x_handle_afex_cmd(bp, + val & DRV_STATUS_AFEX_EVENT_MASK); + if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) + bnx2x_handle_eee_event(bp); if (bp->link_vars.periodic_flags & PERIODIC_FLAGS_LINK_EVENT) { /* sync with link */ @@ -3709,11 +4258,11 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) */ void bnx2x_set_reset_global(struct bnx2x *bp) { - u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); - + u32 val; + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); + val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT); - barrier(); - mmiowb(); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); } /* @@ -3721,13 +4270,13 @@ void bnx2x_set_reset_global(struct bnx2x *bp) * * Should be run under rtnl lock */ -static inline void bnx2x_clear_reset_global(struct bnx2x *bp) +static void bnx2x_clear_reset_global(struct bnx2x *bp) { - u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); - + u32 val; + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); + val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT)); - barrier(); - mmiowb(); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); } /* @@ -3735,9 +4284,9 @@ static inline void bnx2x_clear_reset_global(struct bnx2x *bp) * * should be run under rtnl lock */ -static inline bool bnx2x_reset_is_global(struct bnx2x *bp) +static bool bnx2x_reset_is_global(struct bnx2x *bp) { - u32 val = REG_RD(bp, 
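The recovery-register hunks here and below replace barrier()/mmiowb() pairs with acquire/release of HW_LOCK_RESOURCE_RECOVERY_REG around each read-modify-write. The register is shared by several PCI functions, so, as I read it, the point is RMW atomicity across functions rather than write ordering. A sketch with a pthread mutex standing in for the hardware lock (an analogy, not the driver's mechanism):

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t recovery_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t recovery_reg;   /* stands in for BNX2X_RECOVERY_GLOB_REG */

static void set_reset_bit(uint32_t bit)
{
        pthread_mutex_lock(&recovery_lock);
        recovery_reg |= bit;    /* RMW is atomic w.r.t. other users */
        pthread_mutex_unlock(&recovery_lock);
}

static void clear_reset_bit(uint32_t bit)
{
        pthread_mutex_lock(&recovery_lock);
        recovery_reg &= ~bit;
        pthread_mutex_unlock(&recovery_lock);
}

int main(void)
{
        set_reset_bit(1u << 0);
        clear_reset_bit(1u << 0);
        return 0;
}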
BNX2X_RECOVERY_GLOB_REG); + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false; @@ -3748,17 +4297,19 @@ static inline bool bnx2x_reset_is_global(struct bnx2x *bp) * * Should be run under rtnl lock */ -static inline void bnx2x_set_reset_done(struct bnx2x *bp) +static void bnx2x_set_reset_done(struct bnx2x *bp) { - u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 val; u32 bit = BP_PATH(bp) ? BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); + val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); /* Clear the bit */ val &= ~bit; REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); - barrier(); - mmiowb(); + + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); } /* @@ -3768,15 +4319,16 @@ static inline void bnx2x_set_reset_done(struct bnx2x *bp) */ void bnx2x_set_reset_in_progress(struct bnx2x *bp) { - u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 val; u32 bit = BP_PATH(bp) ? BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); + val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); /* Set the bit */ val |= bit; REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); - barrier(); - mmiowb(); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); } /* @@ -3785,7 +4337,7 @@ void bnx2x_set_reset_in_progress(struct bnx2x *bp) */ bool bnx2x_reset_is_done(struct bnx2x *bp, int engine) { - u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); u32 bit = engine ? BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; @@ -3794,25 +4346,28 @@ bool bnx2x_reset_is_done(struct bnx2x *bp, int engine) } /* - * Increment the load counter for the current engine. + * set pf load for the current pf. * * should be run under rtnl lock */ -void bnx2x_inc_load_cnt(struct bnx2x *bp) +void bnx2x_set_pf_load(struct bnx2x *bp) { - u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 val1, val; u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : BNX2X_PATH0_LOAD_CNT_MASK; u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : BNX2X_PATH0_LOAD_CNT_SHIFT; - DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val); + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); + val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + + DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val); /* get the current counter value */ val1 = (val & mask) >> shift; - /* increment... */ - val1++; + /* set bit of that PF */ + val1 |= (1 << bp->pf_num); /* clear the old value */ val &= ~mask; @@ -3821,34 +4376,35 @@ void bnx2x_inc_load_cnt(struct bnx2x *bp) val |= ((val1 << shift) & mask); REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); - barrier(); - mmiowb(); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); } /** - * bnx2x_dec_load_cnt - decrement the load counter + * bnx2x_clear_pf_load - clear pf load mark * * @bp: driver handle * * Should be run under rtnl lock. * Decrements the load counter for the current engine. Returns - * the new counter value. + * whether other functions are still loaded */ -u32 bnx2x_dec_load_cnt(struct bnx2x *bp) +bool bnx2x_clear_pf_load(struct bnx2x *bp) { - u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 val1, val; u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : BNX2X_PATH0_LOAD_CNT_MASK; u32 shift = BP_PATH(bp) ? 
BNX2X_PATH1_LOAD_CNT_SHIFT : BNX2X_PATH0_LOAD_CNT_SHIFT; - DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val); + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); + val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val); /* get the current counter value */ val1 = (val & mask) >> shift; - /* decrement... */ - val1--; + /* clear bit of that PF */ + val1 &= ~(1 << bp->pf_num); /* clear the old value */ val &= ~mask; @@ -3857,18 +4413,16 @@ u32 bnx2x_dec_load_cnt(struct bnx2x *bp) val |= ((val1 << shift) & mask); REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); - barrier(); - mmiowb(); - - return val1; + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); + return val1 != 0; } /* - * Read the load counter for the current engine. + * Read the load status for the current engine. * * should be run under rtnl lock */ -static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine) +static bool bnx2x_get_load_status(struct bnx2x *bp, int engine) { u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK : BNX2X_PATH0_LOAD_CNT_MASK); @@ -3876,72 +4430,80 @@ static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine) BNX2X_PATH0_LOAD_CNT_SHIFT); u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); - DP(NETIF_MSG_HW, "GLOB_REG=0x%08x\n", val); + DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val); val = (val & mask) >> shift; - DP(NETIF_MSG_HW, "load_cnt for engine %d = %d\n", engine, val); + DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n", + engine, val); - return val; + return val != 0; } -/* - * Reset the load counter for the current engine. - * - * should be run under rtnl lock - */ -static inline void bnx2x_clear_load_cnt(struct bnx2x *bp) +static void _print_parity(struct bnx2x *bp, u32 reg) { - u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); - u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : - BNX2X_PATH0_LOAD_CNT_MASK); - - REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask)); + pr_cont(" [0x%08x] ", REG_RD(bp, reg)); } -static inline void _print_next_block(int idx, const char *blk) +static void _print_next_block(int idx, const char *blk) { pr_cont("%s%s", idx ? ", " : "", blk); } -static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num, - bool print) +static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig, + int *par_num, bool print) { - int i = 0; - u32 cur_bit = 0; + u32 cur_bit; + bool res; + int i; + + res = false; + for (i = 0; sig; i++) { - cur_bit = ((u32)0x1 << i); + cur_bit = (0x1UL << i); if (sig & cur_bit) { - switch (cur_bit) { - case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "BRB"); - break; - case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "PARSER"); - break; - case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "TSDM"); - break; - case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: - if (print) - _print_next_block(par_num++, + res |= true; /* Each bit is real error! 
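The bnx2x_set_pf_load()/bnx2x_clear_pf_load() rework above turns the per-path load counter into a per-path bitmask of loaded PFs, which survives a PF crashing without its matching decrement. A userspace sketch with illustrative mask/shift values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative layout: each engine path owns a byte-wide mask of
 * loaded PFs inside one shared scratch register. */
#define PATH0_LOAD_MASK  0x000000ffu
#define PATH0_LOAD_SHIFT 0
#define PATH1_LOAD_MASK  0x0000ff00u
#define PATH1_LOAD_SHIFT 8

static uint32_t glob_reg;       /* stands in for BNX2X_RECOVERY_GLOB_REG */

static void set_pf_load(int path, int pf_num)
{
        uint32_t mask = path ? PATH1_LOAD_MASK : PATH0_LOAD_MASK;
        uint32_t shift = path ? PATH1_LOAD_SHIFT : PATH0_LOAD_SHIFT;
        uint32_t field = (glob_reg & mask) >> shift;

        field |= 1u << pf_num;  /* mark this PF: a bit, not a counter */
        glob_reg = (glob_reg & ~mask) | ((field << shift) & mask);
}

static bool clear_pf_load(int path, int pf_num)
{
        uint32_t mask = path ? PATH1_LOAD_MASK : PATH0_LOAD_MASK;
        uint32_t shift = path ? PATH1_LOAD_SHIFT : PATH0_LOAD_SHIFT;
        uint32_t field = (glob_reg & mask) >> shift;

        field &= ~(1u << pf_num);
        glob_reg = (glob_reg & ~mask) | ((field << shift) & mask);
        return field != 0;      /* other PFs still loaded? */
}

int main(void)
{
        set_pf_load(0, 0);
        set_pf_load(0, 2);
        printf("others left: %d\n", clear_pf_load(0, 0));       /* 1 */
        return 0;
}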
*/ + + if (print) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: + _print_next_block((*par_num)++, "BRB"); + _print_parity(bp, + BRB1_REG_BRB1_PRTY_STS); + break; + case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: + _print_next_block((*par_num)++, + "PARSER"); + _print_parity(bp, PRS_REG_PRS_PRTY_STS); + break; + case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: + _print_next_block((*par_num)++, "TSDM"); + _print_parity(bp, + TSDM_REG_TSDM_PRTY_STS); + break; + case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: + _print_next_block((*par_num)++, "SEARCHER"); - break; - case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "TCM"); - break; - case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "TSEMI"); - break; - case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "XPB"); - break; + _print_parity(bp, SRC_REG_SRC_PRTY_STS); + break; + case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: + _print_next_block((*par_num)++, "TCM"); + _print_parity(bp, TCM_REG_TCM_PRTY_STS); + break; + case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: + _print_next_block((*par_num)++, + "TSEMI"); + _print_parity(bp, + TSEM_REG_TSEM_PRTY_STS_0); + _print_parity(bp, + TSEM_REG_TSEM_PRTY_STS_1); + break; + case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: + _print_next_block((*par_num)++, "XPB"); + _print_parity(bp, GRCBASE_XPB + + PB_REG_PB_PRTY_STS); + break; + } } /* Clear the bit */ @@ -3949,84 +4511,142 @@ static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num, } } - return par_num; + return res; } -static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, - bool *global, bool print) +static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, + int *par_num, bool *global, + bool print) { - int i = 0; - u32 cur_bit = 0; + u32 cur_bit; + bool res; + int i; + + res = false; + for (i = 0; sig; i++) { - cur_bit = ((u32)0x1 << i); + cur_bit = (0x1UL << i); if (sig & cur_bit) { + res |= true; /* Each bit is real error! 
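All five check_blocks_with_parity*() helpers share the skeleton visible above: walk the signal word bit by bit, treat every set bit as a real error, optionally print the block name plus its parity-status register, and clear the bit so the loop terminates once the word empties. Reduced to its core, with printf standing in for _print_next_block()/_print_parity():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool walk_parity_bits(uint32_t sig)
{
        bool res = false;
        uint32_t cur_bit;
        int i;

        for (i = 0; sig; i++) {
                cur_bit = 0x1UL << i;
                if (sig & cur_bit) {
                        res = true;
                        printf("parity bit %d set\n", i);
                        sig &= ~cur_bit;        /* clear so sig reaches 0 */
                }
        }
        return res;
}

int main(void)
{
        printf("errors: %d\n", walk_parity_bits(0x00000005));
        return 0;
}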
*/ switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "PBF"); + if (print) { + _print_next_block((*par_num)++, "PBF"); + _print_parity(bp, PBF_REG_PBF_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "QM"); + if (print) { + _print_next_block((*par_num)++, "QM"); + _print_parity(bp, QM_REG_QM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "TM"); + if (print) { + _print_next_block((*par_num)++, "TM"); + _print_parity(bp, TM_REG_TM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "XSDM"); + if (print) { + _print_next_block((*par_num)++, "XSDM"); + _print_parity(bp, + XSDM_REG_XSDM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "XCM"); + if (print) { + _print_next_block((*par_num)++, "XCM"); + _print_parity(bp, XCM_REG_XCM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "XSEMI"); + if (print) { + _print_next_block((*par_num)++, + "XSEMI"); + _print_parity(bp, + XSEM_REG_XSEM_PRTY_STS_0); + _print_parity(bp, + XSEM_REG_XSEM_PRTY_STS_1); + } break; case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: - if (print) - _print_next_block(par_num++, + if (print) { + _print_next_block((*par_num)++, "DOORBELLQ"); + _print_parity(bp, + DORQ_REG_DORQ_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "NIG"); + if (print) { + _print_next_block((*par_num)++, "NIG"); + if (CHIP_IS_E1x(bp)) { + _print_parity(bp, + NIG_REG_NIG_PRTY_STS); + } else { + _print_parity(bp, + NIG_REG_NIG_PRTY_STS_0); + _print_parity(bp, + NIG_REG_NIG_PRTY_STS_1); + } + } break; case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: if (print) - _print_next_block(par_num++, + _print_next_block((*par_num)++, "VAUX PCI CORE"); *global = true; break; case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "DEBUG"); + if (print) { + _print_next_block((*par_num)++, + "DEBUG"); + _print_parity(bp, DBG_REG_DBG_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "USDM"); + if (print) { + _print_next_block((*par_num)++, "USDM"); + _print_parity(bp, + USDM_REG_USDM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "UCM"); + if (print) { + _print_next_block((*par_num)++, "UCM"); + _print_parity(bp, UCM_REG_UCM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "USEMI"); + if (print) { + _print_next_block((*par_num)++, + "USEMI"); + _print_parity(bp, + USEM_REG_USEM_PRTY_STS_0); + _print_parity(bp, + USEM_REG_USEM_PRTY_STS_1); + } break; case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "UPB"); + if (print) { + _print_next_block((*par_num)++, "UPB"); + _print_parity(bp, GRCBASE_UPB + + PB_REG_PB_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "CSDM"); + if (print) { + _print_next_block((*par_num)++, "CSDM"); + _print_parity(bp, + CSDM_REG_CSDM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "CCM"); + if (print) { + _print_next_block((*par_num)++, "CCM"); + 
_print_parity(bp, CCM_REG_CCM_PRTY_STS); + } break; } @@ -4035,51 +4655,73 @@ static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, } } - return par_num; + return res; } -static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num, - bool print) +static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, + int *par_num, bool print) { - int i = 0; - u32 cur_bit = 0; + u32 cur_bit; + bool res; + int i; + + res = false; + for (i = 0; sig; i++) { - cur_bit = ((u32)0x1 << i); + cur_bit = (0x1UL << i); if (sig & cur_bit) { - switch (cur_bit) { - case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "CSEMI"); - break; - case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "PXP"); - break; - case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: - if (print) - _print_next_block(par_num++, - "PXPPCICLOCKCLIENT"); - break; - case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "CFC"); - break; - case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "CDU"); - break; - case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "DMAE"); - break; - case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "IGU"); - break; - case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "MISC"); - break; + res |= true; /* Each bit is real error! */ + if (print) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: + _print_next_block((*par_num)++, + "CSEMI"); + _print_parity(bp, + CSEM_REG_CSEM_PRTY_STS_0); + _print_parity(bp, + CSEM_REG_CSEM_PRTY_STS_1); + break; + case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: + _print_next_block((*par_num)++, "PXP"); + _print_parity(bp, PXP_REG_PXP_PRTY_STS); + _print_parity(bp, + PXP2_REG_PXP2_PRTY_STS_0); + _print_parity(bp, + PXP2_REG_PXP2_PRTY_STS_1); + break; + case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: + _print_next_block((*par_num)++, + "PXPPCICLOCKCLIENT"); + break; + case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: + _print_next_block((*par_num)++, "CFC"); + _print_parity(bp, + CFC_REG_CFC_PRTY_STS); + break; + case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: + _print_next_block((*par_num)++, "CDU"); + _print_parity(bp, CDU_REG_CDU_PRTY_STS); + break; + case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: + _print_next_block((*par_num)++, "DMAE"); + _print_parity(bp, + DMAE_REG_DMAE_PRTY_STS); + break; + case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: + _print_next_block((*par_num)++, "IGU"); + if (CHIP_IS_E1x(bp)) + _print_parity(bp, + HC_REG_HC_PRTY_STS); + else + _print_parity(bp, + IGU_REG_IGU_PRTY_STS); + break; + case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: + _print_next_block((*par_num)++, "MISC"); + _print_parity(bp, + MISC_REG_MISC_PRTY_STS); + break; + } } /* Clear the bit */ @@ -4087,40 +4729,49 @@ static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num, } } - return par_num; + return res; } -static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, - bool *global, bool print) +static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig, + int *par_num, bool *global, + bool print) { - int i = 0; - u32 cur_bit = 0; + bool res = false; + u32 cur_bit; + int i; + for (i = 0; sig; i++) { - cur_bit = ((u32)0x1 << i); + cur_bit = (0x1UL << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: if (print) - 
_print_next_block(par_num++, "MCP ROM"); + _print_next_block((*par_num)++, + "MCP ROM"); *global = true; + res |= true; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: if (print) - _print_next_block(par_num++, + _print_next_block((*par_num)++, "MCP UMP RX"); *global = true; + res |= true; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: if (print) - _print_next_block(par_num++, + _print_next_block((*par_num)++, "MCP UMP TX"); *global = true; + res |= true; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: if (print) - _print_next_block(par_num++, + _print_next_block((*par_num)++, "MCP SCPAD"); - *global = true; + /* clear latched SCPAD PATIRY from MCP */ + REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, + 1UL << 10); break; } @@ -4129,48 +4780,58 @@ static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, } } - return par_num; + return res; } -static inline int bnx2x_check_blocks_with_parity4(u32 sig, int par_num, - bool print) +static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig, + int *par_num, bool print) { - int i = 0; - u32 cur_bit = 0; + u32 cur_bit; + bool res; + int i; + + res = false; + for (i = 0; sig; i++) { - cur_bit = ((u32)0x1 << i); + cur_bit = (0x1UL << i); if (sig & cur_bit) { - switch (cur_bit) { - case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "PGLUE_B"); - break; - case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "ATC"); - break; + res |= true; /* Each bit is real error! */ + if (print) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: + _print_next_block((*par_num)++, + "PGLUE_B"); + _print_parity(bp, + PGLUE_B_REG_PGLUE_B_PRTY_STS); + break; + case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: + _print_next_block((*par_num)++, "ATC"); + _print_parity(bp, + ATC_REG_ATC_PRTY_STS); + break; + } } - /* Clear the bit */ sig &= ~cur_bit; } } - return par_num; + return res; } -static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, - u32 *sig) +static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, + u32 *sig) { + bool res = false; + if ((sig[0] & HW_PRTY_ASSERT_SET_0) || (sig[1] & HW_PRTY_ASSERT_SET_1) || (sig[2] & HW_PRTY_ASSERT_SET_2) || (sig[3] & HW_PRTY_ASSERT_SET_3) || (sig[4] & HW_PRTY_ASSERT_SET_4)) { int par_num = 0; - DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: " - "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x " - "[4]:0x%08x\n", + DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n" + "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", sig[0] & HW_PRTY_ASSERT_SET_0, sig[1] & HW_PRTY_ASSERT_SET_1, sig[2] & HW_PRTY_ASSERT_SET_2, @@ -4179,23 +4840,22 @@ static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, if (print) netdev_err(bp->dev, "Parity errors detected in blocks: "); - par_num = bnx2x_check_blocks_with_parity0( - sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print); - par_num = bnx2x_check_blocks_with_parity1( - sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print); - par_num = bnx2x_check_blocks_with_parity2( - sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print); - par_num = bnx2x_check_blocks_with_parity3( - sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print); - par_num = bnx2x_check_blocks_with_parity4( - sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print); + res |= bnx2x_check_blocks_with_parity0(bp, + sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print); + res |= bnx2x_check_blocks_with_parity1(bp, + sig[1] & 
HW_PRTY_ASSERT_SET_1, &par_num, global, print); + res |= bnx2x_check_blocks_with_parity2(bp, + sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print); + res |= bnx2x_check_blocks_with_parity3(bp, + sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print); + res |= bnx2x_check_blocks_with_parity4(bp, + sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print); if (print) pr_cont("\n"); + } - return true; - } else - return false; + return res; } /** @@ -4222,6 +4882,14 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print) attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); + /* Since MCP attentions can't be disabled inside the block, we need to + * read AEU registers to see whether they're currently disabled + */ + attn.sig[3] &= ((REG_RD(bp, + !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 + : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) & + MISC_AEU_ENABLE_MCP_PRTY_BITS) | + ~MISC_AEU_ENABLE_MCP_PRTY_BITS); if (!CHIP_IS_E1x(bp)) attn.sig[4] = REG_RD(bp, @@ -4231,8 +4899,7 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print) return bnx2x_parity_attn(bp, global, print, attn.sig); } - -static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) +static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) { u32 val; if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { @@ -4240,34 +4907,25 @@ static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); BNX2X_ERR("PGLUE hw attention 0x%x\n", val); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" - "ADDRESS_ERROR\n"); + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" - "INCORRECT_RCV_BEHAVIOR\n"); + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" - "WAS_ERROR_ATTN\n"); + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" - "VF_LENGTH_VIOLATION_ATTN\n"); + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" - "VF_GRC_SPACE_VIOLATION_ATTN\n"); + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" - "VF_MSIX_BAR_VIOLATION_ATTN\n"); + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" - "TCPL_ERROR_ATTN\n"); + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" - "TCPL_IN_TWO_RCBS_ATTN\n"); + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" - "CSSNOOP_FIFO_OVERFLOW\n"); + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n"); } if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR); @@ -4275,19 +4933,15 @@ static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) if (val & 
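The new attn.sig[3] masking in bnx2x_chk_parity_attn() is a compact bit trick: keep every non-MCP bit unconditionally, and keep MCP parity bits only where the AEU enable register currently has them enabled. The same expression in isolation, with an illustrative mask value:

#include <stdint.h>
#include <stdio.h>

#define MCP_PRTY_BITS 0xf0000000u       /* illustrative mask */

/* Drop MCP parity bits that are masked off in the AEU enable register;
 * all non-MCP bits pass through untouched. */
static uint32_t filter_mcp_parity(uint32_t sig, uint32_t aeu_enable)
{
        return sig & ((aeu_enable & MCP_PRTY_BITS) | ~MCP_PRTY_BITS);
}

int main(void)
{
        /* MCP bit 31 raised but disabled in AEU -> filtered out */
        printf("0x%08x\n", filter_mcp_parity(0x80000001u, 0x40000000u));
        return 0;
}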
ATC_ATC_INT_STS_REG_ADDRESS_ERROR) BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n"); if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) - BNX2X_ERR("ATC_ATC_INT_STS_REG" - "_ATC_TCPL_TO_NOT_PEND\n"); + BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n"); if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) - BNX2X_ERR("ATC_ATC_INT_STS_REG_" - "ATC_GPA_MULTIPLE_HITS\n"); + BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n"); if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) - BNX2X_ERR("ATC_ATC_INT_STS_REG_" - "ATC_RCPL_TO_EMPTY_CNT\n"); + BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n"); if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n"); if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) - BNX2X_ERR("ATC_ATC_INT_STS_REG_" - "ATC_IREQ_LESS_THAN_STU\n"); + BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n"); } if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | @@ -4296,7 +4950,6 @@ static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); } - } static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) @@ -4346,8 +4999,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) if (deasserted & (1 << index)) { group_mask = &bp->attn_group[index]; - DP(NETIF_MSG_HW, "group[%d]: %08x %08x " - "%08x %08x %08x\n", + DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n", index, group_mask->sig[0], group_mask->sig[1], group_mask->sig[2], group_mask->sig[3], @@ -4432,20 +5084,19 @@ static void bnx2x_attn_int(struct bnx2x *bp) void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, u16 index, u8 op, u8 update) { - u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; - + u32 igu_addr = bp->igu_base_addr; + igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update, igu_addr); } -static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) +static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) { /* No memory barriers */ storm_memset_eq_prod(bp, prod, BP_FUNC(bp)); mmiowb(); /* keep prod updates ordered */ } -#ifdef BCM_CNIC static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, union event_ring_elem *elem) { @@ -4462,14 +5113,13 @@ static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n", cid); - bnx2x_panic_dump(bp); + bnx2x_panic_dump(bp, false); } bnx2x_cnic_cfc_comp(bp, cid, err); return 0; } -#endif -static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp) +static void bnx2x_handle_mcast_eqe(struct bnx2x *bp) { struct bnx2x_mcast_ramrod_params rparam; int rc; @@ -4494,8 +5144,8 @@ static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp) netif_addr_unlock_bh(bp->dev); } -static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp, - union event_ring_elem *elem) +static void bnx2x_handle_classification_eqe(struct bnx2x *bp, + union event_ring_elem *elem) { unsigned long ramrod_flags = 0; int rc = 0; @@ -4505,17 +5155,18 @@ static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp, /* Always push next commands out, don't wait here */ __set_bit(RAMROD_CONT, &ramrod_flags); - switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { + switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo) + >> BNX2X_SWCID_SHIFT) { case BNX2X_FILTER_MAC_PENDING: -#ifdef BCM_CNIC - 
if (cid == BNX2X_ISCSI_ETH_CID) + DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); + if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp))) vlan_mac_obj = &bp->iscsi_l2_mac_obj; else -#endif - vlan_mac_obj = &bp->fp[cid].mac_obj; + vlan_mac_obj = &bp->sp_objs[cid].mac_obj; break; case BNX2X_FILTER_MCAST_PENDING: + DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n"); /* This is only relevant for 57710 where multicast MACs are * configured as unicast MACs using the same ramrod. */ @@ -4533,14 +5184,11 @@ static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp, BNX2X_ERR("Failed to schedule new commands: %d\n", rc); else if (rc > 0) DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n"); - } -#ifdef BCM_CNIC static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start); -#endif -static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp) +static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp) { netif_addr_lock_bh(bp->dev); @@ -4549,37 +5197,116 @@ static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp) /* Send rx_mode command again if was requested */ if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state)) bnx2x_set_storm_rx_mode(bp); -#ifdef BCM_CNIC else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state)) bnx2x_set_iscsi_eth_rx_mode(bp, true); else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state)) bnx2x_set_iscsi_eth_rx_mode(bp, false); -#endif netif_addr_unlock_bh(bp->dev); } -static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj( +static void bnx2x_after_afex_vif_lists(struct bnx2x *bp, + union event_ring_elem *elem) +{ + if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) { + DP(BNX2X_MSG_SP, + "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n", + elem->message.data.vif_list_event.func_bit_map); + bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK, + elem->message.data.vif_list_event.func_bit_map); + } else if (elem->message.data.vif_list_event.echo == + VIF_LIST_RULE_SET) { + DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n"); + bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0); + } +} + +/* called with rtnl_lock */ +static void bnx2x_after_function_update(struct bnx2x *bp) +{ + int q, rc; + struct bnx2x_fastpath *fp; + struct bnx2x_queue_state_params queue_params = {NULL}; + struct bnx2x_queue_update_params *q_update_params = + &queue_params.params.update; + + /* Send Q update command with afex vlan removal values for all Qs */ + queue_params.cmd = BNX2X_Q_CMD_UPDATE; + + /* set silent vlan removal values according to vlan mode */ + __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, + &q_update_params->update_flags); + __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, + &q_update_params->update_flags); + __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags); + + /* in access mode mark mask and value are 0 to strip all vlans */ + if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) { + q_update_params->silent_removal_value = 0; + q_update_params->silent_removal_mask = 0; + } else { + q_update_params->silent_removal_value = + (bp->afex_def_vlan_tag & VLAN_VID_MASK); + q_update_params->silent_removal_mask = VLAN_VID_MASK; + } + + for_each_eth_queue(bp, q) { + /* Set the appropriate Queue object */ + fp = &bp->fp[q]; + queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; + + /* send the ramrod */ + rc = bnx2x_queue_state_change(bp, &queue_params); + if (rc < 0) + BNX2X_ERR("Failed to config silent vlan rem for Q %d\n", + q); + } + + if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) { + fp 
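In bnx2x_after_function_update() above, the silent VLAN removal pair is chosen per AFEX mode: access mode strips every tag with a zero value/mask, while any other mode silently removes only the AFEX default VLAN. The selection, isolated (function name hypothetical):

#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK 0x0fff

static void afex_silent_vlan(int access_mode, uint16_t def_vlan,
                             uint16_t *value, uint16_t *mask)
{
        if (access_mode) {
                *value = 0;     /* mask/value 0 strips all vlans */
                *mask = 0;
        } else {
                *value = def_vlan & VLAN_VID_MASK;
                *mask = VLAN_VID_MASK;
        }
}

int main(void)
{
        uint16_t val, mask;

        afex_silent_vlan(0, 3000, &val, &mask);
        printf("value 0x%03x mask 0x%03x\n", val, mask); /* 0xbb8 0xfff */
        return 0;
}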
= &bp->fp[FCOE_IDX(bp)]; + queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; + + /* clear pending completion bit */ + __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags); + + /* mark latest Q bit */ + smp_mb__before_atomic(); + set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); + smp_mb__after_atomic(); + + /* send Q update ramrod for FCoE Q */ + rc = bnx2x_queue_state_change(bp, &queue_params); + if (rc < 0) + BNX2X_ERR("Failed to config silent vlan rem for Q %d\n", + q); + } else { + /* If no FCoE ring - ACK MCP now */ + bnx2x_link_report(bp); + bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); + } +} + +static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj( struct bnx2x *bp, u32 cid) { DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid); -#ifdef BCM_CNIC - if (cid == BNX2X_FCOE_ETH_CID) - return &bnx2x_fcoe(bp, q_obj); + + if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp))) + return &bnx2x_fcoe_sp_obj(bp, q_obj); else -#endif - return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj); + return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj; } static void bnx2x_eq_int(struct bnx2x *bp) { u16 hw_cons, sw_cons, sw_prod; union event_ring_elem *elem; + u8 echo; u32 cid; u8 opcode; - int spqe_cnt = 0; + int rc, spqe_cnt = 0; struct bnx2x_queue_sp_obj *q_obj; struct bnx2x_func_sp_obj *f_obj = &bp->func_obj; struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw; @@ -4587,7 +5314,7 @@ static void bnx2x_eq_int(struct bnx2x *bp) hw_cons = le16_to_cpu(*bp->eq_cons_sb); /* The hw_cos range is 1-255, 257 - the sw_cons range is 0-254, 256. - * when we get the the next-page we nned to adjust so the loop + * when we get the next-page we need to adjust so the loop * condition below will be met. The next element is the size of a * regular element and hence incrementing by 1 */ @@ -4607,18 +5334,31 @@ static void bnx2x_eq_int(struct bnx2x *bp) for (; sw_cons != hw_cons; sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { - elem = &bp->eq_ring[EQ_DESC(sw_cons)]; - cid = SW_CID(elem->message.data.cfc_del_event.cid); - opcode = elem->message.opcode; + rc = bnx2x_iov_eq_sp_event(bp, elem); + if (!rc) { + DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n", + rc); + goto next_spqe; + } + /* elem CID originates from FW; actually LE */ + cid = SW_CID((__force __le32) + elem->message.data.cfc_del_event.cid); + opcode = elem->message.opcode; /* handle eq element */ switch (opcode) { + case EVENT_RING_OPCODE_VF_PF_CHANNEL: + bnx2x_vf_mbx_schedule(bp, + &elem->message.data.vf_pf_event); + continue; + case EVENT_RING_OPCODE_STAT_QUERY: - DP(NETIF_MSG_TIMER, "got statistics comp event %d\n", - bp->stats_comp++); + DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS), + "got statistics comp event %d\n", + bp->stats_comp++); /* nothing to do with stats comp */ goto next_spqe; @@ -4630,43 +5370,76 @@ static void bnx2x_eq_int(struct bnx2x *bp) */ DP(BNX2X_MSG_SP, "got delete ramrod for MULTI[%d]\n", cid); -#ifdef BCM_CNIC - if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem)) + + if (CNIC_LOADED(bp) && + !bnx2x_cnic_handle_cfc_del(bp, cid, elem)) goto next_spqe; -#endif + q_obj = bnx2x_cid_to_q_obj(bp, cid); if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) break; - - goto next_spqe; case EVENT_RING_OPCODE_STOP_TRAFFIC: - DP(BNX2X_MSG_SP, "got STOP TRAFFIC\n"); + DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n"); + bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_TX_STOP)) break; - bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); goto next_spqe; case 
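The hw_cons/sw_cons comment in bnx2x_eq_int() (ranges like 1-255,257 versus 0-254,256) follows from the ring's last slot per page being a next-page pointer that the consumer index must hop over. An illustrative sketch of that index advance; the real macro and page geometry differ in detail:

#include <stdint.h>
#include <stdio.h>

#define EQ_SLOTS_PER_PAGE 256   /* last slot is the next-page element */

static uint16_t next_eq_idx(uint16_t idx)
{
        if ((idx % EQ_SLOTS_PER_PAGE) == EQ_SLOTS_PER_PAGE - 2)
                return idx + 2; /* skip the next-page element */
        return idx + 1;
}

int main(void)
{
        printf("%u -> %u\n", 253, next_eq_idx(253));    /* 254 */
        printf("%u -> %u\n", 254, next_eq_idx(254));    /* 256 */
        return 0;
}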
EVENT_RING_OPCODE_START_TRAFFIC: - DP(BNX2X_MSG_SP, "got START TRAFFIC\n"); + DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n"); + bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_TX_START)) break; - bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); + goto next_spqe; + + case EVENT_RING_OPCODE_FUNCTION_UPDATE: + echo = elem->message.data.function_update_event.echo; + if (echo == SWITCH_UPDATE) { + DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, + "got FUNC_SWITCH_UPDATE ramrod\n"); + if (f_obj->complete_cmd( + bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE)) + break; + + } else { + int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE; + + DP(BNX2X_MSG_SP | BNX2X_MSG_MCP, + "AFEX: ramrod completed FUNCTION_UPDATE\n"); + f_obj->complete_cmd(bp, f_obj, + BNX2X_F_CMD_AFEX_UPDATE); + + /* We will perform the Queues update from + * sp_rtnl task as all Queue SP operations + * should run under rtnl_lock. + */ + bnx2x_schedule_sp_rtnl(bp, cmd, 0); + } + + goto next_spqe; + + case EVENT_RING_OPCODE_AFEX_VIF_LISTS: + f_obj->complete_cmd(bp, f_obj, + BNX2X_F_CMD_AFEX_VIFLISTS); + bnx2x_after_afex_vif_lists(bp, elem); goto next_spqe; case EVENT_RING_OPCODE_FUNCTION_START: - DP(BNX2X_MSG_SP, "got FUNC_START ramrod\n"); + DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, + "got FUNC_START ramrod\n"); if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START)) break; goto next_spqe; case EVENT_RING_OPCODE_FUNCTION_STOP: - DP(BNX2X_MSG_SP, "got FUNC_STOP ramrod\n"); + DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, + "got FUNC_STOP ramrod\n"); if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP)) break; @@ -4727,7 +5500,7 @@ next_spqe: spqe_cnt++; } /* for */ - smp_mb__before_atomic_inc(); + smp_mb__before_atomic(); atomic_add(spqe_cnt, &bp->eq_spq_left); bp->eq_cons = sw_cons; @@ -4742,51 +5515,64 @@ next_spqe: static void bnx2x_sp_task(struct work_struct *work) { struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); - u16 status; - status = bnx2x_update_dsb_idx(bp); -/* if (status == 0) */ -/* BNX2X_ERR("spurious slowpath interrupt!\n"); */ + DP(BNX2X_MSG_SP, "sp task invoked\n"); - DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status); + /* make sure the atomic interrupt_occurred has been written */ + smp_rmb(); + if (atomic_read(&bp->interrupt_occurred)) { - /* HW attentions */ - if (status & BNX2X_DEF_SB_ATT_IDX) { - bnx2x_attn_int(bp); - status &= ~BNX2X_DEF_SB_ATT_IDX; - } + /* what work needs to be performed? */ + u16 status = bnx2x_update_dsb_idx(bp); - /* SP events: STAT_QUERY and others */ - if (status & BNX2X_DEF_SB_IDX) { -#ifdef BCM_CNIC - struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); + DP(BNX2X_MSG_SP, "status %x\n", status); + DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n"); + atomic_set(&bp->interrupt_occurred, 0); - if ((!NO_FCOE(bp)) && - (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { - /* - * Prevent local bottom-halves from running as - * we are going to change the local NAPI list. 
- */ - local_bh_disable(); - napi_schedule(&bnx2x_fcoe(bp, napi)); - local_bh_enable(); + /* HW attentions */ + if (status & BNX2X_DEF_SB_ATT_IDX) { + bnx2x_attn_int(bp); + status &= ~BNX2X_DEF_SB_ATT_IDX; } -#endif - /* Handle EQ completions */ - bnx2x_eq_int(bp); - bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, - le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1); + /* SP events: STAT_QUERY and others */ + if (status & BNX2X_DEF_SB_IDX) { + struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); - status &= ~BNX2X_DEF_SB_IDX; - } + if (FCOE_INIT(bp) && + (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { + /* Prevent local bottom-halves from running as + * we are going to change the local NAPI list. + */ + local_bh_disable(); + napi_schedule(&bnx2x_fcoe(bp, napi)); + local_bh_enable(); + } - if (unlikely(status)) - DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n", - status); + /* Handle EQ completions */ + bnx2x_eq_int(bp); + bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, + le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1); + + status &= ~BNX2X_DEF_SB_IDX; + } + + /* if status is non zero then perhaps something went wrong */ + if (unlikely(status)) + DP(BNX2X_MSG_SP, + "got an unknown interrupt! (status 0x%x)\n", status); + + /* ack status block only if something was actually handled */ + bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, + le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); + } - bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, - le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); + /* afex - poll to check if VIFSET_ACK should be sent to MFW */ + if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, + &bp->sp_state)) { + bnx2x_link_report(bp); + bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); + } } irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) @@ -4802,8 +5588,7 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) return IRQ_HANDLED; #endif -#ifdef BCM_CNIC - { + if (CNIC_LOADED(bp)) { struct cnic_ops *c_ops; rcu_read_lock(); @@ -4812,22 +5597,23 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) c_ops->cnic_handler(bp->cnic_data, NULL); rcu_read_unlock(); } -#endif - queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); + + /* schedule sp task to perform default status block work, ack + * attentions and enable interrupts. + */ + bnx2x_schedule_sp_task(bp); return IRQ_HANDLED; } /* end of slow path */ - void bnx2x_drv_pulse(struct bnx2x *bp) { SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb, bp->fw_drv_pulse_wr_seq); } - static void bnx2x_timer(unsigned long data) { struct bnx2x *bp = (struct bnx2x *) data; @@ -4835,33 +5621,36 @@ static void bnx2x_timer(unsigned long data) if (!netif_running(bp->dev)) return; - if (!BP_NOMCP(bp)) { + if (IS_PF(bp) && + !BP_NOMCP(bp)) { int mb_idx = BP_FW_MB_IDX(bp); - u32 drv_pulse; - u32 mcp_pulse; + u16 drv_pulse; + u16 mcp_pulse; ++bp->fw_drv_pulse_wr_seq; bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; - /* TBD - add SYSTEM_TIME */ drv_pulse = bp->fw_drv_pulse_wr_seq; bnx2x_drv_pulse(bp); mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & MCP_PULSE_SEQ_MASK); /* The delta between driver pulse and mcp response - * should be 1 (before mcp response) or 0 (after mcp response) + * should not get too big. If the MFW is more than 5 pulses + * behind, we should worry about it enough to generate an error + * log. */ - if ((drv_pulse != mcp_pulse) && - (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { - /* someone lost a heartbeat... 
*/ - BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n", + if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5) + BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n", drv_pulse, mcp_pulse); - } } if (bp->state == BNX2X_STATE_OPEN) bnx2x_stats_handle(bp, STATS_EVENT_UPDATE); + /* sample pf vf bulletin board for new posts from pf */ + if (IS_VF(bp)) + bnx2x_timer_sriov(bp); + mod_timer(&bp->timer, jiffies + bp->current_interval); } @@ -4873,7 +5662,7 @@ static void bnx2x_timer(unsigned long data) * nic init service functions */ -static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) +static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) { u32 i; if (!(len%4) && !(addr%4)) @@ -4882,14 +5671,13 @@ static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) else for (i = 0; i < len; i++) REG_WR8(bp, addr + i, fill); - } /* helper: writes FP SP data to FW - data_size in dwords */ -static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp, - int fw_sb_id, - u32 *sb_data_p, - u32 data_size) +static void bnx2x_wr_fp_sb_data(struct bnx2x *bp, + int fw_sb_id, + u32 *sb_data_p, + u32 data_size) { int index; for (index = 0; index < data_size; index++) @@ -4899,7 +5687,7 @@ static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp, *(sb_data_p + index)); } -static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id) +static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id) { u32 *sb_data_p; u32 data_size = 0; @@ -4932,7 +5720,7 @@ static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id) } /* helper: writes SP SB data to FW */ -static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp, +static void bnx2x_wr_sp_sb_data(struct bnx2x *bp, struct hc_sp_status_block_data *sp_sb_data) { int func = BP_FUNC(bp); @@ -4944,7 +5732,7 @@ static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp, *((u32 *)sp_sb_data + i)); } -static inline void bnx2x_zero_sp_sb(struct bnx2x *bp) +static void bnx2x_zero_sp_sb(struct bnx2x *bp) { int func = BP_FUNC(bp); struct hc_sp_status_block_data sp_sb_data; @@ -4961,12 +5749,9 @@ static inline void bnx2x_zero_sp_sb(struct bnx2x *bp) bnx2x_fill(bp, BAR_CSTRORM_INTMEM + CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0, CSTORM_SP_SYNC_BLOCK_SIZE); - } - -static inline -void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, +static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, int igu_sb_id, int igu_seg_id) { hc_sm->igu_sb_id = igu_sb_id; @@ -4975,10 +5760,8 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, hc_sm->time_to_expire = 0xFFFFFFFF; } - /* allocates state machine ids. 
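The heartbeat change above replaces the exact drv/mcp pulse comparison with a wrap-safe distance check that only flags the MFW once it trails by more than 5 pulses. The arithmetic in isolation (sequence-mask width illustrative):

#include <stdint.h>
#include <stdio.h>

#define MCP_PULSE_SEQ_MASK 0x7fff

/* Wrap-safe distance between driver and MCP heartbeat sequences. */
static int mfw_seems_hung(uint16_t drv_pulse, uint16_t mcp_pulse)
{
        return (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5);
}

int main(void)
{
        printf("%d\n", mfw_seems_hung(3, 0x7fff));      /* delta 4 -> 0 */
        printf("%d\n", mfw_seems_hung(10, 2));          /* delta 8 -> 1 */
        return 0;
}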
*/ -static inline -void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) +static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) { /* zero out state machine indices */ /* rx indices */ @@ -5006,7 +5789,7 @@ void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; } -static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, +void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, u8 vf_valid, int fw_sb_id, int igu_sb_id) { int igu_seg_id; @@ -5060,9 +5843,9 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id); - DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id); + DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id); - /* write indecies to HW */ + /* write indices to HW - PCI guarantees endianity of regpairs */ bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); } @@ -5150,6 +5933,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) bnx2x_zero_sp_sb(bp); + /* PCI guarantees endianity of regpairs */ sp_sb_data.state = SB_ENABLED; sp_sb_data.host_sb_addr.lo = U64_LO(section); sp_sb_data.host_sb_addr.hi = U64_HI(section); @@ -5201,18 +5985,17 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp) bp->eq_cons = 0; bp->eq_prod = NUM_EQ_DESC; bp->eq_cons_sb = BNX2X_EQ_INDEX; - /* we want a warning message before it gets rought... */ + /* we want a warning message before it gets wrought... */ atomic_set(&bp->eq_spq_left, min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1); } - /* called with netif_addr_lock_bh() */ -void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, - unsigned long rx_mode_flags, - unsigned long rx_accept_flags, - unsigned long tx_accept_flags, - unsigned long ramrod_flags) +static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, + unsigned long rx_mode_flags, + unsigned long rx_accept_flags, + unsigned long tx_accept_flags, + unsigned long ramrod_flags) { struct bnx2x_rx_mode_ramrod_params ramrod_param; int rc; @@ -5242,24 +6025,21 @@ void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, rc = bnx2x_config_rx_mode(bp, &ramrod_param); if (rc < 0) { BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode); - return; + return rc; } + + return 0; } -/* called with netif_addr_lock_bh() */ -void bnx2x_set_storm_rx_mode(struct bnx2x *bp) +static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode, + unsigned long *rx_accept_flags, + unsigned long *tx_accept_flags) { - unsigned long rx_mode_flags = 0, ramrod_flags = 0; - unsigned long rx_accept_flags = 0, tx_accept_flags = 0; - -#ifdef BCM_CNIC - if (!NO_FCOE(bp)) + /* Clear the flags first */ + *rx_accept_flags = 0; + *tx_accept_flags = 0; - /* Configure rx_mode of FCoE Queue */ - __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags); -#endif - - switch (bp->rx_mode) { + switch (rx_mode) { case BNX2X_RX_MODE_NONE: /* * 'drop all' supersedes any accept flags that may have been @@ -5267,80 +6047,89 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp) */ break; case BNX2X_RX_MODE_NORMAL: - __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags); - __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags); - __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags); /* internal switching mode */ - __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags); - __set_bit(BNX2X_ACCEPT_MULTICAST, 
&tx_accept_flags); - __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags); + __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); break; case BNX2X_RX_MODE_ALLMULTI: - __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags); - __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags); - __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags); /* internal switching mode */ - __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags); - __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags); - __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags); + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); break; case BNX2X_RX_MODE_PROMISC: - /* According to deffinition of SI mode, iface in promisc mode + /* According to definition of SI mode, iface in promisc mode * should receive matched and unmatched (in resolution of port) * unicast packets. */ - __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags); - __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags); - __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags); - __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags); /* internal switching mode */ - __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags); - __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); if (IS_MF_SI(bp)) - __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags); else - __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags); break; default: - BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode); - return; + BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode); + return -EINVAL; } + /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ if (bp->rx_mode != BNX2X_RX_MODE_NONE) { - __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags); - __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags); } + return 0; +} + +/* called with netif_addr_lock_bh() */ +static int bnx2x_set_storm_rx_mode(struct bnx2x *bp) +{ + unsigned long rx_mode_flags = 0, ramrod_flags = 0; + unsigned long rx_accept_flags = 0, tx_accept_flags = 0; + int rc; + + if (!NO_FCOE(bp)) + /* Configure rx_mode of FCoE Queue */ + __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags); + + rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags, + &tx_accept_flags); + if (rc) + return rc; + __set_bit(RAMROD_RX, &ramrod_flags); __set_bit(RAMROD_TX, &ramrod_flags); - bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags, - tx_accept_flags, ramrod_flags); + return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, + rx_accept_flags, tx_accept_flags, + ramrod_flags); } static void bnx2x_init_internal_common(struct bnx2x *bp) { int i; - if (IS_MF_SI(bp)) - /* - * In switch independent mode, the TSTORM needs to accept - * packets that failed 
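The rx-mode refactor above splits the accept-flag computation into bnx2x_fill_accept_flags() so callers get -EINVAL back instead of a silent return. A userspace sketch of the mode-to-flags mapping with illustrative bit values; the MF-SI promiscuous special case (ACCEPT_ALL_UNICAST on the tx side) is omitted for brevity:

#include <stdio.h>

enum {
        ACCEPT_UNICAST       = 1 << 0,
        ACCEPT_MULTICAST     = 1 << 1,
        ACCEPT_ALL_MULTICAST = 1 << 2,
        ACCEPT_BROADCAST     = 1 << 3,
        ACCEPT_UNMATCHED     = 1 << 4,
        ACCEPT_ANY_VLAN      = 1 << 5,
};

enum rx_mode { MODE_NONE, MODE_NORMAL, MODE_ALLMULTI, MODE_PROMISC };

static int fill_accept_flags(enum rx_mode mode, unsigned *rx, unsigned *tx)
{
        *rx = 0;
        *tx = 0;

        switch (mode) {
        case MODE_NONE:         /* 'drop all' supersedes everything */
                break;
        case MODE_NORMAL:
                *rx = ACCEPT_UNICAST | ACCEPT_MULTICAST | ACCEPT_BROADCAST;
                *tx = *rx;      /* internal switching mirrors rx */
                break;
        case MODE_ALLMULTI:
                *rx = ACCEPT_UNICAST | ACCEPT_ALL_MULTICAST |
                      ACCEPT_BROADCAST;
                *tx = *rx;
                break;
        case MODE_PROMISC:
                *rx = ACCEPT_UNMATCHED | ACCEPT_UNICAST |
                      ACCEPT_ALL_MULTICAST | ACCEPT_BROADCAST;
                *tx = ACCEPT_UNICAST | ACCEPT_ALL_MULTICAST |
                      ACCEPT_BROADCAST;
                break;
        default:
                return -1;      /* -EINVAL in the driver */
        }

        if (mode != MODE_NONE) {        /* no filtering by VLAN here */
                *rx |= ACCEPT_ANY_VLAN;
                *tx |= ACCEPT_ANY_VLAN;
        }
        return 0;
}

int main(void)
{
        unsigned rx, tx;

        fill_accept_flags(MODE_PROMISC, &rx, &tx);
        printf("rx 0x%02x tx 0x%02x\n", rx, tx);        /* rx 0x3d tx 0x2d */
        return 0;
}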
classification, since approximate match - * mac addresses aren't written to NIG LLH - */ - REG_WR8(bp, BAR_TSTRORM_INTMEM + - TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2); - else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */ - REG_WR8(bp, BAR_TSTRORM_INTMEM + - TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0); - /* Zero this manually as its initialization is currently missing in the initTool */ for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) @@ -5378,15 +6167,15 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp) { - return fp->bp->igu_base_sb + fp->index + CNIC_PRESENT; + return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp); } static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp) { - return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT; + return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp); } -static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp) +static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp) { if (CHIP_IS_E1x(fp->bp)) return BP_L_ID(fp->bp) + fp->index; @@ -5410,7 +6199,8 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx) /* init shortcut */ fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp); - /* Setup SB indicies */ + + /* Setup SB indices */ fp->rx_cons_sb = BNX2X_RX_SB_INDEX; /* Configure Queue State object */ @@ -5421,15 +6211,22 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx) /* init tx data */ for_each_cos_in_tx_queue(fp, cos) { - bnx2x_init_txdata(bp, &fp->txdata[cos], - CID_COS_TO_TX_ONLY_CID(fp->cid, cos), - FP_COS_TO_TXQ(fp, cos), - BNX2X_TX_SB_INDEX_BASE + cos); - cids[cos] = fp->txdata[cos].cid; + bnx2x_init_txdata(bp, fp->txdata_ptr[cos], + CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp), + FP_COS_TO_TXQ(fp, cos, bp), + BNX2X_TX_SB_INDEX_BASE + cos, fp); + cids[cos] = fp->txdata_ptr[cos]->cid; } - bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos, - BP_FUNC(bp), bnx2x_sp(bp, q_rdata), + /* nothing more for vf to do here */ + if (IS_VF(bp)) + return; + + bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false, + fp->fw_sb_id, fp->igu_sb_id); + bnx2x_update_fpsb_idx(fp); + bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids, + fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), bnx2x_sp_mapping(bp, q_rdata), q_type); /** @@ -5437,23 +6234,102 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx) */ bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX); - DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) " - "cl_id %d fw_sb %d igu_sb %d\n", - fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, - fp->igu_sb_id); - bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false, - fp->fw_sb_id, fp->igu_sb_id); + DP(NETIF_MSG_IFUP, + "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n", + fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, + fp->igu_sb_id); +} - bnx2x_update_fpsb_idx(fp); +static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata) +{ + int i; + + for (i = 1; i <= NUM_TX_RINGS; i++) { + struct eth_tx_next_bd *tx_next_bd = + &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; + + tx_next_bd->addr_hi = + cpu_to_le32(U64_HI(txdata->tx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); + tx_next_bd->addr_lo = + cpu_to_le32(U64_LO(txdata->tx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); + } + + *txdata->tx_cons_sb = cpu_to_le16(0); + + SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); + 
txdata->tx_db.data.zero_fill1 = 0; + txdata->tx_db.data.prod = 0; + + txdata->tx_pkt_prod = 0; + txdata->tx_pkt_cons = 0; + txdata->tx_bd_prod = 0; + txdata->tx_bd_cons = 0; + txdata->tx_pkt = 0; } -void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) +static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp) { int i; + for_each_tx_queue_cnic(bp, i) + bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]); +} + +static void bnx2x_init_tx_rings(struct bnx2x *bp) +{ + int i; + u8 cos; + for_each_eth_queue(bp, i) - bnx2x_init_eth_fp(bp, i); -#ifdef BCM_CNIC + for_each_cos_in_tx_queue(&bp->fp[i], cos) + bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]); +} + +static void bnx2x_init_fcoe_fp(struct bnx2x *bp) +{ + struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); + unsigned long q_type = 0; + + bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp); + bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp, + BNX2X_FCOE_ETH_CL_ID_IDX); + bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp); + bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; + bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; + bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; + bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]), + fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX, + fp); + + DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index); + + /* qZone id equals to FW (per path) client id */ + bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp); + /* init shortcut */ + bnx2x_fcoe(bp, ustorm_rx_prods_offset) = + bnx2x_rx_ustorm_prods_offset(fp); + + /* Configure Queue State object */ + __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); + __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); + + /* No multi-CoS for FCoE L2 client */ + BUG_ON(fp->max_cos != 1); + + bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, + &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), + bnx2x_sp_mapping(bp, q_rdata), q_type); + + DP(NETIF_MSG_IFUP, + "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n", + fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, + fp->igu_sb_id); +} + +void bnx2x_nic_init_cnic(struct bnx2x *bp) +{ if (!NO_FCOE(bp)) bnx2x_init_fcoe_fp(bp); @@ -5461,20 +6337,46 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) BNX2X_VF_ID_INVALID, false, bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp)); -#endif - - /* Initialize MOD_ABS interrupts */ - bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, - bp->common.shmem_base, bp->common.shmem2_base, - BP_PORT(bp)); /* ensure status block indices were read */ rmb(); + bnx2x_init_rx_rings_cnic(bp); + bnx2x_init_tx_rings_cnic(bp); + + /* flush all */ + mb(); + mmiowb(); +} + +void bnx2x_pre_irq_nic_init(struct bnx2x *bp) +{ + int i; + + /* Setup NIC internals and enable interrupts */ + for_each_eth_queue(bp, i) + bnx2x_init_eth_fp(bp, i); - bnx2x_init_def_sb(bp); - bnx2x_update_dsb_idx(bp); + /* ensure status block indices were read */ + rmb(); bnx2x_init_rx_rings(bp); bnx2x_init_tx_rings(bp); - bnx2x_init_sp_ring(bp); + + if (IS_PF(bp)) { + /* Initialize MOD_ABS interrupts */ + bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, + bp->common.shmem_base, + bp->common.shmem2_base, BP_PORT(bp)); + + /* initialize the default status block and sp ring */ + bnx2x_init_def_sb(bp); + bnx2x_update_dsb_idx(bp); + bnx2x_init_sp_ring(bp); + } else { + bnx2x_memset_stats(bp); + } +} + +void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code) +{ bnx2x_init_eq_ring(bp); bnx2x_init_internal(bp, load_code); bnx2x_pf_init(bp); @@ -5492,12 +6394,7 @@ void bnx2x_nic_init(struct bnx2x *bp, 
u32 load_code) AEU_INPUTS_ATTN_BITS_SPIO5); } -/* end of nic init */ - -/* - * gzip service functions - */ - +/* gzip service functions */ static int bnx2x_gunzip_init(struct bnx2x *bp) { bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE, @@ -5525,8 +6422,7 @@ gunzip_nomem2: bp->gunzip_buf = NULL; gunzip_nomem1: - netdev_err(bp->dev, "Cannot allocate firmware buffer for" - " un-compression\n"); + BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n"); return -ENOMEM; } @@ -5578,8 +6474,8 @@ static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len) bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out); if (bp->gunzip_outlen & 0x3) - netdev_err(bp->dev, "Firmware decompression error:" - " gunzip_outlen (%d) not aligned\n", + netdev_err(bp->dev, + "Firmware decompression error: gunzip_outlen (%d) not aligned\n", bp->gunzip_outlen); bp->gunzip_outlen >>= 2; @@ -5654,7 +6550,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) if (val == 0x10) break; - msleep(10); + usleep_range(10000, 20000); count--; } if (val != 0x10) { @@ -5669,7 +6565,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) if (val == 1) break; - msleep(10); + usleep_range(10000, 20000); count--; } if (val != 0x1) { @@ -5710,7 +6606,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) if (val == 0xb0) break; - msleep(10); + usleep_range(10000, 20000); count--; } if (val != 0xb0) { @@ -5749,10 +6645,9 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) msleep(50); bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); -#ifndef BCM_CNIC - /* set NIC mode */ - REG_WR(bp, PRS_REG_NIC_MODE, 1); -#endif + if (!CNIC_SUPPORT(bp)) + /* set NIC mode */ + REG_WR(bp, PRS_REG_NIC_MODE, 1); /* Enable inputs of parser neighbor blocks */ REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff); @@ -5767,6 +6662,8 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) static void bnx2x_enable_blocks_attention(struct bnx2x *bp) { + u32 val; + REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); if (!CHIP_IS_E1x(bp)) REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40); @@ -5800,17 +6697,14 @@ static void bnx2x_enable_blocks_attention(struct bnx2x *bp) /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */ /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */ - if (CHIP_REV_IS_FPGA(bp)) - REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000); - else if (!CHIP_IS_E1x(bp)) - REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, - (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF - | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT - | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN - | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED - | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED)); - else - REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000); + val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | + PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | + PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN; + if (!CHIP_IS_E1x(bp)) + val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | + PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED; + REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val); + REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0); REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0); REG_WR(bp, TCM_REG_TCM_INT_MASK, 0); @@ -5853,8 +6747,7 @@ static void bnx2x_init_pxp(struct bnx2x *bp) u16 devctl; int r_order, w_order; - pci_read_config_word(bp->pdev, - pci_pcie_cap(bp->pdev) + PCI_EXP_DEVCTL, &devctl); + pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl); DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl); w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); if (bp->mrrs == -1) @@ -5904,64 +6797,19 @@ static void bnx2x_setup_fan_failure_detection(struct 
bnx2x *bp) return; /* Fan failure is indicated by SPIO 5 */ - bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5, - MISC_REGISTERS_SPIO_INPUT_HI_Z); + bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); /* set to active low mode */ val = REG_RD(bp, MISC_REG_SPIO_INT); - val |= ((1 << MISC_REGISTERS_SPIO_5) << - MISC_REGISTERS_SPIO_INT_OLD_SET_POS); + val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); REG_WR(bp, MISC_REG_SPIO_INT, val); /* enable interrupt to signal the IGU */ val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); - val |= (1 << MISC_REGISTERS_SPIO_5); + val |= MISC_SPIO_SPIO5; REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); } -static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num) -{ - u32 offset = 0; - - if (CHIP_IS_E1(bp)) - return; - if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX)) - return; - - switch (BP_ABS_FUNC(bp)) { - case 0: - offset = PXP2_REG_PGL_PRETEND_FUNC_F0; - break; - case 1: - offset = PXP2_REG_PGL_PRETEND_FUNC_F1; - break; - case 2: - offset = PXP2_REG_PGL_PRETEND_FUNC_F2; - break; - case 3: - offset = PXP2_REG_PGL_PRETEND_FUNC_F3; - break; - case 4: - offset = PXP2_REG_PGL_PRETEND_FUNC_F4; - break; - case 5: - offset = PXP2_REG_PGL_PRETEND_FUNC_F5; - break; - case 6: - offset = PXP2_REG_PGL_PRETEND_FUNC_F6; - break; - case 7: - offset = PXP2_REG_PGL_PRETEND_FUNC_F7; - break; - default: - return; - } - - REG_WR(bp, offset, pretend_func_num); - REG_RD(bp, offset); - DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num); -} - void bnx2x_pf_disable(struct bnx2x *bp) { u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); @@ -5972,9 +6820,13 @@ void bnx2x_pf_disable(struct bnx2x *bp) REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0); } -static inline void bnx2x__common_init_phy(struct bnx2x *bp) +static void bnx2x__common_init_phy(struct bnx2x *bp) { u32 shmem_base[2], shmem2_base[2]; + /* Avoid common init in case MFW supports LFA */ + if (SHMEM2_RD(bp, size) > + (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)])) + return; shmem_base[0] = bp->common.shmem_base; shmem2_base[0] = bp->common.shmem2_base; if (!CHIP_IS_E1x(bp)) { @@ -5998,10 +6850,10 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) { u32 val; - DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp)); + DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp)); /* - * take the UNDI lock to protect undi_unload flow from accessing + * take the RESET lock to protect undi_unload flow from accessing * registers while we're resetting the chip */ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); @@ -6131,7 +6983,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) * queues with "old" ILT addresses. * c. PF enable in the PGLC. * d. Clear the was_error of the PF in the PGLC. (could have - * occured while driver was down) + * occurred while driver was down) * e. PF enable in the CFC (WEAK + STRONG) * f. Timers scan enable * 3. PF driver unload flow: @@ -6143,7 +6995,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) * stay set) * f. If this is VNIC 3 of a port then also init * first_timers_ilt_entry to zero and last_timers_ilt_entry - * to the last enrty in the ILT. + * to the last entry in the ILT. * * Notes: * Currently the PF error in the PGLC is non recoverable. 
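
The LFA guard added at the top of bnx2x__common_init_phy() leans on a shmem2 convention that is easy to miss: the management firmware advertises how many bytes of the shared region it actually filled in, so the driver can test for an optional field (here lfa_host_addr) by comparing that advertised size against the field's offset. A minimal userspace sketch of the same guard - the struct and sizes below are mocks, not the real struct shmem2_region layout:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Mock firmware-shared region; only the size-vs-offset guard mirrors the driver. */
struct mock_shmem2 {
	uint32_t size;               /* bytes the firmware actually provides */
	uint32_t validity_map;
	uint32_t lfa_host_addr[2];   /* optional: newer firmware only */
};

/* A field is usable only if the advertised size extends past its offset. */
static int field_present(uint32_t advertised_size, size_t field_off)
{
	return advertised_size > (uint32_t)field_off;
}

int main(void)
{
	uint32_t old_fw = offsetof(struct mock_shmem2, lfa_host_addr);
	uint32_t new_fw = sizeof(struct mock_shmem2);
	size_t port1 = offsetof(struct mock_shmem2, lfa_host_addr[1]);

	printf("old firmware exposes LFA: %d\n", field_present(old_fw, port1));
	printf("new firmware exposes LFA: %d\n", field_present(new_fw, port1));
	return 0;
}

In the driver the same comparison gates the early return: when the firmware is new enough to own link-flap avoidance, the common PHY init is skipped entirely.
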
@@ -6172,7 +7024,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) /* Step 1: set zeroes to all ilt page entries with valid bit on * Step 2: set the timers first/last ilt entry to point * to the entire range to prevent ILT range error for 3rd/4th - * vnic (this code assumes existance of the vnic) + * vnic (this code assumes existence of the vnic) * * both steps performed by call to bnx2x_ilt_client_init_op() * with dummy TM client @@ -6189,7 +7041,6 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1); } - REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0); REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0); @@ -6214,6 +7065,8 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON); + bnx2x_iov_init_dmae(bp); + /* clean the DMAE memory */ bp->dmae_ready = 1; bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1); @@ -6233,7 +7086,6 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON); - /* QM queues pointers table */ bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); @@ -6241,12 +7093,11 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) REG_WR(bp, QM_REG_SOFT_RESET, 1); REG_WR(bp, QM_REG_SOFT_RESET, 0); -#ifdef BCM_CNIC - bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON); -#endif + if (CNIC_SUPPORT(bp)) + bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON); bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON); - REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT); + if (!CHIP_REV_IS_SLOW(bp)) /* enable hw interrupt from doorbell Q */ REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); @@ -6259,12 +7110,24 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) if (!CHIP_IS_E1(bp)) REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan); - if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) - /* Bit-map indicating which L2 hdrs may appear - * after the basic Ethernet header - */ - REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, - bp->path_has_ovlan ? 7 : 6); + if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) { + if (IS_MF_AFEX(bp)) { + /* configure that VNTag and VLAN headers must be + * received in afex mode + */ + REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE); + REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA); + REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6); + REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926); + REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4); + } else { + /* Bit-map indicating which L2 hdrs may appear + * after the basic Ethernet header + */ + REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, + bp->path_has_ovlan ? 7 : 6); + } + } bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON); bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON); @@ -6298,33 +7161,45 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON); bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON); - if (!CHIP_IS_E1x(bp)) - REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, - bp->path_has_ovlan ? 7 : 6); + if (!CHIP_IS_E1x(bp)) { + if (IS_MF_AFEX(bp)) { + /* configure that VNTag and VLAN headers must be + * sent in afex mode + */ + REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE); + REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA); + REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6); + REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926); + REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4); + } else { + REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, + bp->path_has_ovlan ? 
7 : 6); + } + } REG_WR(bp, SRC_REG_SOFT_RST, 1); bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON); -#ifdef BCM_CNIC - REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); - REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); - REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b); - REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a); - REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116); - REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b); - REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf); - REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); - REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f); - REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7); -#endif + if (CNIC_SUPPORT(bp)) { + REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); + REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); + REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b); + REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a); + REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116); + REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b); + REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf); + REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); + REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f); + REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7); + } REG_WR(bp, SRC_REG_SOFT_RST, 0); if (sizeof(union cdu_context) != 1024) /* we currently assume that a context is 1024 bytes */ - dev_alert(&bp->pdev->dev, "please adjust the size " - "of cdu_context(%ld)\n", - (long)sizeof(union cdu_context)); + dev_alert(&bp->pdev->dev, + "please adjust the size of cdu_context(%ld)\n", + (long)sizeof(union cdu_context)); bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON); val = (4 << 24) + (0 << 12) + 1024; @@ -6449,11 +7324,9 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) int port = BP_PORT(bp); int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; u32 low, high; - u32 val; - - bnx2x__link_reset(bp); + u32 val, reg; - DP(BNX2X_MSG_MCP, "starting port init port %d\n", port); + DP(NETIF_MSG_HW, "starting port init port %d\n", port); REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); @@ -6482,16 +7355,17 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) /* QM cid (connection) count */ bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); -#ifdef BCM_CNIC - bnx2x_init_block(bp, BLOCK_TM, init_phase); - REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); - REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); -#endif + if (CNIC_SUPPORT(bp)) { + bnx2x_init_block(bp, BLOCK_TM, init_phase); + REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); + REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); + } bnx2x_init_block(bp, BLOCK_DORQ, init_phase); + bnx2x_init_block(bp, BLOCK_BRB1, init_phase); + if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) { - bnx2x_init_block(bp, BLOCK_BRB1, init_phase); if (IS_MF(bp)) low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); @@ -6516,17 +7390,30 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) BRB1_REG_MAC_GUARANTIED_1 : BRB1_REG_MAC_GUARANTIED_0), 40); - bnx2x_init_block(bp, BLOCK_PRS, init_phase); - if (CHIP_IS_E3B0(bp)) - /* Ovlan exists only if we are in multi-function + - * switch-dependent mode, in switch-independent there - * is no ovlan headers - */ - REG_WR(bp, BP_PORT(bp) ? - PRS_REG_HDRS_AFTER_BASIC_PORT_1 : - PRS_REG_HDRS_AFTER_BASIC_PORT_0, - (bp->path_has_ovlan ? 7 : 6)); + if (CHIP_IS_E3B0(bp)) { + if (IS_MF_AFEX(bp)) { + /* configure headers for AFEX mode */ + REG_WR(bp, BP_PORT(bp) ? + PRS_REG_HDRS_AFTER_BASIC_PORT_1 : + PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE); + REG_WR(bp, BP_PORT(bp) ? + PRS_REG_HDRS_AFTER_TAG_0_PORT_1 : + PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6); + REG_WR(bp, BP_PORT(bp) ? 
+ PRS_REG_MUST_HAVE_HDRS_PORT_1 : + PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA); + } else { + /* Ovlan exists only if we are in multi-function + + * switch-dependent mode, in switch-independent there + * is no ovlan headers + */ + REG_WR(bp, BP_PORT(bp) ? + PRS_REG_HDRS_AFTER_BASIC_PORT_1 : + PRS_REG_HDRS_AFTER_BASIC_PORT_0, + (bp->path_has_ovlan ? 7 : 6)); + } + } bnx2x_init_block(bp, BLOCK_TSDM, init_phase); bnx2x_init_block(bp, BLOCK_CSDM, init_phase); @@ -6558,9 +7445,9 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); } -#ifdef BCM_CNIC - bnx2x_init_block(bp, BLOCK_SRC, init_phase); -#endif + if (CNIC_SUPPORT(bp)) + bnx2x_init_block(bp, BLOCK_SRC, init_phase); + bnx2x_init_block(bp, BLOCK_CDU, init_phase); bnx2x_init_block(bp, BLOCK_CFC, init_phase); @@ -6574,24 +7461,40 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); /* init aeu_mask_attn_func_0/1: - * - SF mode: bits 3-7 are masked. only bits 0-2 are in use - * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF + * - SF mode: bits 3-7 are masked. Only bits 0-2 are in use + * - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF * bits 4-7 are used for "per vn group attention" */ val = IS_MF(bp) ? 0xF7 : 0x7; /* Enable DCBX attention for all but E1 */ val |= CHIP_IS_E1(bp) ? 0 : 0x10; REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); + /* SCPAD_PARITY should NOT trigger close the gates */ + reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0; + REG_WR(bp, reg, + REG_RD(bp, reg) & + ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY); + + reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0; + REG_WR(bp, reg, + REG_RD(bp, reg) & + ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY); + bnx2x_init_block(bp, BLOCK_NIG, init_phase); if (!CHIP_IS_E1x(bp)) { /* Bit-map indicating which L2 hdrs may appear after the * basic Ethernet header */ - REG_WR(bp, BP_PORT(bp) ? - NIG_REG_P1_HDRS_AFTER_BASIC : - NIG_REG_P0_HDRS_AFTER_BASIC, - IS_MF_SD(bp) ? 7 : 6); + if (IS_MF_AFEX(bp)) + REG_WR(bp, BP_PORT(bp) ? + NIG_REG_P1_HDRS_AFTER_BASIC : + NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); + else + REG_WR(bp, BP_PORT(bp) ? + NIG_REG_P1_HDRS_AFTER_BASIC : + NIG_REG_P0_HDRS_AFTER_BASIC, + IS_MF_SD(bp) ? 7 : 6); if (CHIP_IS_E3(bp)) REG_WR(bp, BP_PORT(bp) ? @@ -6613,6 +7516,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) val = 1; break; case MULTI_FUNCTION_SI: + case MULTI_FUNCTION_AFEX: val = 2; break; } @@ -6627,10 +7531,9 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) } } - /* If SPIO5 is set to generate interrupts, enable it for this port */ val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); - if (val & (1 << MISC_REGISTERS_SPIO_5)) { + if (val & MISC_SPIO_SPIO5) { u32 reg_addr = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); val = REG_RD(bp, reg_addr); @@ -6644,27 +7547,197 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) { int reg; + u32 wb_write[2]; if (CHIP_IS_E1(bp)) reg = PXP2_REG_RQ_ONCHIP_AT + index*8; else reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; - bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr)); + wb_write[0] = ONCHIP_ADDR1(addr); + wb_write[1] = ONCHIP_ADDR2(addr); + REG_WR_DMAE(bp, reg, wb_write, 2); } -static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id) +void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf) +{ + u32 data, ctl, cnt = 100; + u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; + u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; + u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; + u32 sb_bit = 1 << (idu_sb_id%32); + u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; + u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; + + /* Not supported in BC mode */ + if (CHIP_INT_MODE_IS_BC(bp)) + return; + + data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup + << IGU_REGULAR_CLEANUP_TYPE_SHIFT) | + IGU_REGULAR_CLEANUP_SET | + IGU_REGULAR_BCLEANUP; + + ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | + func_encode << IGU_CTRL_REG_FID_SHIFT | + IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; + + DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", + data, igu_addr_data); + REG_WR(bp, igu_addr_data, data); + mmiowb(); + barrier(); + DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", + ctl, igu_addr_ctl); + REG_WR(bp, igu_addr_ctl, ctl); + mmiowb(); + barrier(); + + /* wait for clean up to finish */ + while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) + msleep(20); + + if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) { + DP(NETIF_MSG_HW, + "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n", + idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); + } +} + +static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id) { bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/); } -static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) +static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) { u32 i, base = FUNC_ILT_BASE(func); for (i = base; i < base + ILT_PER_FUNC; i++) bnx2x_ilt_wr(bp, i, 0); } +static void bnx2x_init_searcher(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM); + /* T1 hash bits value determines the T1 number of entries */ + REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS); +} + +static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend) +{ + int rc; + struct bnx2x_func_state_params func_params = {NULL}; + struct bnx2x_func_switch_update_params *switch_update_params = + &func_params.params.switch_update; + + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; + + /* Function parameters */ + switch_update_params->suspend = suspend; + + rc = bnx2x_func_state_change(bp, &func_params); + + return rc; +} + +static int bnx2x_reset_nic_mode(struct bnx2x *bp) +{ + int rc, i, port = BP_PORT(bp); + int vlan_en = 0, mac_en[NUM_MACS]; + + /* Close input from network */ + if (bp->mf_mode == SINGLE_FUNCTION) { + 
bnx2x_set_rx_filter(&bp->link_params, 0); + } else { + vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN : + NIG_REG_LLH0_FUNC_EN); + REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN : + NIG_REG_LLH0_FUNC_EN, 0); + for (i = 0; i < NUM_MACS; i++) { + mac_en[i] = REG_RD(bp, port ? + (NIG_REG_LLH1_FUNC_MEM_ENABLE + + 4 * i) : + (NIG_REG_LLH0_FUNC_MEM_ENABLE + + 4 * i)); + REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE + + 4 * i) : + (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0); + } + } + + /* Close BMC to host */ + REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE : + NIG_REG_P1_TX_MNG_HOST_ENABLE, 0); + + /* Suspend Tx switching to the PF. Completion of this ramrod + * further guarantees that all the packets of that PF / child + * VFs in BRB were processed by the Parser, so it is safe to + * change the NIC_MODE register. + */ + rc = bnx2x_func_switch_update(bp, 1); + if (rc) { + BNX2X_ERR("Can't suspend tx-switching!\n"); + return rc; + } + + /* Change NIC_MODE register */ + REG_WR(bp, PRS_REG_NIC_MODE, 0); + + /* Open input from network */ + if (bp->mf_mode == SINGLE_FUNCTION) { + bnx2x_set_rx_filter(&bp->link_params, 1); + } else { + REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN : + NIG_REG_LLH0_FUNC_EN, vlan_en); + for (i = 0; i < NUM_MACS; i++) { + REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE + + 4 * i) : + (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), + mac_en[i]); + } + } + + /* Enable BMC to host */ + REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE : + NIG_REG_P1_TX_MNG_HOST_ENABLE, 1); + + /* Resume Tx switching to the PF */ + rc = bnx2x_func_switch_update(bp, 0); + if (rc) { + BNX2X_ERR("Can't resume tx-switching!\n"); + return rc; + } + + DP(NETIF_MSG_IFUP, "NIC MODE disabled\n"); + return 0; +} + +int bnx2x_init_hw_func_cnic(struct bnx2x *bp) +{ + int rc; + + bnx2x_ilt_init_op_cnic(bp, INITOP_SET); + + if (CONFIGURE_NIC_MODE(bp)) { + /* Configure searcher as part of function hw init */ + bnx2x_init_searcher(bp); + + /* Reset NIC mode */ + rc = bnx2x_reset_nic_mode(bp); + if (rc) + BNX2X_ERR("Can't change NIC mode!\n"); + return rc; + } + + return 0; +} + static int bnx2x_init_hw_func(struct bnx2x *bp) { int port = BP_PORT(bp); @@ -6674,13 +7747,18 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) u16 cdu_ilt_start; u32 addr, val; u32 main_mem_base, main_mem_size, main_mem_prty_clr; - int i, main_mem_width; + int i, main_mem_width, rc; - DP(BNX2X_MSG_MCP, "starting func init func %d\n", func); + DP(NETIF_MSG_HW, "starting func init func %d\n", func); /* FLR cleanup - hmmm */ - if (!CHIP_IS_E1x(bp)) - bnx2x_pf_flr_clnup(bp); + if (!CHIP_IS_E1x(bp)) { + rc = bnx2x_pf_flr_clnup(bp); + if (rc) { + bnx2x_fw_dump(bp); + return rc; + } + } /* set MSI reconfigure capability */ if (bp->common.int_block == INT_BLOCK_HC) { @@ -6696,27 +7774,32 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) ilt = BP_ILT(bp); cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; + if (IS_SRIOV(bp)) + cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS; + cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start); + + /* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precedes + * those of the VFs, so start line should be reset + */ + cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; for (i = 0; i < L2_ILT_LINES(bp); i++) { - ilt->lines[cdu_ilt_start + i].page = - bp->context.vcxt + (ILT_PAGE_CIDS * i); + ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt; ilt->lines[cdu_ilt_start + i].page_mapping = - bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i); - /* cdu ilt pages are allocated manually so there's no need to - set the size */ + 
bp->context[i].cxt_mapping; + ilt->lines[cdu_ilt_start + i].size = bp->context[i].size; } - bnx2x_ilt_init_op(bp, INITOP_SET); - -#ifdef BCM_CNIC - bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM); - /* T1 hash bits value determines the T1 number of entries */ - REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS); -#endif + bnx2x_ilt_init_op(bp, INITOP_SET); -#ifndef BCM_CNIC - /* set NIC mode */ - REG_WR(bp, PRS_REG_NIC_MODE, 1); -#endif /* BCM_CNIC */ + if (!CONFIGURE_NIC_MODE(bp)) { + bnx2x_init_searcher(bp); + REG_WR(bp, PRS_REG_NIC_MODE, 0); + DP(NETIF_MSG_IFUP, "NIC MODE disabled\n"); + } else { + /* Set NIC mode */ + REG_WR(bp, PRS_REG_NIC_MODE, 1); + DP(NETIF_MSG_IFUP, "NIC MODE configured\n"); + } if (!CHIP_IS_E1x(bp)) { u32 pf_conf = IGU_PF_CONF_FUNC_EN; @@ -6777,6 +7860,10 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) bnx2x_init_block(bp, BLOCK_TM, init_phase); bnx2x_init_block(bp, BLOCK_DORQ, init_phase); + REG_WR(bp, DORQ_REG_MODE_ACT, 1); /* no dpm */ + + bnx2x_iov_init_dq(bp); + bnx2x_init_block(bp, BLOCK_BRB1, init_phase); bnx2x_init_block(bp, BLOCK_PRS, init_phase); bnx2x_init_block(bp, BLOCK_TSDM, init_phase); @@ -6909,7 +7996,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) } bnx2x_igu_clear_sb(bp, bp->igu_dsb_id); - /* !!! these should become driver const once + /* !!! These should become driver const once rf-tool supports split-68 const */ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); @@ -6933,9 +8020,9 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) val = REG_RD(bp, main_mem_prty_clr); if (val) - DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC " - "block during " - "function init (0x%x)!\n", val); + DP(NETIF_MSG_HW, + "Hmmm... Parity errors in HC block during function init (0x%x)!\n", + val); /* Clear "false" parity errors in MSI-X table */ for (i = main_mem_base; @@ -6966,161 +8053,162 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) return 0; } +void bnx2x_free_mem_cnic(struct bnx2x *bp) +{ + bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE); + + if (!CHIP_IS_E1x(bp)) + BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping, + sizeof(struct host_hc_status_block_e2)); + else + BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping, + sizeof(struct host_hc_status_block_e1x)); + + BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); +} void bnx2x_free_mem(struct bnx2x *bp) { - /* fastpath */ - bnx2x_free_fp_mem(bp); - /* end of fastpath */ - - BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, - sizeof(struct host_sp_status_block)); + int i; BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, bp->fw_stats_data_sz + bp->fw_stats_req_sz); + if (IS_VF(bp)) + return; + + BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, + sizeof(struct host_sp_status_block)); + BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, sizeof(struct bnx2x_slowpath)); - BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping, - bp->context.size); - + for (i = 0; i < L2_ILT_LINES(bp); i++) + BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping, + bp->context[i].size); bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE); BNX2X_FREE(bp->ilt->lines); -#ifdef BCM_CNIC - if (!CHIP_IS_E1x(bp)) - BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping, - sizeof(struct host_hc_status_block_e2)); - else - BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping, - sizeof(struct host_hc_status_block_e1x)); - - BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); -#endif - BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, 
BCM_PAGE_SIZE); BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, BCM_PAGE_SIZE * NUM_EQ_PAGES); -} - -static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) -{ - int num_groups; - int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1; - - /* number of queues for statistics is number of eth queues + FCoE */ - u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats; - /* Total number of FW statistics requests = - * 1 for port stats + 1 for PF stats + potential 1 for FCoE stats + - * num of queues - */ - bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats; - - - /* Request is built from stats_query_header and an array of - * stats_query_cmd_group each of which contains - * STATS_QUERY_CMD_COUNT rules. The real number or requests is - * configured in the stats_query_header. - */ - num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) + - (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0); + BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); - bp->fw_stats_req_sz = sizeof(struct stats_query_header) + - num_groups * sizeof(struct stats_query_cmd_group); + bnx2x_iov_free_mem(bp); +} - /* Data for statistics requests + stats_conter - * - * stats_counter holds per-STORM counters that are incremented - * when STORM has finished with the current request. - * - * memory for FCoE offloaded statistics are counted anyway, - * even if they will not be sent. - */ - bp->fw_stats_data_sz = sizeof(struct per_port_stats) + - sizeof(struct per_pf_stats) + - sizeof(struct fcoe_statistics_params) + - sizeof(struct per_queue_stats) * num_queue_stats + - sizeof(struct stats_counter); +int bnx2x_alloc_mem_cnic(struct bnx2x *bp) +{ + if (!CHIP_IS_E1x(bp)) { + /* size = the status block + ramrod buffers */ + bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, + sizeof(struct host_hc_status_block_e2)); + if (!bp->cnic_sb.e2_sb) + goto alloc_mem_err; + } else { + bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, + sizeof(struct host_hc_status_block_e1x)); + if (!bp->cnic_sb.e1x_sb) + goto alloc_mem_err; + } - BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping, - bp->fw_stats_data_sz + bp->fw_stats_req_sz); + if (CONFIGURE_NIC_MODE(bp) && !bp->t2) { + /* allocate searcher T2 table, as it wasn't allocated before */ + bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); + if (!bp->t2) + goto alloc_mem_err; + } - /* Set shortcuts */ - bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; - bp->fw_stats_req_mapping = bp->fw_stats_mapping; + /* write address to which L5 should insert its values */ + bp->cnic_eth_dev.addr_drv_info_to_mcp = + &bp->slowpath->drv_info_to_mcp; - bp->fw_stats_data = (struct bnx2x_fw_stats_data *) - ((u8 *)bp->fw_stats + bp->fw_stats_req_sz); + if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC)) + goto alloc_mem_err; - bp->fw_stats_data_mapping = bp->fw_stats_mapping + - bp->fw_stats_req_sz; return 0; alloc_mem_err: - BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, - bp->fw_stats_data_sz + bp->fw_stats_req_sz); + bnx2x_free_mem_cnic(bp); + BNX2X_ERR("Can't allocate memory\n"); return -ENOMEM; } - int bnx2x_alloc_mem(struct bnx2x *bp) { -#ifdef BCM_CNIC - if (!CHIP_IS_E1x(bp)) - /* size = the status block + ramrod buffers */ - BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping, - sizeof(struct host_hc_status_block_e2)); - else - BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping, - sizeof(struct host_hc_status_block_e1x)); - - /* allocate searcher T2 table */ - BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); -#endif - + int i, allocated, context_size; - 
BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping, - sizeof(struct host_sp_status_block)); - - BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, - sizeof(struct bnx2x_slowpath)); + if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) { + /* allocate searcher T2 table */ + bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); + if (!bp->t2) + goto alloc_mem_err; + } - /* Allocated memory for FW statistics */ - if (bnx2x_alloc_fw_stats_mem(bp)) + bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping, + sizeof(struct host_sp_status_block)); + if (!bp->def_status_blk) goto alloc_mem_err; - bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp); - - BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping, - bp->context.size); + bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping, + sizeof(struct bnx2x_slowpath)); + if (!bp->slowpath) + goto alloc_mem_err; - BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES); + /* Allocate memory for CDU context: + * This memory is allocated separately and not in the generic ILT + * functions because CDU differs in few aspects: + * 1. There are multiple entities allocating memory for context - + * 'regular' driver, CNIC and SRIOV driver. Each separately controls + * its own ILT lines. + * 2. Since CDU page-size is not a single 4KB page (which is the case + * for the other ILT clients), to be efficient we want to support + * allocation of sub-page-size in the last entry. + * 3. Context pointers are used by the driver to pass to FW / update + * the context (for the other ILT clients the pointers are used just to + * free the memory during unload). + */ + context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp); + + for (i = 0, allocated = 0; allocated < context_size; i++) { + bp->context[i].size = min(CDU_ILT_PAGE_SZ, + (context_size - allocated)); + bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping, + bp->context[i].size); + if (!bp->context[i].vcxt) + goto alloc_mem_err; + allocated += bp->context[i].size; + } + bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line), + GFP_KERNEL); + if (!bp->ilt->lines) + goto alloc_mem_err; if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC)) goto alloc_mem_err; + if (bnx2x_iov_alloc_mem(bp)) + goto alloc_mem_err; + /* Slow path ring */ - BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE); + bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE); + if (!bp->spq) + goto alloc_mem_err; /* EQ */ - BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping, - BCM_PAGE_SIZE * NUM_EQ_PAGES); - - - /* fastpath */ - /* need to be done at the end, since it's self adjusting to amount - * of memory available for RSS queues - */ - if (bnx2x_alloc_fp_mem(bp)) + bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping, + BCM_PAGE_SIZE * NUM_EQ_PAGES); + if (!bp->eq_ring) goto alloc_mem_err; + return 0; alloc_mem_err: bnx2x_free_mem(bp); + BNX2X_ERR("Can't allocate memory\n"); return -ENOMEM; } @@ -7155,8 +8243,14 @@ int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, } rc = bnx2x_config_vlan_mac(bp, &ramrod_param); - if (rc < 0) + + if (rc == -EEXIST) { + DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc); + /* do not treat adding same MAC as error */ + rc = 0; + } else if (rc < 0) BNX2X_ERR("%s MAC failed\n", (set ? 
"Set" : "Del")); + return rc; } @@ -7183,26 +8277,33 @@ int bnx2x_del_all_macs(struct bnx2x *bp, int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) { - unsigned long ramrod_flags = 0; - -#ifdef BCM_CNIC - if (is_zero_ether_addr(bp->dev->dev_addr) && IS_MF_ISCSI_SD(bp)) { - DP(NETIF_MSG_IFUP, "Ignoring Zero MAC for iSCSI SD mode\n"); + if (is_zero_ether_addr(bp->dev->dev_addr) && + (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) { + DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN, + "Ignoring Zero MAC for STORAGE SD mode\n"); return 0; } -#endif - DP(NETIF_MSG_IFUP, "Adding Eth MAC\n"); + if (IS_PF(bp)) { + unsigned long ramrod_flags = 0; - __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); - /* Eth MAC is set on RSS leading client (fp[0]) */ - return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set, - BNX2X_ETH_MAC, &ramrod_flags); + DP(NETIF_MSG_IFUP, "Adding Eth MAC\n"); + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + return bnx2x_set_mac_one(bp, bp->dev->dev_addr, + &bp->sp_objs->mac_obj, set, + BNX2X_ETH_MAC, &ramrod_flags); + } else { /* vf */ + return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, + bp->fp->index, true); + } } int bnx2x_setup_leading(struct bnx2x *bp) { - return bnx2x_setup_queue(bp, &bp->fp[0], 1); + if (IS_PF(bp)) + return bnx2x_setup_queue(bp, &bp->fp[0], true); + else /* VF */ + return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true); } /** @@ -7212,49 +8313,55 @@ int bnx2x_setup_leading(struct bnx2x *bp) * * In case of MSI-X it will also try to enable MSI-X. */ -static void __devinit bnx2x_set_int_mode(struct bnx2x *bp) +int bnx2x_set_int_mode(struct bnx2x *bp) { + int rc = 0; + + if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) { + BNX2X_ERR("VF not loaded since interrupt mode not msix\n"); + return -EINVAL; + } + switch (int_mode) { - case INT_MODE_MSI: + case BNX2X_INT_MODE_MSIX: + /* attempt to enable msix */ + rc = bnx2x_enable_msix(bp); + + /* msix attained */ + if (!rc) + return 0; + + /* vfs use only msix */ + if (rc && IS_VF(bp)) + return rc; + + /* failed to enable multiple MSI-X */ + BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n", + bp->num_queues, + 1 + bp->num_cnic_queues); + + /* falling through... */ + case BNX2X_INT_MODE_MSI: bnx2x_enable_msi(bp); + /* falling through... 
*/ - case INT_MODE_INTx: - bp->num_queues = 1 + NON_ETH_CONTEXT_USE; - DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); + case BNX2X_INT_MODE_INTX: + bp->num_ethernet_queues = 1; + bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; + BNX2X_DEV_INFO("set number of queues to 1\n"); break; default: - /* Set number of queues according to bp->multi_mode value */ - bnx2x_set_num_queues(bp); - - DP(NETIF_MSG_IFUP, "set number of queues to %d\n", - bp->num_queues); - - /* if we can't use MSI-X we only need one fp, - * so try to enable MSI-X with the requested number of fp's - * and fallback to MSI or legacy INTx with one fp - */ - if (bnx2x_enable_msix(bp)) { - /* failed to enable MSI-X */ - if (bp->multi_mode) - DP(NETIF_MSG_IFUP, - "Multi requested but failed to " - "enable MSI-X (%d), " - "set number of queues to %d\n", - bp->num_queues, - 1 + NON_ETH_CONTEXT_USE); - bp->num_queues = 1 + NON_ETH_CONTEXT_USE; - - /* Try to enable MSI */ - if (!(bp->flags & DISABLE_MSI_FLAG)) - bnx2x_enable_msi(bp); - } - break; + BNX2X_DEV_INFO("unknown value in int_mode module parameter\n"); + return -EINVAL; } + return 0; } -/* must be called prioir to any HW initializations */ +/* must be called prior to any HW initializations */ static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp) { + if (IS_SRIOV(bp)) + return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS; return L2_ILT_LINES(bp); } @@ -7274,13 +8381,12 @@ void bnx2x_ilt_set_info(struct bnx2x *bp) ilt_client->flags = ILT_CLIENT_SKIP_MEM; ilt_client->start = line; line += bnx2x_cid_ilt_lines(bp); -#ifdef BCM_CNIC - line += CNIC_ILT_LINES; -#endif + + if (CNIC_SUPPORT(bp)) + line += CNIC_ILT_LINES; ilt_client->end = line - 1; - DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, " - "flags 0x%x, hw psz %d\n", + DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, @@ -7301,58 +8407,51 @@ void bnx2x_ilt_set_info(struct bnx2x *bp) ilt_client->end = line - 1; - DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, " - "flags 0x%x, hw psz %d\n", + DP(NETIF_MSG_IFUP, + "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); - } - /* SRC */ - ilt_client = &ilt->clients[ILT_CLIENT_SRC]; -#ifdef BCM_CNIC - ilt_client->client_num = ILT_CLIENT_SRC; - ilt_client->page_size = SRC_ILT_PAGE_SZ; - ilt_client->flags = 0; - ilt_client->start = line; - line += SRC_ILT_LINES; - ilt_client->end = line - 1; - DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, " - "flags 0x%x, hw psz %d\n", - ilt_client->start, - ilt_client->end, - ilt_client->page_size, - ilt_client->flags, - ilog2(ilt_client->page_size >> 12)); + if (CNIC_SUPPORT(bp)) { + /* SRC */ + ilt_client = &ilt->clients[ILT_CLIENT_SRC]; + ilt_client->client_num = ILT_CLIENT_SRC; + ilt_client->page_size = SRC_ILT_PAGE_SZ; + ilt_client->flags = 0; + ilt_client->start = line; + line += SRC_ILT_LINES; + ilt_client->end = line - 1; -#else - ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM); -#endif + DP(NETIF_MSG_IFUP, + "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", + ilt_client->start, + ilt_client->end, + ilt_client->page_size, + ilt_client->flags, + ilog2(ilt_client->page_size >> 12)); - /* TM */ - ilt_client = &ilt->clients[ILT_CLIENT_TM]; -#ifdef BCM_CNIC - ilt_client->client_num = ILT_CLIENT_TM; - 
ilt_client->page_size = TM_ILT_PAGE_SZ; - ilt_client->flags = 0; - ilt_client->start = line; - line += TM_ILT_LINES; - ilt_client->end = line - 1; + /* TM */ + ilt_client = &ilt->clients[ILT_CLIENT_TM]; + ilt_client->client_num = ILT_CLIENT_TM; + ilt_client->page_size = TM_ILT_PAGE_SZ; + ilt_client->flags = 0; + ilt_client->start = line; + line += TM_ILT_LINES; + ilt_client->end = line - 1; - DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, " - "flags 0x%x, hw psz %d\n", - ilt_client->start, - ilt_client->end, - ilt_client->page_size, - ilt_client->flags, - ilog2(ilt_client->page_size >> 12)); + DP(NETIF_MSG_IFUP, + "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", + ilt_client->start, + ilt_client->end, + ilt_client->page_size, + ilt_client->flags, + ilog2(ilt_client->page_size >> 12)); + } -#else - ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM); -#endif BUG_ON(line > ILT_MAX_LINES); } @@ -7367,17 +8466,18 @@ void bnx2x_ilt_set_info(struct bnx2x *bp) * - HC configuration * - Queue's CDU context */ -static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp, +static void bnx2x_pf_q_prep_init(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params) { - u8 cos; + int cxt_index, cxt_offset; + /* FCoE Queue uses Default SB, thus has no HC capabilities */ if (!IS_FCOE_FP(fp)) { __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags); - /* If HC is supporterd, enable host coalescing in the transition + /* If HC is supported, enable host coalescing in the transition * to INIT state. */ __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags); @@ -7404,16 +8504,20 @@ static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp, /* set maximum number of COSs supported by this queue */ init_params->max_cos = fp->max_cos; - DP(BNX2X_MSG_SP, "fp: %d setting queue params max cos to: %d\n", + DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n", fp->index, init_params->max_cos); /* set the context pointers queue object */ - for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) + for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { + cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS; + cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index * + ILT_PAGE_CIDS); init_params->cxts[cos] = - &bp->context.vcxt[fp->txdata[cos].cid].eth; + &bp->context[cxt_index].vcxt[cxt_offset].eth; + } } -int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, +static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct bnx2x_queue_state_params *q_params, struct bnx2x_queue_setup_tx_only_params *tx_only_params, int tx_index, bool leading) @@ -7435,9 +8539,8 @@ int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, /* Set Tx TX_ONLY_SETUP parameters */ bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index); - DP(BNX2X_MSG_SP, "preparing to send tx-only ramrod for connection:" - "cos %d, primary cid %d, cid %d, " - "client id %d, sp-client id %d, flags %lx\n", + DP(NETIF_MSG_IFUP, + "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n", tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX], q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id, tx_only_params->gen_params.spcl_id, tx_only_params->flags); @@ -7446,7 +8549,6 @@ int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, return bnx2x_queue_state_change(bp, q_params); } - 
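
The cxt_index/cxt_offset arithmetic just above is the flip side of the per-page bp->context[] allocation introduced earlier in this patch: a CID now names an (ILT page, slot-within-page) pair instead of an index into one flat context array. A standalone sketch of that mapping; ILT_PAGE_CIDS below is an assumed stand-in value, not the driver's real constant:

#include <stdio.h>

#define ILT_PAGE_CIDS 128	/* assumed: contexts per ILT page */

struct cid_loc {
	int page;	/* index into bp->context[] in the driver */
	int offset;	/* context slot within that page */
};

static struct cid_loc cid_to_ilt_slot(int cid)
{
	struct cid_loc loc;

	loc.page = cid / ILT_PAGE_CIDS;
	loc.offset = cid - loc.page * ILT_PAGE_CIDS;	/* i.e. cid % ILT_PAGE_CIDS */
	return loc;
}

int main(void)
{
	struct cid_loc loc = cid_to_ilt_slot(300);

	printf("cid 300 -> page %d, offset %d\n", loc.page, loc.offset);
	return 0;
}

This is also why the SR-IOV changes above advance cdu_ilt_start by BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS: context pages, not individual CIDs, are the unit the ILT hands out.
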
/** * bnx2x_setup_queue - setup queue * @@ -7461,7 +8563,7 @@ int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool leading) { - struct bnx2x_queue_state_params q_params = {0}; + struct bnx2x_queue_state_params q_params = {NULL}; struct bnx2x_queue_setup_params *setup_params = &q_params.params.setup; struct bnx2x_queue_setup_tx_only_params *tx_only_params = @@ -7469,14 +8571,14 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, int rc; u8 tx_index; - DP(BNX2X_MSG_SP, "setting up queue %d\n", fp->index); + DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index); /* reset IGU state skip FCoE L2 queue */ if (!IS_FCOE_FP(fp)) bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); - q_params.q_obj = &fp->q_obj; + q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; /* We want to wait for completion in this context */ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); @@ -7493,8 +8595,7 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, return rc; } - DP(BNX2X_MSG_SP, "init complete\n"); - + DP(NETIF_MSG_IFUP, "init complete\n"); /* Now move the Queue to the SETUP state... */ memset(setup_params, 0, sizeof(*setup_params)); @@ -7515,6 +8616,9 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, /* Set the command */ q_params.cmd = BNX2X_Q_CMD_SETUP; + if (IS_FCOE_FP(fp)) + bp->fcoe_init = true; + /* Change the state to SETUP */ rc = bnx2x_queue_state_change(bp, &q_params); if (rc) { @@ -7544,25 +8648,24 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index) { struct bnx2x_fastpath *fp = &bp->fp[index]; struct bnx2x_fp_txdata *txdata; - struct bnx2x_queue_state_params q_params = {0}; + struct bnx2x_queue_state_params q_params = {NULL}; int rc, tx_index; - DP(BNX2X_MSG_SP, "stopping queue %d cid %d\n", index, fp->cid); + DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid); - q_params.q_obj = &fp->q_obj; + q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; /* We want to wait for completion in this context */ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); - /* close tx-only connections */ for (tx_index = FIRST_TX_ONLY_COS_INDEX; tx_index < fp->max_cos; tx_index++){ /* ascertain this is a normal queue*/ - txdata = &fp->txdata[tx_index]; + txdata = fp->txdata_ptr[tx_index]; - DP(BNX2X_MSG_SP, "stopping tx-only queue %d\n", + DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n", txdata->txq_index); /* send halt terminate on tx-only connection */ @@ -7607,7 +8710,6 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index) return bnx2x_queue_state_change(bp, &q_params); } - static void bnx2x_reset_func(struct bnx2x *bp) { int port = BP_PORT(bp); @@ -7628,16 +8730,16 @@ static void bnx2x_reset_func(struct bnx2x *bp) SB_DISABLED); } -#ifdef BCM_CNIC - /* CNIC SB */ - REG_WR8(bp, BAR_CSTRORM_INTMEM + - CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)), - SB_DISABLED); -#endif + if (CNIC_LOADED(bp)) + /* CNIC SB */ + REG_WR8(bp, BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET + (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED); + /* SP SB */ REG_WR8(bp, BAR_CSTRORM_INTMEM + - CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), - SB_DISABLED); + CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), + SB_DISABLED); for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), @@ -7652,19 +8754,19 @@ static void bnx2x_reset_func(struct bnx2x *bp) REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); } 
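
The teardown paths in this hunk and the next wait on hardware with the driver's usual bounded-poll idiom, seen earlier in bnx2x_igu_clear_sb_gen() and again in the CNIC timer-scan wait just below: read a register, sleep, and give up after a fixed count instead of spinning forever. A standalone sketch with a mocked register read:

#include <stdio.h>

static unsigned int fake_hw_state;

/* Mock register read: the "done" bit appears after a few polls. */
static unsigned int read_reg(void)
{
	return ++fake_hw_state >= 3 ? 0x1u : 0x0u;
}

static int poll_bit(unsigned int bit, int max_tries)
{
	int cnt = max_tries;

	while (!(read_reg() & bit) && --cnt)
		;	/* the driver sleeps here: msleep()/usleep_range() */

	/* One final read decides success, as bnx2x_igu_clear_sb_gen() does. */
	return (read_reg() & bit) ? 0 : -1;
}

int main(void)
{
	printf("poll result: %d\n", poll_bit(0x1, 100));	/* 0 == bit seen */
	return 0;
}

The iteration count doubles as the timeout budget: 200 passes of usleep_range(10000, 20000) in the timer-scan loop below give the two-second ceiling its comment promises.
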
-#ifdef BCM_CNIC - /* Disable Timer scan */ - REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); - /* - * Wait for at least 10ms and up to 2 second for the timers scan to - * complete - */ - for (i = 0; i < 200; i++) { - msleep(10); - if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4)) - break; + if (CNIC_LOADED(bp)) { + /* Disable Timer scan */ + REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); + /* + * Wait for at least 10ms and up to 2 second for the timers + * scan to complete + */ + for (i = 0; i < 200; i++) { + usleep_range(10000, 20000); + if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4)) + break; + } } -#endif /* Clear ILT */ bnx2x_clear_func_ilt(bp, func); @@ -7718,9 +8820,9 @@ static void bnx2x_reset_port(struct bnx2x *bp) /* TODO: Close Doorbell port? */ } -static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code) +static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code) { - struct bnx2x_func_state_params func_params = {0}; + struct bnx2x_func_state_params func_params = {NULL}; /* Prepare parameters for function state transitions */ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); @@ -7733,9 +8835,9 @@ static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code) return bnx2x_func_state_change(bp, &func_params); } -static inline int bnx2x_func_stop(struct bnx2x *bp) +static int bnx2x_func_stop(struct bnx2x *bp) { - struct bnx2x_func_state_params func_params = {0}; + struct bnx2x_func_state_params func_params = {NULL}; int rc; /* Prepare parameters for function state transitions */ @@ -7754,8 +8856,7 @@ static inline int bnx2x_func_stop(struct bnx2x *bp) #ifdef BNX2X_STOP_ON_ERROR return rc; #else - BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry " - "transaction\n"); + BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n"); __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); return bnx2x_func_state_change(bp, &func_params); #endif @@ -7787,6 +8888,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) else if (bp->wol) { u32 emac_base = port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; u8 *mac_addr = bp->dev->dev_addr; + struct pci_dev *pdev = bp->pdev; u32 val; u16 pmc; @@ -7803,9 +8905,9 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); /* Enable the PME and clear the status */ - pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc); + pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc); pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS; - pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc); + pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc); reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; @@ -7818,19 +8920,17 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) else { int path = BP_PATH(bp); - DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] " - "%d, %d, %d\n", - path, load_count[path][0], load_count[path][1], - load_count[path][2]); - load_count[path][0]--; - load_count[path][1 + port]--; - DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] " - "%d, %d, %d\n", - path, load_count[path][0], load_count[path][1], - load_count[path][2]); - if (load_count[path][0] == 0) + DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n", + path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], + bnx2x_load_count[path][2]); + bnx2x_load_count[path][0]--; + bnx2x_load_count[path][1 + port]--; + DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n", + path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], + bnx2x_load_count[path][2]); + if (bnx2x_load_count[path][0] == 0) reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; - else if (load_count[path][1 + port] == 0) + else if (bnx2x_load_count[path][1 + port] == 0) reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; else reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; @@ -7843,15 +8943,18 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. * * @bp: driver handle + * @keep_link: true iff link should be kept up */ -void bnx2x_send_unload_done(struct bnx2x *bp) +void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link) { + u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0; + /* Report UNLOAD_DONE to MCP */ if (!BP_NOMCP(bp)) - bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param); } -static inline int bnx2x_func_wait_started(struct bnx2x *bp) +static int bnx2x_func_wait_started(struct bnx2x *bp) { int tout = 50; int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; @@ -7861,14 +8964,14 @@ static inline int bnx2x_func_wait_started(struct bnx2x *bp) /* * (assumption: No Attention from MCP at this stage) - * PMF probably in the middle of TXdisable/enable transaction + * PMF probably in the middle of TX disable/enable transaction * 1. Sync IRS for default SB - * 2. Sync SP queue - this guarantes us that attention handling started - * 3. Wait, that TXdisable/enable transaction completes + * 2. Sync SP queue - this guarantees us that attention handling started + * 3. Wait, that TX disable/enable transaction completes * - * 1+2 guranty that if DCBx attention was scheduled it already changed - * pending bit of transaction from STARTED-->TX_STOPPED, if we alredy - * received complettion for the transaction the state is TX_STOPPED. 
+ * 1+2 guarantee that if DCBx attention was scheduled it already changed + * pending bit of transaction from STARTED-->TX_STOPPED, if we already + * received completion for the transaction the state is TX_STOPPED. * State will return to STARTED after completion of TX_STOPPED-->STARTED * transaction. */ @@ -7880,6 +8983,7 @@ static inline int bnx2x_func_wait_started(struct bnx2x *bp) synchronize_irq(bp->pdev->irq); flush_workqueue(bnx2x_wq); + flush_workqueue(bnx2x_iov_wq); while (bnx2x_func_get_state(bp, &bp->func_obj) != BNX2X_F_STATE_STARTED && tout--) @@ -7888,16 +8992,17 @@ static inline int bnx2x_func_wait_started(struct bnx2x *bp) if (bnx2x_func_get_state(bp, &bp->func_obj) != BNX2X_F_STATE_STARTED) { #ifdef BNX2X_STOP_ON_ERROR + BNX2X_ERR("Wrong function state\n"); return -EBUSY; #else /* * Failed to complete the transaction in a "good way" * Force both transactions with CLR bit */ - struct bnx2x_func_state_params func_params = {0}; + struct bnx2x_func_state_params func_params = {NULL}; - DP(BNX2X_MSG_SP, "Hmmm... unexpected function state! " - "Forcing STARTED-->TX_ST0PPED-->STARTED\n"); + DP(NETIF_MSG_IFDOWN, + "Hmmm... Unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n"); func_params.f_obj = &bp->func_obj; __set_bit(RAMROD_DRV_CLR_ONLY, @@ -7916,12 +9021,12 @@ static inline int bnx2x_func_wait_started(struct bnx2x *bp) return 0; } -void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) +void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) { int port = BP_PORT(bp); int i, rc = 0; u8 cos; - struct bnx2x_mcast_ramrod_params rparam = {0}; + struct bnx2x_mcast_ramrod_params rparam = {NULL}; u32 reset_code; /* Wait until tx fastpath tasks complete */ @@ -7929,7 +9034,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_cos_in_tx_queue(fp, cos) - rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]); + rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); #ifdef BNX2X_STOP_ON_ERROR if (rc) return; @@ -7937,19 +9042,20 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) } /* Give HW time to discard old tx messages */ - usleep_range(1000, 1000); + usleep_range(1000, 2000); /* Clean all ETH MACs */ - rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false); + rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC, + false); if (rc < 0) BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc); /* Clean up UC list */ - rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC, + rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC, true); if (rc < 0) - BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: " - "%d\n", rc); + BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n", + rc); /* Disable LLH */ if (!CHIP_IS_E1(bp)) @@ -7974,7 +9080,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) netif_addr_unlock_bh(bp->dev); - + bnx2x_iov_chip_cleanup(bp); /* * Send the UNLOAD_REQUEST to the MCP. 
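/* Editor's sketch (annotation, not part of the patch): chip cleanup above
 * drains transmit work queue-by-queue and, inside each queue, ring-by-ring
 * per class of service, before any MAC is deleted. Types and the drain
 * callback are illustrative stand-ins; like the driver's rc handling, a
 * later error overwrites an earlier one:
 */
struct txq { int cos_count; };

static int drain_all_tx(struct txq *queues, int nqueues,
			int (*drain_one)(struct txq *, int cos))
{
	int q, cos, rc = 0;

	for (q = 0; q < nqueues; q++)
		for (cos = 0; cos < queues[q].cos_count; cos++)
			rc = drain_one(&queues[q], cos);
	return rc;
}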
This will return if @@ -7985,7 +9091,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) /* * (assumption: No Attention from MCP at this stage) - * PMF probably in the middle of TXdisable/enable transaction + * PMF probably in the middle of TX disable/enable transaction */ rc = bnx2x_func_wait_started(bp); if (rc) { @@ -7998,13 +9104,24 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) /* Close multi and leading connections * Completions for ramrods are collected in a synchronous way */ - for_each_queue(bp, i) + for_each_eth_queue(bp, i) if (bnx2x_stop_queue(bp, i)) #ifdef BNX2X_STOP_ON_ERROR return; #else goto unload_error; #endif + + if (CNIC_LOADED(bp)) { + for_each_cnic_queue(bp, i) + if (bnx2x_stop_queue(bp, i)) +#ifdef BNX2X_STOP_ON_ERROR + return; +#else + goto unload_error; +#endif + } + /* If SP settings didn't get completed so far - something * very wrong has happen. */ @@ -8024,6 +9141,10 @@ unload_error: /* Disable HW interrupts, NAPI */ bnx2x_netif_stop(bp, 1); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); + if (CNIC_LOADED(bp)) + bnx2x_del_all_napi_cnic(bp); /* Release IRQs */ bnx2x_free_irq(bp); @@ -8033,16 +9154,15 @@ unload_error: if (rc) BNX2X_ERR("HW_RESET failed\n"); - /* Report UNLOAD_DONE to MCP */ - bnx2x_send_unload_done(bp); + bnx2x_send_unload_done(bp, keep_link); } void bnx2x_disable_close_the_gate(struct bnx2x *bp) { u32 val; - DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n"); + DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n"); if (CHIP_IS_E1(bp)) { int port = BP_PORT(bp); @@ -8086,7 +9206,7 @@ static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) : (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0)); } else { - /* Prevent incomming interrupts in IGU */ + /* Prevent incoming interrupts in IGU */ val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, @@ -8095,7 +9215,7 @@ static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); } - DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n", + DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n", close ? "closing" : "opening"); mmiowb(); } @@ -8137,7 +9257,7 @@ static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val) u32 shmem; u32 validity_offset; - DP(NETIF_MSG_HW, "Starting\n"); + DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n"); /* Set `magic' bit in order to save MF config */ if (!CHIP_IS_E1(bp)) @@ -8145,7 +9265,8 @@ static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val) /* Get shmem offset */ shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); - validity_offset = offsetof(struct shmem_region, validity_map[0]); + validity_offset = + offsetof(struct shmem_region, validity_map[BP_PORT(bp)]); /* Clear validity map flags */ if (shmem > 0) @@ -8160,7 +9281,7 @@ static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val) * * @bp: driver handle */ -static inline void bnx2x_mcp_wait_one(struct bnx2x *bp) +static void bnx2x_mcp_wait_one(struct bnx2x *bp) { /* special handling for emulation and FPGA, wait 10 times longer */ @@ -8238,7 +9359,11 @@ static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global) MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; - /* Don't reset the following blocks */ + /* Don't reset the following blocks. 
+ * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be + * reset, as in 4 port device they might still be owned + * by the MCP (there is only one leader per path). + */ not_reset_mask1 = MISC_REGISTERS_RESET_REG_1_RST_HC | MISC_REGISTERS_RESET_REG_1_RST_PXPV | @@ -8254,19 +9379,19 @@ static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global) MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B | MISC_REGISTERS_RESET_REG_2_RST_ATC | - MISC_REGISTERS_RESET_REG_2_PGLC; + MISC_REGISTERS_RESET_REG_2_PGLC | + MISC_REGISTERS_RESET_REG_2_RST_BMAC0 | + MISC_REGISTERS_RESET_REG_2_RST_BMAC1 | + MISC_REGISTERS_RESET_REG_2_RST_EMAC0 | + MISC_REGISTERS_RESET_REG_2_RST_EMAC1 | + MISC_REGISTERS_RESET_REG_2_UMAC0 | + MISC_REGISTERS_RESET_REG_2_UMAC1; /* * Keep the following blocks in reset: * - all xxMACs are handled by the bnx2x_link code. */ stay_reset2 = - MISC_REGISTERS_RESET_REG_2_RST_BMAC0 | - MISC_REGISTERS_RESET_REG_2_RST_BMAC1 | - MISC_REGISTERS_RESET_REG_2_RST_EMAC0 | - MISC_REGISTERS_RESET_REG_2_RST_EMAC1 | - MISC_REGISTERS_RESET_REG_2_UMAC0 | - MISC_REGISTERS_RESET_REG_2_UMAC1 | MISC_REGISTERS_RESET_REG_2_XMAC | MISC_REGISTERS_RESET_REG_2_XMAC_SOFT; @@ -8339,7 +9464,7 @@ static int bnx2x_er_poll_igu_vq(struct bnx2x *bp) if (pend_bits == 0) break; - usleep_range(1000, 1000); + usleep_range(1000, 2000); } while (cnt-- > 0); if (cnt <= 0) { @@ -8356,7 +9481,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global) int cnt = 1000; u32 val = 0; u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; - + u32 tags_63_32 = 0; /* Empty the Tetris buffer, wait for 1s */ do { @@ -8365,21 +9490,21 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global) port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0); port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1); pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2); + if (CHIP_IS_E3(bp)) + tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32); + if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && ((port_is_idle_0 & 0x1) == 0x1) && ((port_is_idle_1 & 0x1) == 0x1) && - (pgl_exp_rom2 == 0xffffffff)) + (pgl_exp_rom2 == 0xffffffff) && + (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff))) break; - usleep_range(1000, 1000); + usleep_range(1000, 2000); } while (cnt-- > 0); if (cnt <= 0) { - DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there" - " are still" - " outstanding read requests after 1s!\n"); - DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x," - " port_is_idle_0=0x%08x," - " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", + BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n"); + BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2); return -EAGAIN; @@ -8394,7 +9519,6 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global) if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp)) return -EAGAIN; - /* TBD: Indicate that "process kill" is in progress to MCP */ /* Clear "unprepared" bit */ @@ -8407,7 +9531,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global) /* Wait for 1ms to empty GLUE and PCI-E core queues, * PSWHST, GRC and PSWRD Tetris buffer. 
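/* Editor's sketch (annotation, not part of the patch): "empty the Tetris
 * buffer" above polls until several independent idle indications hold at
 * the same time, with E3 chips adding one more (the PGLUE tags). The
 * constants mirror the ones in the loop; the struct is illustrative:
 */
#include <stdbool.h>
#include <stdint.h>

struct idle_regs {
	uint32_t sr_cnt, blk_cnt;
	uint32_t port_is_idle_0, port_is_idle_1;
	uint32_t pgl_exp_rom2, tags_63_32;
};

static bool path_is_idle(const struct idle_regs *r, bool is_e3)
{
	return r->sr_cnt == 0x7e &&
	       r->blk_cnt == 0xa0 &&
	       (r->port_is_idle_0 & 0x1) &&
	       (r->port_is_idle_1 & 0x1) &&
	       r->pgl_exp_rom2 == 0xffffffff &&
	       (!is_e3 || r->tags_63_32 == 0xffffffff);
}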
*/ - usleep_range(1000, 1000); + usleep_range(1000, 2000); /* Prepare to chip reset: */ /* MCP */ @@ -8422,6 +9546,10 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global) bnx2x_process_kill_chip_reset(bp, global); barrier(); + /* clear errors in PGB */ + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f); + /* Recover after reset: */ /* MCP */ if (global && bnx2x_reset_mcp_comp(bp, val)) @@ -8429,9 +9557,6 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global) /* TBD: Add resetting the NO_MCP mode DB here */ - /* PXP */ - bnx2x_pxp_prep(bp); - /* Open the gates #2, #3 and #4 */ bnx2x_set_234_gates(bp, false); @@ -8441,17 +9566,43 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global) return 0; } -int bnx2x_leader_reset(struct bnx2x *bp) +static int bnx2x_leader_reset(struct bnx2x *bp) { int rc = 0; bool global = bnx2x_reset_is_global(bp); + u32 load_code; + + /* if not going to reset MCP - load "fake" driver to reset HW while + * driver is owner of the HW + */ + if (!global && !BP_NOMCP(bp)) { + load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, + DRV_MSG_CODE_LOAD_REQ_WITH_LFA); + if (!load_code) { + BNX2X_ERR("MCP response failure, aborting\n"); + rc = -EAGAIN; + goto exit_leader_reset; + } + if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && + (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { + BNX2X_ERR("MCP unexpected resp, aborting\n"); + rc = -EAGAIN; + goto exit_leader_reset2; + } + load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); + if (!load_code) { + BNX2X_ERR("MCP response failure, aborting\n"); + rc = -EAGAIN; + goto exit_leader_reset2; + } + } /* Try to recover after the failure */ if (bnx2x_process_kill(bp, global)) { - netdev_err(bp->dev, "Something bad had happen on engine %d! " - "Aii!\n", BP_PATH(bp)); + BNX2X_ERR("Something bad had happen on engine %d! Aii!\n", + BP_PATH(bp)); rc = -EAGAIN; - goto exit_leader_reset; + goto exit_leader_reset2; } /* @@ -8462,6 +9613,12 @@ int bnx2x_leader_reset(struct bnx2x *bp) if (global) bnx2x_clear_reset_global(bp); +exit_leader_reset2: + /* unload "fake driver" if it was loaded */ + if (!global && !BP_NOMCP(bp)) { + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); + } exit_leader_reset: bp->is_leader = 0; bnx2x_release_leader_lock(bp); @@ -8469,7 +9626,7 @@ exit_leader_reset: return rc; } -static inline void bnx2x_recovery_failed(struct bnx2x *bp) +static void bnx2x_recovery_failed(struct bnx2x *bp) { netdev_err(bp->dev, "Recovery has failed. 
Power cycle is needed.\n"); @@ -8498,13 +9655,16 @@ static inline void bnx2x_recovery_failed(struct bnx2x *bp) static void bnx2x_parity_recover(struct bnx2x *bp) { bool global = false; + u32 error_recovered, error_unrecovered; + bool is_parity; DP(NETIF_MSG_HW, "Handling parity\n"); while (1) { switch (bp->recovery_state) { case BNX2X_RECOVERY_INIT: DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n"); - bnx2x_chk_parity_attn(bp, &global, false); + is_parity = bnx2x_chk_parity_attn(bp, &global, false); + WARN_ON(!is_parity); /* Try to get a LEADER_LOCK HW lock */ if (bnx2x_trylock_leader_lock(bp)) { @@ -8523,20 +9683,11 @@ static void bnx2x_parity_recover(struct bnx2x *bp) /* Stop the driver */ /* If interface has been removed - break */ - if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY)) + if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false)) return; bp->recovery_state = BNX2X_RECOVERY_WAIT; - /* - * Reset MCP command sequence number and MCP mail box - * sequence as we are going to reset the MCP. - */ - if (global) { - bp->fw_seq = 0; - bp->fw_drv_pulse_wr_seq = 0; - } - /* Ensure "is_leader", MCP command sequence and * "recovery_state" update values are seen on other * CPUs. @@ -8548,10 +9699,10 @@ static void bnx2x_parity_recover(struct bnx2x *bp) DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n"); if (bp->is_leader) { int other_engine = BP_PATH(bp) ? 0 : 1; - u32 other_load_counter = - bnx2x_get_load_cnt(bp, other_engine); - u32 load_counter = - bnx2x_get_load_cnt(bp, BP_PATH(bp)); + bool other_load_status = + bnx2x_get_load_status(bp, other_engine); + bool load_status = + bnx2x_get_load_status(bp, BP_PATH(bp)); global = bnx2x_reset_is_global(bp); /* @@ -8559,11 +9710,11 @@ static void bnx2x_parity_recover(struct bnx2x *bp) * the first leader that performs a * leader_reset() reset the global blocks in * order to clear global attentions. Otherwise - * the the gates will remain closed for that + * the gates will remain closed for that * engine. */ - if (load_counter || - (global && other_load_counter)) { + if (load_status || + (global && other_load_status)) { /* Wait until all other functions get * down. */ @@ -8620,13 +9771,32 @@ static void bnx2x_parity_recover(struct bnx2x *bp) return; } - if (bnx2x_nic_load(bp, LOAD_NORMAL)) - bnx2x_recovery_failed(bp); - else { + error_recovered = + bp->eth_stats.recoverable_error; + error_unrecovered = + bp->eth_stats.unrecoverable_error; + bp->recovery_state = + BNX2X_RECOVERY_NIC_LOADING; + if (bnx2x_nic_load(bp, LOAD_NORMAL)) { + error_unrecovered++; + netdev_err(bp->dev, + "Recovery failed. Power cycle needed\n"); + /* Disconnect this device */ + netif_device_detach(bp->dev); + /* Shut down the power */ + bnx2x_set_power_state( + bp, PCI_D3hot); + smp_mb(); + } else { bp->recovery_state = BNX2X_RECOVERY_DONE; + error_recovered++; smp_mb(); } + bp->eth_stats.recoverable_error = + error_recovered; + bp->eth_stats.unrecoverable_error = + error_unrecovered; return; } @@ -8637,6 +9807,8 @@ static void bnx2x_parity_recover(struct bnx2x *bp) } } +static int bnx2x_close(struct net_device *dev); + /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is * scheduled on a general queue in order to prevent a dead lock. 
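/* Editor's sketch (annotation, not part of the patch): the reworked
 * bnx2x_leader_reset() above is a layered-goto unwind -- the new "fake"
 * MCP load gets its own exit label so failures release exactly what was
 * taken, and the success path falls through the same unwind. Helpers here
 * are trivial stand-ins for the firmware commands:
 */
static int fake_mcp_load(void)        { return 0; }	/* LOAD_REQ + LOAD_DONE */
static void fake_mcp_unload(void)     { }		/* UNLOAD_REQ + UNLOAD_DONE */
static int process_kill_hw(void)      { return 0; }
static void release_leader_lock(void) { }

static int leader_reset_shape(int need_fake_load)
{
	int rc = 0;

	if (need_fake_load && fake_mcp_load() != 0) {
		rc = -1;
		goto out;		/* nothing acquired yet */
	}
	if (process_kill_hw() != 0)
		rc = -1;		/* fall through: undo the fake load */

	if (need_fake_load)
		fake_mcp_unload();
out:
	release_leader_lock();
	return rc;
}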
*/ @@ -8646,18 +9818,17 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) rtnl_lock(); - if (!netif_running(bp->dev)) - goto sp_rtnl_exit; + if (!netif_running(bp->dev)) { + rtnl_unlock(); + return; + } - /* if stop on error is defined no recovery flows should be executed */ + if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) { #ifdef BNX2X_STOP_ON_ERROR - BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined " - "so reset not done to allow debug dump,\n" - "you will need to reboot when done\n"); - goto sp_rtnl_not_reset; + BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n" + "you will need to reboot when done\n"); + goto sp_rtnl_not_reset; #endif - - if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) { /* * Clear all pending SP commands as we are going to reset the * function anyway. @@ -8667,10 +9838,17 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) bnx2x_parity_recover(bp); - goto sp_rtnl_exit; + rtnl_unlock(); + return; } if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) { +#ifdef BNX2X_STOP_ON_ERROR + BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n" + "you will need to reboot when done\n"); + goto sp_rtnl_not_reset; +#endif + /* * Clear all pending SP commands as we are going to reset the * function anyway. @@ -8678,33 +9856,75 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) bp->sp_rtnl_state = 0; smp_mb(); - bnx2x_nic_unload(bp, UNLOAD_NORMAL); + bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); bnx2x_nic_load(bp, LOAD_NORMAL); - goto sp_rtnl_exit; + rtnl_unlock(); + return; } #ifdef BNX2X_STOP_ON_ERROR sp_rtnl_not_reset: #endif if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos); - + if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state)) + bnx2x_after_function_update(bp); /* * in case of fan failure we need to reset id if the "stop on error" * debug flag is set, since we trying to prevent permanent overheating * damage */ if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) { - DP(BNX2X_MSG_SP, "fan failure detected. Unloading driver\n"); + DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n"); netif_device_detach(bp->dev); bnx2x_close(bp->dev); + rtnl_unlock(); + return; } -sp_rtnl_exit: + if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) { + DP(BNX2X_MSG_SP, + "sending set mcast vf pf channel message from rtnl sp-task\n"); + bnx2x_vfpf_set_mcast(bp->dev); + } + if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, + &bp->sp_rtnl_state)){ + if (!test_bit(__LINK_STATE_NOCARRIER, &bp->dev->state)) { + bnx2x_tx_disable(bp); + BNX2X_ERR("PF indicated channel is not servicable anymore. 
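/* Editor's sketch (annotation, not part of the patch): the sp_rtnl task
 * above is a bitmask dispatcher -- producers set a bit in sp_rtnl_state,
 * and the task claims each bit atomically before running its handler. A
 * C11-atomics model (bit numbers and handlers invented):
 */
#include <stdatomic.h>
#include <stdbool.h>

enum { JOB_TX_TIMEOUT, JOB_SETUP_TC, JOB_FAN_FAILURE };

static void handle_tx_timeout(void)  { /* unload, then reload the nic */ }
static void handle_setup_tc(void)    { /* reconfigure traffic classes */ }
static void handle_fan_failure(void) { /* detach and close the device */ }

static bool test_and_clear(atomic_uint *state, int bit)
{
	/* true iff the bit was set and we are the one who cleared it */
	return atomic_fetch_and(state, ~(1u << bit)) & (1u << bit);
}

static void sp_task(atomic_uint *state)
{
	if (test_and_clear(state, JOB_TX_TIMEOUT))
		handle_tx_timeout();
	if (test_and_clear(state, JOB_SETUP_TC))
		handle_setup_tc();
	if (test_and_clear(state, JOB_FAN_FAILURE))
		handle_fan_failure();
}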
This means this VF device is no longer operational\n"); + } + } + + if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) { + DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n"); + bnx2x_set_rx_mode_inner(bp); + } + + if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN, + &bp->sp_rtnl_state)) + bnx2x_pf_set_vfs_vlan(bp); + + if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) { + bnx2x_dcbx_stop_hw_tx(bp); + bnx2x_dcbx_resume_hw_tx(bp); + } + + if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION, + &bp->sp_rtnl_state)) + bnx2x_update_mng_version(bp); + + /* work which needs rtnl lock not-taken (as it takes the lock itself and + * can be called from other contexts as well) + */ rtnl_unlock(); -} -/* end of nic load/unload */ + /* enable SR-IOV if applicable */ + if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, + &bp->sp_rtnl_state)) { + bnx2x_disable_sriov(bp); + bnx2x_enable_sriov(bp); + } +} static void bnx2x_period_task(struct work_struct *work) { @@ -8748,143 +9968,598 @@ static u32 bnx2x_get_pretend_reg(struct bnx2x *bp) return base + (BP_ABS_FUNC(bp)) * stride; } -static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp) +static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, + struct bnx2x_mac_vals *vals) { - u32 reg = bnx2x_get_pretend_reg(bp); + u32 val, base_addr, offset, mask, reset_reg; + bool mac_stopped = false; + u8 port = BP_PORT(bp); - /* Flush all outstanding writes */ - mmiowb(); + /* reset addresses as they also mark which values were changed */ + vals->bmac_addr = 0; + vals->umac_addr = 0; + vals->xmac_addr = 0; + vals->emac_addr = 0; - /* Pretend to be function 0 */ - REG_WR(bp, reg, 0); - REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */ + reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2); - /* From now we are in the "like-E1" mode */ - bnx2x_int_disable(bp); + if (!CHIP_IS_E3(bp)) { + val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); + mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; + if ((mask & reset_reg) && val) { + u32 wb_data[2]; + BNX2X_DEV_INFO("Disable bmac Rx\n"); + base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM + : NIG_REG_INGRESS_BMAC0_MEM; + offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL + : BIGMAC_REGISTER_BMAC_CONTROL; - /* Flush all outstanding writes */ - mmiowb(); + /* + * use rd/wr since we cannot use dmae. This is safe + * since MCP won't access the bus due to the request + * to unload, and no function on the path can be + * loaded at this time. + */ + wb_data[0] = REG_RD(bp, base_addr + offset); + wb_data[1] = REG_RD(bp, base_addr + offset + 0x4); + vals->bmac_addr = base_addr + offset; + vals->bmac_val[0] = wb_data[0]; + vals->bmac_val[1] = wb_data[1]; + wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; + REG_WR(bp, vals->bmac_addr, wb_data[0]); + REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]); + } + BNX2X_DEV_INFO("Disable emac Rx\n"); + vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4; + vals->emac_val = REG_RD(bp, vals->emac_addr); + REG_WR(bp, vals->emac_addr, 0); + mac_stopped = true; + } else { + if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { + BNX2X_DEV_INFO("Disable xmac Rx\n"); + base_addr = BP_PORT(bp) ? 
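/* Editor's sketch (annotation, not part of the patch): the close-MAC helper
 * in these hunks zeroes each Rx-enable register but first records its
 * address and old value (struct bnx2x_mac_vals), so the common unload flow
 * can restore them after the reset. Generic save/zero/restore shape, with
 * rd/wr standing in for REG_RD/REG_WR:
 */
#include <stdint.h>

struct saved_reg {
	uint32_t addr;			/* 0 means nothing was saved */
	uint32_t val;
};

static void save_and_zero(struct saved_reg *s, uint32_t addr,
			  uint32_t (*rd)(uint32_t),
			  void (*wr)(uint32_t, uint32_t))
{
	s->addr = addr;
	s->val = rd(addr);		/* remember the old value... */
	wr(addr, 0);			/* ...then silence the block */
}

static void restore_reg(const struct saved_reg *s,
			void (*wr)(uint32_t, uint32_t))
{
	if (s->addr)			/* restore only what was changed */
		wr(s->addr, s->val);
}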
GRCBASE_XMAC1 : GRCBASE_XMAC0; + val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI); + REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI, + val & ~(1 << 1)); + REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI, + val | (1 << 1)); + vals->xmac_addr = base_addr + XMAC_REG_CTRL; + vals->xmac_val = REG_RD(bp, vals->xmac_addr); + REG_WR(bp, vals->xmac_addr, 0); + mac_stopped = true; + } + mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; + if (mask & reset_reg) { + BNX2X_DEV_INFO("Disable umac Rx\n"); + base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0; + vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG; + vals->umac_val = REG_RD(bp, vals->umac_addr); + REG_WR(bp, vals->umac_addr, 0); + mac_stopped = true; + } + } - /* Restore the original function */ - REG_WR(bp, reg, BP_ABS_FUNC(bp)); - REG_RD(bp, reg); + if (mac_stopped) + msleep(20); } -static inline void bnx2x_undi_int_disable(struct bnx2x *bp) +#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) +#define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff) +#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) +#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) + +#define BCM_5710_UNDI_FW_MF_MAJOR (0x07) +#define BCM_5710_UNDI_FW_MF_MINOR (0x08) +#define BCM_5710_UNDI_FW_MF_VERS (0x05) +#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4)) +#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4)) + +static bool bnx2x_prev_is_after_undi(struct bnx2x *bp) { - if (CHIP_IS_E1(bp)) - bnx2x_int_disable(bp); - else - bnx2x_undi_int_disable_e1h(bp); + /* UNDI marks its presence in DORQ - + * it initializes CID offset for normal bell to 0x7 + */ + if (!(REG_RD(bp, MISC_REG_RESET_REG_1) & + MISC_REGISTERS_RESET_REG_1_RST_DORQ)) + return false; + + if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) { + BNX2X_DEV_INFO("UNDI previously loaded\n"); + return true; + } + + return false; } -static void __devinit bnx2x_undi_unload(struct bnx2x *bp) +static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp) { - u32 val; + u8 major, minor, version; + u32 fw; - /* Check if there is any driver already loaded */ - val = REG_RD(bp, MISC_REG_UNPREPARED); - if (val == 0x1) { + /* Must check that FW is loaded */ + if (!(REG_RD(bp, MISC_REG_RESET_REG_1) & + MISC_REGISTERS_RESET_REG_1_RST_XSEM)) { + BNX2X_DEV_INFO("XSEM is reset - UNDI MF FW is not loaded\n"); + return false; + } - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); - /* - * Check if it is the UNDI driver - * UNDI driver initializes CID offset for normal bell to 0x7 - */ - val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); - if (val == 0x7) { - u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; - /* save our pf_num */ - int orig_pf_num = bp->pf_num; - int port; - u32 swap_en, swap_val, value; + /* Read Currently loaded FW version */ + fw = REG_RD(bp, XSEM_REG_PRAM); + major = fw & 0xff; + minor = (fw >> 0x8) & 0xff; + version = (fw >> 0x10) & 0xff; + BNX2X_DEV_INFO("Loaded FW: 0x%08x: Major 0x%02x Minor 0x%02x Version 0x%02x\n", + fw, major, minor, version); + + if (major > BCM_5710_UNDI_FW_MF_MAJOR) + return true; + + if ((major == BCM_5710_UNDI_FW_MF_MAJOR) && + (minor > BCM_5710_UNDI_FW_MF_MINOR)) + return true; + + if ((major == BCM_5710_UNDI_FW_MF_MAJOR) && + (minor == BCM_5710_UNDI_FW_MF_MINOR) && + (version >= BCM_5710_UNDI_FW_MF_VERS)) + return true; + + return false; +} + +static void bnx2x_prev_unload_undi_mf(struct bnx2x *bp) +{ + int i; + + /* Due to legacy (FW) code, the first function on each engine has a + * different 
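/* Editor's sketch (annotation, not part of the patch): the MF-UNDI check
 * above unpacks a version word read from XSEM_REG_PRAM -- major in bits
 * 7:0, minor in 15:8, revision in 23:16 -- and compares it field by field
 * against the 7.8.5 threshold:
 */
#include <stdbool.h>
#include <stdint.h>

static bool fw_at_least(uint32_t fw, uint8_t maj, uint8_t min, uint8_t rev)
{
	uint8_t fmaj = fw & 0xff;
	uint8_t fmin = (fw >> 8) & 0xff;
	uint8_t frev = (fw >> 16) & 0xff;

	if (fmaj != maj)
		return fmaj > maj;
	if (fmin != min)
		return fmin > min;
	return frev >= rev;
}
/* usage mirroring the driver: fw_at_least(fw, 7, 8, 5) */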
offset macro from the rest of the functions. + * Setting this for all 8 functions is harmless regardless of whether + * this is actually a multi-function device. + */ + for (i = 0; i < 2; i++) + REG_WR(bp, BNX2X_PREV_UNDI_MF_PORT(i), 1); + + for (i = 2; i < 8; i++) + REG_WR(bp, BNX2X_PREV_UNDI_MF_FUNC(i - 2), 1); + + BNX2X_DEV_INFO("UNDI FW (MF) set to discard\n"); +} + +static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc) +{ + u16 rcq, bd; + u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port)); + + rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc; + bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc; + + tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd); + REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg); + BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n", + port, bd, rcq); +} + +static int bnx2x_prev_mcp_done(struct bnx2x *bp) +{ + u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, + DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); + if (!rc) { + BNX2X_ERR("MCP response failure, aborting\n"); + return -EBUSY; + } + + return 0; +} + +static struct bnx2x_prev_path_list * + bnx2x_prev_path_get_entry(struct bnx2x *bp) +{ + struct bnx2x_prev_path_list *tmp_list; + + list_for_each_entry(tmp_list, &bnx2x_prev_list, list) + if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot && + bp->pdev->bus->number == tmp_list->bus && + BP_PATH(bp) == tmp_list->path) + return tmp_list; + + return NULL; +} + +static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp) +{ + struct bnx2x_prev_path_list *tmp_list; + int rc; + + rc = down_interruptible(&bnx2x_prev_sem); + if (rc) { + BNX2X_ERR("Received %d when tried to take lock\n", rc); + return rc; + } + + tmp_list = bnx2x_prev_path_get_entry(bp); + if (tmp_list) { + tmp_list->aer = 1; + rc = 0; + } else { + BNX2X_ERR("path %d: Entry does not exist for eeh; Flow occurs before initial insmod is over ?\n", + BP_PATH(bp)); + } + + up(&bnx2x_prev_sem); + + return rc; +} + +static bool bnx2x_prev_is_path_marked(struct bnx2x *bp) +{ + struct bnx2x_prev_path_list *tmp_list; + bool rc = false; + + if (down_trylock(&bnx2x_prev_sem)) + return false; + + tmp_list = bnx2x_prev_path_get_entry(bp); + if (tmp_list) { + if (tmp_list->aer) { + DP(NETIF_MSG_HW, "Path %d was marked by AER\n", + BP_PATH(bp)); + } else { + rc = true; + BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n", + BP_PATH(bp)); + } + } + + up(&bnx2x_prev_sem); + + return rc; +} + +bool bnx2x_port_after_undi(struct bnx2x *bp) +{ + struct bnx2x_prev_path_list *entry; + bool val; + + down(&bnx2x_prev_sem); + + entry = bnx2x_prev_path_get_entry(bp); + val = !!(entry && (entry->undi & (1 << BP_PORT(bp)))); + + up(&bnx2x_prev_sem); + + return val; +} + +static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi) +{ + struct bnx2x_prev_path_list *tmp_list; + int rc; + + rc = down_interruptible(&bnx2x_prev_sem); + if (rc) { + BNX2X_ERR("Received %d when tried to take lock\n", rc); + return rc; + } + + /* Check whether the entry for this path already exists */ + tmp_list = bnx2x_prev_path_get_entry(bp); + if (tmp_list) { + if (!tmp_list->aer) { + BNX2X_ERR("Re-Marking the path.\n"); + } else { + DP(NETIF_MSG_HW, "Removing AER indication from path %d\n", + BP_PATH(bp)); + tmp_list->aer = 0; + } + up(&bnx2x_prev_sem); + return 0; + } + up(&bnx2x_prev_sem); + + /* Create an entry for this path and add it */ + tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL); + if (!tmp_list) { + BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n"); + return -ENOMEM; + } + + 
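/* Editor's sketch (annotation, not part of the patch): the UNDI producer
 * bump above treats one 32-bit register word as two 16-bit producers --
 * rcq in the low half, bd in the high half -- increments both, and
 * repacks. The uint16_t arithmetic wraps just like the hardware ring
 * indices:
 */
#include <stdint.h>

static uint32_t undi_prod_bump(uint32_t reg, uint16_t inc)
{
	uint16_t rcq = (uint16_t)(reg & 0xffff) + inc;	/* low 16 bits  */
	uint16_t bd  = (uint16_t)(reg >> 16) + inc;	/* high 16 bits */

	return ((uint32_t)bd << 16) | rcq;		/* repack */
}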
tmp_list->bus = bp->pdev->bus->number; + tmp_list->slot = PCI_SLOT(bp->pdev->devfn); + tmp_list->path = BP_PATH(bp); + tmp_list->aer = 0; + tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0; + + rc = down_interruptible(&bnx2x_prev_sem); + if (rc) { + BNX2X_ERR("Received %d when tried to take lock\n", rc); + kfree(tmp_list); + } else { + DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n", + BP_PATH(bp)); + list_add(&tmp_list->list, &bnx2x_prev_list); + up(&bnx2x_prev_sem); + } + + return rc; +} + +static int bnx2x_do_flr(struct bnx2x *bp) +{ + struct pci_dev *dev = bp->pdev; + + if (CHIP_IS_E1x(bp)) { + BNX2X_DEV_INFO("FLR not supported in E1/E1H\n"); + return -EINVAL; + } + + /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ + if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { + BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n", + bp->common.bc_ver); + return -EINVAL; + } + + if (!pci_wait_for_pending_transaction(dev)) + dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n"); + + BNX2X_DEV_INFO("Initiating FLR\n"); + bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0); + + return 0; +} + +static int bnx2x_prev_unload_uncommon(struct bnx2x *bp) +{ + int rc; + + BNX2X_DEV_INFO("Uncommon unload Flow\n"); + + /* Test if previous unload process was already finished for this path */ + if (bnx2x_prev_is_path_marked(bp)) + return bnx2x_prev_mcp_done(bp); + + BNX2X_DEV_INFO("Path is unmarked\n"); + + /* Cannot proceed with FLR if UNDI is loaded, since FW does not match */ + if (bnx2x_prev_is_after_undi(bp)) + goto out; + + /* If function has FLR capabilities, and existing FW version matches + * the one required, then FLR will be sufficient to clean any residue + * left by previous driver + */ + rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false); + + if (!rc) { + /* fw version is good */ + BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n"); + rc = bnx2x_do_flr(bp); + } + + if (!rc) { + /* FLR was performed */ + BNX2X_DEV_INFO("FLR successful\n"); + return 0; + } + + BNX2X_DEV_INFO("Could not FLR\n"); + +out: + /* Close the MCP request, return failure*/ + rc = bnx2x_prev_mcp_done(bp); + if (!rc) + rc = BNX2X_PREV_WAIT_NEEDED; + + return rc; +} + +static int bnx2x_prev_unload_common(struct bnx2x *bp) +{ + u32 reset_reg, tmp_reg = 0, rc; + bool prev_undi = false; + struct bnx2x_mac_vals mac_vals; + + /* It is possible a previous function received 'common' answer, + * but hasn't loaded yet, therefore creating a scenario of + * multiple functions receiving 'common' on the same path. 
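/* Editor's sketch (annotation, not part of the patch): the "uncommon"
 * unload flow above is a short decision ladder -- a path another function
 * already cleaned needs nothing; otherwise FLR is attempted only when no
 * UNDI residue exists and the loaded FW matches; anything else falls back
 * to the heavyweight common flow. Predicates are stand-ins for the driver
 * helpers:
 */
#include <stdbool.h>

enum prev_rc { PREV_DONE, PREV_WAIT_NEEDED };

static enum prev_rc uncommon_unload(bool path_marked, bool after_undi,
				    bool fw_matches, bool (*do_flr)(void))
{
	if (path_marked)
		return PREV_DONE;	/* someone already cleaned this path */
	if (!after_undi && fw_matches && do_flr())
		return PREV_DONE;	/* FLR wiped this function's residue */
	return PREV_WAIT_NEEDED;	/* defer to the common flow */
}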
+ */ + BNX2X_DEV_INFO("Common unload Flow\n"); + + memset(&mac_vals, 0, sizeof(mac_vals)); + + if (bnx2x_prev_is_path_marked(bp)) + return bnx2x_prev_mcp_done(bp); + + reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1); + + /* Reset should be performed after BRB is emptied */ + if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { + u32 timer_count = 1000; + bool need_write = true; + + /* Close the MAC Rx to prevent BRB from filling up */ + bnx2x_prev_unload_close_mac(bp, &mac_vals); + + /* close LLH filters towards the BRB */ + bnx2x_set_rx_filter(&bp->link_params, 0); + + /* Check if the UNDI driver was previously loaded */ + if (bnx2x_prev_is_after_undi(bp)) { + prev_undi = true; /* clear the UNDI indication */ REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); + /* clear possible idle check errors */ + REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0); + } + if (!CHIP_IS_E1x(bp)) + /* block FW from writing to host */ + REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); - BNX2X_DEV_INFO("UNDI is active! reset device\n"); - - /* try unload UNDI on port 0 */ - bp->pf_num = 0; - bp->fw_seq = - (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) & - DRV_MSG_SEQ_NUMBER_MASK); - reset_code = bnx2x_fw_command(bp, reset_code, 0); + /* wait until BRB is empty */ + tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); + while (timer_count) { + u32 prev_brb = tmp_reg; - /* if UNDI is loaded on the other port */ - if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) { + tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); + if (!tmp_reg) + break; - /* send "DONE" for previous unload */ - bnx2x_fw_command(bp, - DRV_MSG_CODE_UNLOAD_DONE, 0); + BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg); - /* unload UNDI on port 1 */ - bp->pf_num = 1; - bp->fw_seq = - (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) & - DRV_MSG_SEQ_NUMBER_MASK); - reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; + /* reset timer as long as BRB actually gets emptied */ + if (prev_brb > tmp_reg) + timer_count = 1000; + else + timer_count--; - bnx2x_fw_command(bp, reset_code, 0); + /* New UNDI FW supports MF and contains better + * cleaning methods - might be redundant but harmless. + */ + if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) { + if (need_write) { + bnx2x_prev_unload_undi_mf(bp); + need_write = false; + } + } else if (prev_undi) { + /* If UNDI resides in memory, + * manually increment it + */ + bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1); } + udelay(10); + } - bnx2x_undi_int_disable(bp); - port = BP_PORT(bp); - - /* close input traffic and wait for it */ - /* Do not rcv packets to BRB */ - REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_DRV_MASK : - NIG_REG_LLH0_BRB1_DRV_MASK), 0x0); - /* Do not direct rcv packets that are not for MCP to - * the BRB */ - REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : - NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); - /* clear AEU */ - REG_WR(bp, (port ? 
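/* Editor's sketch (annotation, not part of the patch): the BRB drain above
 * is a watchdog-style loop -- the budget is re-armed whenever occupancy
 * actually drops, so the timeout fires only on a genuine stall, not on a
 * slow but steady drain. Userspace model, read_occupancy() invented:
 */
#include <stdbool.h>
#include <stdint.h>

static bool drain_with_watchdog(uint32_t (*read_occupancy)(void))
{
	uint32_t cur = read_occupancy();
	int budget = 1000;

	while (budget) {
		uint32_t prev = cur;

		cur = read_occupancy();
		if (!cur)
			return true;		/* fully drained */
		budget = (prev > cur) ? 1000	/* progress: re-arm */
				      : budget - 1;
		/* driver delays udelay(10) per iteration here */
	}
	return false;				/* stalled */
}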
MISC_REG_AEU_MASK_ATTN_FUNC_1 : - MISC_REG_AEU_MASK_ATTN_FUNC_0), 0); - msleep(10); - - /* save NIG port swap info */ - swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); - swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); - /* reset device */ - REG_WR(bp, - GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, - 0xd3ffffff); + if (!timer_count) + BNX2X_ERR("Failed to empty BRB, hope for the best\n"); + } - value = 0x1400; - if (CHIP_IS_E3(bp)) { - value |= MISC_REGISTERS_RESET_REG_2_MSTAT0; - value |= MISC_REGISTERS_RESET_REG_2_MSTAT1; - } + /* No packets are in the pipeline, path is ready for reset */ + bnx2x_reset_common(bp); - REG_WR(bp, - GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, - value); + if (mac_vals.xmac_addr) + REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val); + if (mac_vals.umac_addr) + REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val); + if (mac_vals.emac_addr) + REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val); + if (mac_vals.bmac_addr) { + REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]); + REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]); + } - /* take the NIG out of reset and restore swap values */ - REG_WR(bp, - GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, - MISC_REGISTERS_RESET_REG_1_RST_NIG); - REG_WR(bp, NIG_REG_PORT_SWAP, swap_val); - REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en); - - /* send unload done to the MCP */ - bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); - - /* restore our func and fw_seq */ - bp->pf_num = orig_pf_num; - bp->fw_seq = - (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) & - DRV_MSG_SEQ_NUMBER_MASK); + rc = bnx2x_prev_mark_path(bp, prev_undi); + if (rc) { + bnx2x_prev_mcp_done(bp); + return rc; + } + + return bnx2x_prev_mcp_done(bp); +} + +/* previous driver DMAE transaction may have occurred when pre-boot stage ended + * and boot began, or when kdump kernel was loaded. Either case would invalidate + * the addresses of the transaction, resulting in was-error bit set in the pci + * causing all hw-to-host pcie transactions to timeout. If this happened we want + * to clear the interrupt which detected this from the pglueb and the was done + * bit + */ +static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp) +{ + if (!CHIP_IS_E1x(bp)) { + u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { + DP(BNX2X_MSG_SP, + "'was error' bit was found to be set in pglueb upon startup. Clearing\n"); + REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, + 1 << BP_FUNC(bp)); + } + } +} + +static int bnx2x_prev_unload(struct bnx2x *bp) +{ + int time_counter = 10; + u32 rc, fw, hw_lock_reg, hw_lock_val; + BNX2X_DEV_INFO("Entering Previous Unload Flow\n"); + + /* clear hw from errors which may have resulted from an interrupted + * dmae transaction. + */ + bnx2x_prev_interrupted_dmae(bp); + + /* Release previously held locks */ + hw_lock_reg = (BP_FUNC(bp) <= 5) ? 
+ (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) : + (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8); + + hw_lock_val = REG_RD(bp, hw_lock_reg); + if (hw_lock_val) { + if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { + BNX2X_DEV_INFO("Release Previously held NVRAM lock\n"); + REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, + (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp))); } - /* now it's safe to release the lock */ - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); + BNX2X_DEV_INFO("Release Previously held hw lock\n"); + REG_WR(bp, hw_lock_reg, 0xffffffff); + } else + BNX2X_DEV_INFO("No need to release hw/nvram locks\n"); + + if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) { + BNX2X_DEV_INFO("Release previously held alr\n"); + bnx2x_release_alr(bp); } + + do { + int aer = 0; + /* Lock MCP using an unload request */ + fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); + if (!fw) { + BNX2X_ERR("MCP response failure, aborting\n"); + rc = -EBUSY; + break; + } + + rc = down_interruptible(&bnx2x_prev_sem); + if (rc) { + BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n", + rc); + } else { + /* If Path is marked by EEH, ignore unload status */ + aer = !!(bnx2x_prev_path_get_entry(bp) && + bnx2x_prev_path_get_entry(bp)->aer); + up(&bnx2x_prev_sem); + } + + if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) { + rc = bnx2x_prev_unload_common(bp); + break; + } + + /* non-common reply from MCP might require looping */ + rc = bnx2x_prev_unload_uncommon(bp); + if (rc != BNX2X_PREV_WAIT_NEEDED) + break; + + msleep(20); + } while (--time_counter); + + if (!time_counter || rc) { + BNX2X_DEV_INFO("Unloading previous driver did not occur, Possibly due to MF UNDI\n"); + rc = -EPROBE_DEFER; + } + + /* Mark function if its port was used to boot from SAN */ + if (bnx2x_port_after_undi(bp)) + bp->link_params.feature_config_flags |= + FEATURE_CONFIG_BOOT_FROM_SAN; + + BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc); + + return rc; } -static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) +static void bnx2x_get_common_hwinfo(struct bnx2x *bp) { u32 val, val2, val3, val4, id, boot_mode; u16 pmc; @@ -8895,12 +10570,27 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) id = ((val & 0xffff) << 16); val = REG_RD(bp, MISC_REG_CHIP_REV); id |= ((val & 0xf) << 12); - val = REG_RD(bp, MISC_REG_CHIP_METAL); - id |= ((val & 0xff) << 4); + + /* Metal is read from PCI regs, but we can't access >=0x400 from + * the configuration space (so we need to reg_rd) + */ + val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3); + id |= (((val >> 24) & 0xf) << 4); val = REG_RD(bp, MISC_REG_BOND_ID); id |= (val & 0xf); bp->common.chip_id = id; + /* force 57811 according to MISC register */ + if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { + if (CHIP_IS_57810(bp)) + bp->common.chip_id = (CHIP_NUM_57811 << 16) | + (bp->common.chip_id & 0x0000FFFF); + else if (CHIP_IS_57810_MF(bp)) + bp->common.chip_id = (CHIP_NUM_57811_MF << 16) | + (bp->common.chip_id & 0x0000FFFF); + bp->common.chip_id |= 0x1; + } + /* Set doorbell size */ bp->db_size = (1 << BNX2X_DB_SHIFT); @@ -8924,6 +10614,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) bp->pfid = bp->pf_num; /* 0..7 */ } + BNX2X_DEV_INFO("pf_id: %x", bp->pfid); + bp->link_params.chip_id = bp->common.chip_id; BNX2X_DEV_INFO("chip ID is 0x%x\n", id); @@ -8942,14 +10634,20 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) bnx2x_init_shmem(bp); - - bp->common.shmem2_base = REG_RD(bp, 
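/* Editor's sketch (annotation, not part of the patch): the chip id above is
 * assembled from several registers into one word -- device number in bits
 * 31:16, silicon rev in 15:12, metal (now read via PCI_ID_VAL3) in 7:4,
 * bond id in 3:0. Pure field packing; callers supply the register reads:
 */
#include <stdint.h>

static uint32_t make_chip_id(uint16_t num, uint8_t rev, uint8_t metal,
			     uint8_t bond)
{
	return ((uint32_t)num << 16) |
	       ((uint32_t)(rev & 0xf) << 12) |
	       ((uint32_t)(metal & 0xf) << 4) |
	       (uint32_t)(bond & 0xf);
}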
(BP_PATH(bp) ? MISC_REG_GENERIC_CR_1 : MISC_REG_GENERIC_CR_0)); bp->link_params.shmem_base = bp->common.shmem_base; bp->link_params.shmem2_base = bp->common.shmem2_base; + if (SHMEM2_RD(bp, size) > + (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)])) + bp->link_params.lfa_base = + REG_RD(bp, bp->common.shmem2_base + + (u32)offsetof(struct shmem2_region, + lfa_host_addr[BP_PORT(bp)])); + else + bp->link_params.lfa_base = 0; BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n", bp->common.shmem_base, bp->common.shmem2_base); @@ -8981,8 +10679,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) if (val < BNX2X_BC_VER) { /* for now only warn * later we might need to enforce this */ - BNX2X_ERR("This driver needs bc_ver %X but found %X, " - "please upgrade BC\n", BNX2X_BC_VER, val); + BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n", + BNX2X_BC_VER, val); } bp->link_params.feature_config_flags |= (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ? @@ -8991,13 +10689,29 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) bp->link_params.feature_config_flags |= (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; - + bp->link_params.feature_config_flags |= + (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ? + FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0; bp->link_params.feature_config_flags |= (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; + + bp->link_params.feature_config_flags |= + (val >= REQ_BC_VER_4_MT_SUPPORTED) ? + FEATURE_CONFIG_MT_SUPPORT : 0; + bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ? BC_SUPPORTS_PFC_STATS : 0; + bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ? + BC_SUPPORTS_FCOE_FEATURES : 0; + + bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? + BC_SUPPORTS_DCBX_MSG_NON_PMF : 0; + + bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ? + BC_SUPPORTS_RMMOD_CMD : 0; + boot_mode = SHMEM_RD(bp, dev_info.port_feature_config[BP_PORT(bp)].mba_config) & PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; @@ -9016,7 +10730,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) break; } - pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc); + pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc); bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; BNX2X_DEV_INFO("%sWoL capable\n", @@ -9034,7 +10748,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) -static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) +static int bnx2x_get_igu_cam_info(struct bnx2x *bp) { int pfid = BP_FUNC(bp); int igu_sb_id; @@ -9051,7 +10765,7 @@ static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x + (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn); - return; + return 0; } /* IGU in normal mode - read CAM */ @@ -9076,20 +10790,24 @@ static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) } #ifdef CONFIG_PCI_MSI - /* - * It's expected that number of CAM entries for this functions is equal - * to the number evaluated based on the MSI-X table size. We want a - * harsh warning if these values are different! + /* Due to new PF resource allocation by MFW T7.4 and above, it's + * optional that number of CAM entries will not be equal to the value + * advertised in PCI. 
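/* Editor's sketch (annotation, not part of the patch): bootcode feature
 * discovery above is a chain of "version >= threshold ? flag : 0" terms;
 * the same thing as a table makes the pattern explicit (thresholds and
 * flag values invented):
 */
#include <stdint.h>

struct bc_feature {
	uint32_t min_bc_ver;	/* e.g. a REQ_BC_VER_4_* threshold */
	uint32_t flag;		/* capability bit granted at/above it */
};

static uint32_t bc_feature_flags(uint32_t bc_ver,
				 const struct bc_feature *tbl, int n)
{
	uint32_t flags = 0;
	int i;

	for (i = 0; i < n; i++)
		if (bc_ver >= tbl[i].min_bc_ver)
			flags |= tbl[i].flag;
	return flags;
}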
+ * Driver should use the minimal value of both as the actual status + * block count */ - WARN_ON(bp->igu_sb_cnt != igu_sb_cnt); + bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt); #endif - if (igu_sb_cnt == 0) + if (igu_sb_cnt == 0) { BNX2X_ERR("CAM configuration error\n"); + return -EINVAL; + } + + return 0; } -static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, - u32 switch_cfg) +static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) { int cfg_size = 0, idx, port = BP_PORT(bp); @@ -9123,8 +10841,7 @@ static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, } if (!(bp->port.supported[0] || bp->port.supported[1])) { - BNX2X_ERR("NVRAM config error. BAD phy config." - "PHY1 config 0x%x, PHY2 config 0x%x\n", + BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n", SHMEM_RD(bp, dev_info.port_hw_config[port].external_phy_config), SHMEM_RD(bp, @@ -9182,13 +10899,16 @@ static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full; + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) + bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full; } BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0], bp->port.supported[1]); } -static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) +static void bnx2x_link_settings_requested(struct bnx2x *bp) { u32 link_config, idx, cfg_size = 0; bp->port.advertising[0] = 0; @@ -9212,6 +10932,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) SPEED_AUTO_NEG; bp->port.advertising[idx] |= bp->port.supported[idx]; + if (bp->link_params.phy[EXT_PHY1].type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) + bp->port.advertising[idx] |= + (SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full); } else { /* force 10G, no AN */ bp->link_params.req_line_speed[idx] = @@ -9231,9 +10956,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) (ADVERTISED_10baseT_Full | ADVERTISED_TP); } else { - BNX2X_ERR("NVRAM config error. " - "Invalid link_config 0x%x" - " speed_cap_mask 0x%x\n", + BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", link_config, bp->link_params.speed_cap_mask[idx]); return; @@ -9250,9 +10973,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) (ADVERTISED_10baseT_Half | ADVERTISED_TP); } else { - BNX2X_ERR("NVRAM config error. " - "Invalid link_config 0x%x" - " speed_cap_mask 0x%x\n", + BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", link_config, bp->link_params.speed_cap_mask[idx]); return; @@ -9268,9 +10989,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) (ADVERTISED_100baseT_Full | ADVERTISED_TP); } else { - BNX2X_ERR("NVRAM config error. " - "Invalid link_config 0x%x" - " speed_cap_mask 0x%x\n", + BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", link_config, bp->link_params.speed_cap_mask[idx]); return; @@ -9288,9 +11007,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) (ADVERTISED_100baseT_Half | ADVERTISED_TP); } else { - BNX2X_ERR("NVRAM config error. " - "Invalid link_config 0x%x" - " speed_cap_mask 0x%x\n", + BNX2X_ERR("NVRAM config error. 
Invalid link_config 0x%x speed_cap_mask 0x%x\n", link_config, bp->link_params.speed_cap_mask[idx]); return; @@ -9306,9 +11023,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) (ADVERTISED_1000baseT_Full | ADVERTISED_TP); } else { - BNX2X_ERR("NVRAM config error. " - "Invalid link_config 0x%x" - " speed_cap_mask 0x%x\n", + BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", link_config, bp->link_params.speed_cap_mask[idx]); return; @@ -9324,9 +11039,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) (ADVERTISED_2500baseX_Full | ADVERTISED_TP); } else { - BNX2X_ERR("NVRAM config error. " - "Invalid link_config 0x%x" - " speed_cap_mask 0x%x\n", + BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", link_config, bp->link_params.speed_cap_mask[idx]); return; @@ -9342,9 +11055,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); } else { - BNX2X_ERR("NVRAM config error. " - "Invalid link_config 0x%x" - " speed_cap_mask 0x%x\n", + BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", link_config, bp->link_params.speed_cap_mask[idx]); return; @@ -9355,8 +11066,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) break; default: - BNX2X_ERR("NVRAM config error. " - "BAD link speed link_config 0x%x\n", + BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n", link_config); bp->link_params.req_line_speed[idx] = SPEED_AUTO_NEG; @@ -9367,15 +11077,16 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) bp->link_params.req_flow_ctrl[idx] = (link_config & PORT_FEATURE_FLOW_CONTROL_MASK); - if ((bp->link_params.req_flow_ctrl[idx] == - BNX2X_FLOW_CTRL_AUTO) && - !(bp->port.supported[idx] & SUPPORTED_Autoneg)) { - bp->link_params.req_flow_ctrl[idx] = - BNX2X_FLOW_CTRL_NONE; + if (bp->link_params.req_flow_ctrl[idx] == + BNX2X_FLOW_CTRL_AUTO) { + if (!(bp->port.supported[idx] & SUPPORTED_Autoneg)) + bp->link_params.req_flow_ctrl[idx] = + BNX2X_FLOW_CTRL_NONE; + else + bnx2x_set_requested_fc(bp); } - BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl" - " 0x%x advertising 0x%x\n", + BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n", bp->link_params.req_line_speed[idx], bp->link_params.req_duplex[idx], bp->link_params.req_flow_ctrl[idx], @@ -9383,19 +11094,19 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) } } -static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi) +static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi) { - mac_hi = cpu_to_be16(mac_hi); - mac_lo = cpu_to_be32(mac_lo); - memcpy(mac_buf, &mac_hi, sizeof(mac_hi)); - memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo)); + __be16 mac_hi_be = cpu_to_be16(mac_hi); + __be32 mac_lo_be = cpu_to_be32(mac_lo); + memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be)); + memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be)); } -static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) +static void bnx2x_get_port_hwinfo(struct bnx2x *bp) { int port = BP_PORT(bp); u32 config; - u32 ext_phy_type, ext_phy_config; + u32 ext_phy_type, ext_phy_config, eee_mode; bp->link_params.bp = bp; bp->link_params.port = port; @@ -9405,10 +11116,12 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) bp->link_params.speed_cap_mask[0] = SHMEM_RD(bp, - 
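/* Editor's sketch (annotation, not part of the patch): the reworked
 * bnx2x_set_mac_buf() above stores the MAC as a big-endian 16-bit word
 * followed by a big-endian 32-bit word; spelling out the shifts shows the
 * resulting byte order without cpu_to_be*():
 */
#include <stdint.h>

static void set_mac_buf(uint8_t mac[6], uint32_t lo, uint16_t hi)
{
	mac[0] = hi >> 8;		/* high word, MSB first */
	mac[1] = hi & 0xff;
	mac[2] = lo >> 24;		/* low dword, MSB first */
	mac[3] = (lo >> 16) & 0xff;
	mac[4] = (lo >> 8) & 0xff;
	mac[5] = lo & 0xff;
}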
dev_info.port_hw_config[port].speed_capability_mask); + dev_info.port_hw_config[port].speed_capability_mask) & + PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; bp->link_params.speed_cap_mask[1] = SHMEM_RD(bp, - dev_info.port_hw_config[port].speed_capability_mask2); + dev_info.port_hw_config[port].speed_capability_mask2) & + PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; bp->port.link_config[0] = SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); @@ -9424,8 +11137,14 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) bp->wol = (!(bp->flags & NO_WOL_FLAG) && (config & PORT_FEATURE_WOL_ENABLED)); - BNX2X_DEV_INFO("lane_config 0x%08x " - "speed_cap_mask0 0x%08x link_config0 0x%08x\n", + if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == + PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp)) + bp->flags |= NO_ISCSI_FLAG; + if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == + PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp))) + bp->flags |= NO_FCOE_FLAG; + + BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n", bp->link_params.lane_config, bp->link_params.speed_cap_mask[0], bp->port.link_config[0]); @@ -9453,26 +11172,32 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) bp->mdio.prtad = XGXS_EXT_PHY_ADDR(ext_phy_config); - /* - * Check if hw lock is required to access MDC/MDIO bus to the PHY(s) - * In MF mode, it is set to cover self test cases - */ - if (IS_MF(bp)) - bp->port.need_hw_lock = 1; - else - bp->port.need_hw_lock = bnx2x_hw_lock_required(bp, - bp->common.shmem_base, - bp->common.shmem2_base); + /* Configure link feature according to nvram value */ + eee_mode = (((SHMEM_RD(bp, dev_info. + port_feature_config[port].eee_power_mode)) & + PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> + PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); + if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) { + bp->link_params.eee_mode = EEE_MODE_ADV_LPI | + EEE_MODE_ENABLE_LPI | + EEE_MODE_OUTPUT_TIME; + } else { + bp->link_params.eee_mode = 0; + } } void bnx2x_get_iscsi_info(struct bnx2x *bp) { -#ifdef BCM_CNIC + u32 no_flags = NO_ISCSI_FLAG; int port = BP_PORT(bp); - u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, drv_lic_key[port].max_iscsi_conn); + if (!CNIC_SUPPORT(bp)) { + bp->flags |= no_flags; + return; + } + /* Get the number of maximum allowed iSCSI connections */ bp->cnic_eth_dev.max_iscsi_conn = (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >> @@ -9486,71 +11211,122 @@ void bnx2x_get_iscsi_info(struct bnx2x *bp) * disable the feature. 
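/* Editor's sketch (annotation, not part of the patch): the license fields
 * above are stored XOR-obfuscated against a fixed pattern; decoding is the
 * same XOR back, then mask+shift out the max-connections field. Pattern,
 * mask and shift values here are invented stand-ins for the driver's
 * constants:
 */
#include <stdint.h>

#define LIC_ENCODE_PATTERN	0x1e1e1e1eU	/* ~ FW_ENCODE_32BIT_PATTERN */
#define INIT_CONN_MASK		0x0000ffffU
#define INIT_CONN_SHIFT		0

static uint32_t decode_max_conn(uint32_t shmem_word)
{
	uint32_t val = shmem_word ^ LIC_ENCODE_PATTERN;	/* de-obfuscate */

	return (val & INIT_CONN_MASK) >> INIT_CONN_SHIFT;
}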
*/ if (!bp->cnic_eth_dev.max_iscsi_conn) - bp->flags |= NO_ISCSI_FLAG; -#else - bp->flags |= NO_ISCSI_FLAG; -#endif + bp->flags |= no_flags; +} + +static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func) +{ + /* Port info */ + bp->cnic_eth_dev.fcoe_wwn_port_name_hi = + MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper); + bp->cnic_eth_dev.fcoe_wwn_port_name_lo = + MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower); + + /* Node info */ + bp->cnic_eth_dev.fcoe_wwn_node_name_hi = + MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper); + bp->cnic_eth_dev.fcoe_wwn_node_name_lo = + MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower); } -static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp) +static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp) +{ + u8 count = 0; + + if (IS_MF(bp)) { + u8 fid; + + /* iterate over absolute function ids for this path: */ + for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) { + if (IS_MF_SD(bp)) { + u32 cfg = MF_CFG_RD(bp, + func_mf_config[fid].config); + + if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) && + ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) == + FUNC_MF_CFG_PROTOCOL_FCOE)) + count++; + } else { + u32 cfg = MF_CFG_RD(bp, + func_ext_config[fid]. + func_cfg); + + if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) && + (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)) + count++; + } + } + } else { /* SF */ + int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1; + + for (port = 0; port < port_cnt; port++) { + u32 lic = SHMEM_RD(bp, + drv_lic_key[port].max_fcoe_conn) ^ + FW_ENCODE_32BIT_PATTERN; + if (lic) + count++; + } + } + + return count; +} + +static void bnx2x_get_fcoe_info(struct bnx2x *bp) { -#ifdef BCM_CNIC int port = BP_PORT(bp); int func = BP_ABS_FUNC(bp); - u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, drv_lic_key[port].max_fcoe_conn); + u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp); + + if (!CNIC_SUPPORT(bp)) { + bp->flags |= NO_FCOE_FLAG; + return; + } /* Get the number of maximum allowed FCoE connections */ bp->cnic_eth_dev.max_fcoe_conn = (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >> BNX2X_MAX_FCOE_INIT_CONN_SHIFT; + /* Calculate the number of maximum allowed FCoE tasks */ + bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE; + + /* check if FCoE resources must be shared between different functions */ + if (num_fcoe_func) + bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func; + /* Read the WWN: */ if (!IS_MF(bp)) { /* Port info */ bp->cnic_eth_dev.fcoe_wwn_port_name_hi = SHMEM_RD(bp, - dev_info.port_hw_config[port]. + dev_info.port_hw_config[port]. fcoe_wwn_port_name_upper); bp->cnic_eth_dev.fcoe_wwn_port_name_lo = SHMEM_RD(bp, - dev_info.port_hw_config[port]. + dev_info.port_hw_config[port]. fcoe_wwn_port_name_lower); /* Node info */ bp->cnic_eth_dev.fcoe_wwn_node_name_hi = SHMEM_RD(bp, - dev_info.port_hw_config[port]. + dev_info.port_hw_config[port]. fcoe_wwn_node_name_upper); bp->cnic_eth_dev.fcoe_wwn_node_name_lo = SHMEM_RD(bp, - dev_info.port_hw_config[port]. + dev_info.port_hw_config[port]. fcoe_wwn_node_name_lower); } else if (!IS_MF_SD(bp)) { - u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); - /* * Read the WWN info only if the FCoE feature is enabled for * this function. */ - if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { - /* Port info */ - bp->cnic_eth_dev.fcoe_wwn_port_name_hi = - MF_CFG_RD(bp, func_ext_config[func]. - fcoe_wwn_port_name_upper); - bp->cnic_eth_dev.fcoe_wwn_port_name_lo = - MF_CFG_RD(bp, func_ext_config[func]. 
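/* Editor's sketch (annotation, not part of the patch): FCoE exchanges above
 * are an engine-wide pool, so each function's budget is the total divided
 * by however many functions were counted FCoE-capable, with the zero case
 * guarded exactly as the driver does:
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t fcoe_exchanges_per_func(uint32_t engine_total,
					const bool fcoe_capable[], int nfuncs)
{
	int i, count = 0;

	for (i = 0; i < nfuncs; i++)
		if (fcoe_capable[i])
			count++;

	return count ? engine_total / count : engine_total;
}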
- fcoe_wwn_port_name_lower); - - /* Node info */ - bp->cnic_eth_dev.fcoe_wwn_node_name_hi = - MF_CFG_RD(bp, func_ext_config[func]. - fcoe_wwn_node_name_upper); - bp->cnic_eth_dev.fcoe_wwn_node_name_lo = - MF_CFG_RD(bp, func_ext_config[func]. - fcoe_wwn_node_name_lower); - } + if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp)) + bnx2x_get_ext_wwn_info(bp, func); + + } else if (IS_MF_FCOE_SD(bp) && !CHIP_IS_E1x(bp)) { + bnx2x_get_ext_wwn_info(bp, func); } BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn); @@ -9561,12 +11337,9 @@ static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp) */ if (!bp->cnic_eth_dev.max_fcoe_conn) bp->flags |= NO_FCOE_FLAG; -#else - bp->flags |= NO_FCOE_FLAG; -#endif } -static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp) +static void bnx2x_get_cnic_info(struct bnx2x *bp) { /* * iSCSI may be dynamically disabled but reading @@ -9577,126 +11350,170 @@ static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp) bnx2x_get_fcoe_info(bp); } -static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) +static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp) { u32 val, val2; int func = BP_ABS_FUNC(bp); int port = BP_PORT(bp); -#ifdef BCM_CNIC u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac; u8 *fip_mac = bp->fip_mac; -#endif - - /* Zero primary MAC configuration */ - memset(bp->dev->dev_addr, 0, ETH_ALEN); - - if (BP_NOMCP(bp)) { - BNX2X_ERROR("warning: random MAC workaround active\n"); - random_ether_addr(bp->dev->dev_addr); - } else if (IS_MF(bp)) { - val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper); - val = MF_CFG_RD(bp, func_mf_config[func].mac_lower); - if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) && - (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) - bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); -#ifdef BCM_CNIC - /* - * iSCSI and FCoE NPAR MACs: if there is no either iSCSI or + if (IS_MF(bp)) { + /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or * FCoE MAC then the appropriate feature should be disabled. + * In non SD mode features configuration comes from struct + * func_ext_config. */ - if (IS_MF_SI(bp)) { + if (!IS_MF_SD(bp) && !CHIP_IS_E1x(bp)) { u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { val2 = MF_CFG_RD(bp, func_ext_config[func]. - iscsi_mac_addr_upper); + iscsi_mac_addr_upper); val = MF_CFG_RD(bp, func_ext_config[func]. - iscsi_mac_addr_lower); + iscsi_mac_addr_lower); bnx2x_set_mac_buf(iscsi_mac, val, val2); - BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n", - iscsi_mac); - } else + BNX2X_DEV_INFO + ("Read iSCSI MAC: %pM\n", iscsi_mac); + } else { bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; + } if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { val2 = MF_CFG_RD(bp, func_ext_config[func]. - fcoe_mac_addr_upper); + fcoe_mac_addr_upper); val = MF_CFG_RD(bp, func_ext_config[func]. 
- fcoe_mac_addr_lower); + fcoe_mac_addr_lower); bnx2x_set_mac_buf(fip_mac, val, val2); - BNX2X_DEV_INFO("Read FCoE L2 MAC: %pM\n", - fip_mac); - - } else + BNX2X_DEV_INFO + ("Read FCoE L2 MAC: %pM\n", fip_mac); + } else { bp->flags |= NO_FCOE_FLAG; - } else { /* SD mode */ - if (BNX2X_IS_MF_PROTOCOL_ISCSI(bp)) { + } + + bp->mf_ext_config = cfg; + + } else { /* SD MODE */ + if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) { /* use primary mac as iscsi mac */ memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN); - /* Zero primary MAC configuration */ - memset(bp->dev->dev_addr, 0, ETH_ALEN); BNX2X_DEV_INFO("SD ISCSI MODE\n"); - BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n", - iscsi_mac); + BNX2X_DEV_INFO + ("Read iSCSI MAC: %pM\n", iscsi_mac); + } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) { + /* use primary mac as fip mac */ + memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN); + BNX2X_DEV_INFO("SD FCoE MODE\n"); + BNX2X_DEV_INFO + ("Read FIP MAC: %pM\n", fip_mac); } } -#endif - } else { - /* in SF read MACs from port configuration */ - val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); - val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); - bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); -#ifdef BCM_CNIC + /* If this is a storage-only interface, use SAN mac as + * primary MAC. Notice that for SD this is already the case, + * as the SAN mac was copied from the primary MAC. + */ + if (IS_MF_FCOE_AFEX(bp)) + memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN); + } else { val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. - iscsi_mac_upper); + iscsi_mac_upper); val = SHMEM_RD(bp, dev_info.port_hw_config[port]. - iscsi_mac_lower); + iscsi_mac_lower); bnx2x_set_mac_buf(iscsi_mac, val, val2); val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. - fcoe_fip_mac_upper); + fcoe_fip_mac_upper); val = SHMEM_RD(bp, dev_info.port_hw_config[port]. - fcoe_fip_mac_lower); + fcoe_fip_mac_lower); bnx2x_set_mac_buf(fip_mac, val, val2); -#endif } - memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); - memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); - -#ifdef BCM_CNIC - /* Set the FCoE MAC in MF_SD mode */ - if (!CHIP_IS_E1x(bp) && IS_MF_SD(bp)) - memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN); - - /* Disable iSCSI if MAC configuration is - * invalid. - */ + /* Disable iSCSI OOO if MAC configuration is invalid. */ if (!is_valid_ether_addr(iscsi_mac)) { - bp->flags |= NO_ISCSI_FLAG; + bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; memset(iscsi_mac, 0, ETH_ALEN); } - /* Disable FCoE if MAC configuration is - * invalid. - */ + /* Disable FCoE if MAC configuration is invalid. 
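/* Reviewer sketch: the SHMEM_RD/MF_CFG_RD pairs above fetch each MAC as an upper 16-bit word and a lower 32-bit word and hand them to bnx2x_set_mac_buf(). My reading of that byte layout, written as a standalone function (illustrative only, not a verbatim copy of the helper): */
#include <stdint.h>

static void set_mac_buf_ex(uint8_t mac[6], uint32_t lo, uint16_t hi)
{
	mac[0] = hi >> 8;		/* upper word: MAC bytes 0-1 */
	mac[1] = hi & 0xff;
	mac[2] = lo >> 24;		/* lower word: MAC bytes 2-5 */
	mac[3] = (lo >> 16) & 0xff;
	mac[4] = (lo >> 8) & 0xff;
	mac[5] = lo & 0xff;
}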
*/ if (!is_valid_ether_addr(fip_mac)) { bp->flags |= NO_FCOE_FLAG; memset(bp->fip_mac, 0, ETH_ALEN); } -#endif +} + +static void bnx2x_get_mac_hwinfo(struct bnx2x *bp) +{ + u32 val, val2; + int func = BP_ABS_FUNC(bp); + int port = BP_PORT(bp); + + /* Zero primary MAC configuration */ + memset(bp->dev->dev_addr, 0, ETH_ALEN); + + if (BP_NOMCP(bp)) { + BNX2X_ERROR("warning: random MAC workaround active\n"); + eth_hw_addr_random(bp->dev); + } else if (IS_MF(bp)) { + val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper); + val = MF_CFG_RD(bp, func_mf_config[func].mac_lower); + if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) && + (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) + bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); + + if (CNIC_SUPPORT(bp)) + bnx2x_get_cnic_mac_hwinfo(bp); + } else { + /* in SF read MACs from port configuration */ + val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); + val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); + bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); + + if (CNIC_SUPPORT(bp)) + bnx2x_get_cnic_mac_hwinfo(bp); + } + + if (!BP_NOMCP(bp)) { + /* Read physical port identifier from shmem */ + val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); + val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); + bnx2x_set_mac_buf(bp->phys_port_id, val, val2); + bp->flags |= HAS_PHYS_PORT_ID; + } + + memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr)) dev_err(&bp->pdev->dev, - "bad Ethernet MAC address configuration: " - "%pM, change it manually before bringing up " - "the appropriate network interface\n", + "bad Ethernet MAC address configuration: %pM\n" + "change it manually before bringing up the appropriate network interface\n", bp->dev->dev_addr); } -static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) +static bool bnx2x_get_dropless_info(struct bnx2x *bp) +{ + int tmp; + u32 cfg; + + if (IS_VF(bp)) + return 0; + + if (IS_MF(bp) && !CHIP_IS_E1x(bp)) { + /* Take function: tmp = func */ + tmp = BP_ABS_FUNC(bp); + cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg); + cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING); + } else { + /* Take port: tmp = port */ + tmp = BP_PORT(bp); + cfg = SHMEM_RD(bp, + dev_info.port_hw_config[tmp].generic_features); + cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED); + } + return cfg; +} + +static int bnx2x_get_hwinfo(struct bnx2x *bp) { int /*abs*/func = BP_ABS_FUNC(bp); int vn; @@ -9716,7 +11533,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) } else { bp->common.int_block = INT_BLOCK_IGU; - /* do not allow device reset during IGU info preocessing */ + /* do not allow device reset during IGU info processing */ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); @@ -9732,12 +11549,14 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) { tout--; - usleep_range(1000, 1000); + usleep_range(1000, 2000); } if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) { dev_err(&bp->pdev->dev, "FORCING Normal Mode failed!!!\n"); + bnx2x_release_hw_lock(bp, + HW_LOCK_RESOURCE_RESET); return -EPERM; } } @@ -9748,9 +11567,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) } else BNX2X_DEV_INFO("IGU Normal Mode\n"); - bnx2x_get_igu_cam_info(bp); - + rc = bnx2x_get_igu_cam_info(bp); bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); + if (rc) + return rc; } /* @@ -9792,7 +11612,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) 
E1H_FUNC_MAX * sizeof(struct drv_func_mb); /* * get mf configuration: - * 1. existence of MF configuration + * 1. Existence of MF configuration * 2. MAC address must be legal (check only upper bytes) * for Switch-Independent mode; * OVLAN must be legal for Switch-Dependent mode @@ -9814,8 +11634,20 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) bp->mf_config[vn] = MF_CFG_RD(bp, func_mf_config[func].config); } else - BNX2X_DEV_INFO("illegal MAC address " - "for SI\n"); + BNX2X_DEV_INFO("illegal MAC address for SI\n"); + break; + case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: + if ((!CHIP_IS_E1x(bp)) && + (MF_CFG_RD(bp, func_mf_config[func]. + mac_upper) != 0xffff) && + (SHMEM2_HAS(bp, + afex_driver_support))) { + bp->mf_mode = MULTI_FUNCTION_AFEX; + bp->mf_config[vn] = MF_CFG_RD(bp, + func_mf_config[func].config); + } else { + BNX2X_DEV_INFO("can not configure afex mode\n"); + } break; case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: /* get OV configuration */ @@ -9830,10 +11662,13 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) } else BNX2X_DEV_INFO("illegal OV for SD\n"); break; + case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: + bp->mf_config[vn] = 0; + break; default: /* Unknown configuration: reset mf_config */ bp->mf_config[vn] = 0; - BNX2X_DEV_INFO("unkown MF mode 0x%x\n", val); + BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val); } } @@ -9848,25 +11683,27 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) bp->mf_ov = val; bp->path_has_ovlan = true; - BNX2X_DEV_INFO("MF OV for func %d is %d " - "(0x%04x)\n", func, bp->mf_ov, - bp->mf_ov); + BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n", + func, bp->mf_ov, bp->mf_ov); } else { dev_err(&bp->pdev->dev, - "No valid MF OV for func %d, " - "aborting\n", func); + "No valid MF OV for func %d, aborting\n", + func); return -EPERM; } break; + case MULTI_FUNCTION_AFEX: + BNX2X_DEV_INFO("func %d is in MF afex mode\n", func); + break; case MULTI_FUNCTION_SI: - BNX2X_DEV_INFO("func %d is in MF " - "switch-independent mode\n", func); + BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n", + func); break; default: if (vn) { dev_err(&bp->pdev->dev, - "VN %d is in a single function mode, " - "aborting\n", vn); + "VN %d is in a single function mode, aborting\n", + vn); return -EPERM; } break; @@ -9890,9 +11727,9 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) } } - /* adjust igu_sb_cnt to MF for E1x */ - if (CHIP_IS_E1x(bp) && IS_MF(bp)) - bp->igu_sb_cnt /= E1HVN_MAX; + /* adjust igu_sb_cnt to MF for E1H */ + if (CHIP_IS_E1H(bp) && IS_MF(bp)) + bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT); /* port info */ bnx2x_get_port_hwinfo(bp); @@ -9902,20 +11739,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) bnx2x_get_cnic_info(bp); - /* Get current FW pulse sequence */ - if (!BP_NOMCP(bp)) { - int mb_idx = BP_FW_MB_IDX(bp); - - bp->fw_drv_pulse_wr_seq = - (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) & - DRV_PULSE_SEQ_MASK); - BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); - } - return rc; } -static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp) +static void bnx2x_read_fwinfo(struct bnx2x *bp) { int cnt, i, block_end, rodi; char vpd_start[BNX2X_VPD_LEN+1]; @@ -10000,7 +11827,7 @@ out_not_found: return; } -static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp) +static void bnx2x_set_modes_bitmap(struct bnx2x *bp) { u32 flags = 0; @@ -10035,6 +11862,9 @@ static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp) case MULTI_FUNCTION_SI: SET_FLAGS(flags, 
MODE_MF_SI); break; + case MULTI_FUNCTION_AFEX: + SET_FLAGS(flags, MODE_MF_AFEX); + break; } } else SET_FLAGS(flags, MODE_SF); @@ -10047,24 +11877,29 @@ static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp) INIT_MODE_FLAGS(bp) = flags; } -static int __devinit bnx2x_init_bp(struct bnx2x *bp) +static int bnx2x_init_bp(struct bnx2x *bp) { int func; int rc; mutex_init(&bp->port.phy_mutex); mutex_init(&bp->fw_mb_mutex); + mutex_init(&bp->drv_info_mutex); + bp->drv_info_mng_owner = false; spin_lock_init(&bp->stats_lock); -#ifdef BCM_CNIC - mutex_init(&bp->cnic_mutex); -#endif + sema_init(&bp->stats_sema, 1); INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); - rc = bnx2x_get_hwinfo(bp); - if (rc) - return rc; + INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task); + if (IS_PF(bp)) { + rc = bnx2x_get_hwinfo(bp); + if (rc) + return rc; + } else { + eth_zero_addr(bp->dev->dev_addr); + } bnx2x_set_modes_bitmap(bp); @@ -10077,49 +11912,50 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) func = BP_FUNC(bp); /* need to reset chip if undi was active */ - if (!BP_NOMCP(bp)) - bnx2x_undi_unload(bp); - - /* init fw_seq after undi_unload! */ - if (!BP_NOMCP(bp)) { + if (IS_PF(bp) && !BP_NOMCP(bp)) { + /* init fw_seq */ bp->fw_seq = - (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & - DRV_MSG_SEQ_NUMBER_MASK); + SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK; BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); + + rc = bnx2x_prev_unload(bp); + if (rc) { + bnx2x_free_mem_bp(bp); + return rc; + } } if (CHIP_REV_IS_FPGA(bp)) dev_err(&bp->pdev->dev, "FPGA detected\n"); if (BP_NOMCP(bp) && (func == 0)) - dev_err(&bp->pdev->dev, "MCP disabled, " - "must load devices in order!\n"); - - bp->multi_mode = multi_mode; + dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n"); bp->disable_tpa = disable_tpa; - -#ifdef BCM_CNIC - bp->disable_tpa |= IS_MF_ISCSI_SD(bp); -#endif + bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp); + /* Reduce memory usage in kdump environment by disabling TPA */ + bp->disable_tpa |= reset_devices; /* Set TPA flags */ if (bp->disable_tpa) { - bp->flags &= ~TPA_ENABLE_FLAG; + bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG); bp->dev->features &= ~NETIF_F_LRO; } else { - bp->flags |= TPA_ENABLE_FLAG; + bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG); bp->dev->features |= NETIF_F_LRO; } if (CHIP_IS_E1(bp)) bp->dropless_fc = 0; else - bp->dropless_fc = dropless_fc; + bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp); bp->mrrs = mrrs; - bp->tx_ring_size = MAX_TX_AVAIL; + bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 
0 : MAX_TX_AVAIL; + if (IS_VF(bp)) + bp->rx_ring_size = MAX_RX_AVAIL; /* make sure that the numbers are in the right granularity */ bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR; @@ -10132,28 +11968,52 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) bp->timer.data = (unsigned long) bp; bp->timer.function = bnx2x_timer; - bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON); - bnx2x_dcbx_init_params(bp); + if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) && + SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) && + SHMEM2_RD(bp, dcbx_lldp_params_offset) && + SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) { + bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON); + bnx2x_dcbx_init_params(bp); + } else { + bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF); + } -#ifdef BCM_CNIC if (CHIP_IS_E1x(bp)) bp->cnic_base_cl_id = FP_SB_MAX_E1x; else bp->cnic_base_cl_id = FP_SB_MAX_E2; -#endif /* multiple tx priority */ - if (CHIP_IS_E1x(bp)) + if (IS_VF(bp)) + bp->max_cos = 1; + else if (CHIP_IS_E1x(bp)) bp->max_cos = BNX2X_MULTI_TX_COS_E1X; - if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) + else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0; - if (CHIP_IS_E3B0(bp)) + else if (CHIP_IS_E3B0(bp)) bp->max_cos = BNX2X_MULTI_TX_COS_E3B0; + else + BNX2X_ERR("unknown chip %x revision %x\n", + CHIP_NUM(bp), CHIP_REV(bp)); + BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos); + + /* We need at least one default status block for slow-path events, + * second status block for the L2 queue, and a third status block for + * CNIC if supported. + */ + if (IS_VF(bp)) + bp->min_msix_vec_cnt = 1; + else if (CNIC_SUPPORT(bp)) + bp->min_msix_vec_cnt = 3; + else /* PF w/o cnic */ + bp->min_msix_vec_cnt = 2; + BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt); + + bp->dump_preset_idx = 1; return rc; } - /**************************************************************************** * General service functions ****************************************************************************/ @@ -10166,85 +12026,88 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) static int bnx2x_open(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); - bool global = false; - int other_engine = BP_PATH(bp) ? 0 : 1; - u32 other_load_counter, load_counter; + int rc; + + bp->stats_init = true; netif_carrier_off(dev); bnx2x_set_power_state(bp, PCI_D0); - other_load_counter = bnx2x_get_load_cnt(bp, other_engine); - load_counter = bnx2x_get_load_cnt(bp, BP_PATH(bp)); - - /* - * If parity had happen during the unload, then attentions + /* If parity had happen during the unload, then attentions * and/or RECOVERY_IN_PROGRES may still be set. In this case we * want the first function loaded on the current engine to * complete the recovery. + * Parity recovery is only relevant for PF driver. */ - if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) || - bnx2x_chk_parity_attn(bp, &global, true)) - do { - /* - * If there are attentions and they are in a global - * blocks, set the GLOBAL_RESET bit regardless whether - * it will be this function that will complete the - * recovery or not. - */ - if (global) - bnx2x_set_reset_global(bp); + if (IS_PF(bp)) { + int other_engine = BP_PATH(bp) ? 
0 : 1; + bool other_load_status, load_status; + bool global = false; + + other_load_status = bnx2x_get_load_status(bp, other_engine); + load_status = bnx2x_get_load_status(bp, BP_PATH(bp)); + if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) || + bnx2x_chk_parity_attn(bp, &global, true)) { + do { + /* If there are attentions and they are in a + * global blocks, set the GLOBAL_RESET bit + * regardless whether it will be this function + * that will complete the recovery or not. + */ + if (global) + bnx2x_set_reset_global(bp); - /* - * Only the first function on the current engine should - * try to recover in open. In case of attentions in - * global blocks only the first in the chip should try - * to recover. - */ - if ((!load_counter && - (!global || !other_load_counter)) && - bnx2x_trylock_leader_lock(bp) && - !bnx2x_leader_reset(bp)) { - netdev_info(bp->dev, "Recovered in open\n"); - break; - } + /* Only the first function on the current + * engine should try to recover in open. In case + * of attentions in global blocks only the first + * in the chip should try to recover. + */ + if ((!load_status && + (!global || !other_load_status)) && + bnx2x_trylock_leader_lock(bp) && + !bnx2x_leader_reset(bp)) { + netdev_info(bp->dev, + "Recovered in open\n"); + break; + } - /* recovery has failed... */ - bnx2x_set_power_state(bp, PCI_D3hot); - bp->recovery_state = BNX2X_RECOVERY_FAILED; + /* recovery has failed... */ + bnx2x_set_power_state(bp, PCI_D3hot); + bp->recovery_state = BNX2X_RECOVERY_FAILED; - netdev_err(bp->dev, "Recovery flow hasn't been properly" - " completed yet. Try again later. If u still see this" - " message after a few retries then power cycle is" - " required.\n"); + BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n" + "If you still see this message after a few retries then power cycle is required.\n"); - return -EAGAIN; - } while (0); + return -EAGAIN; + } while (0); + } + } bp->recovery_state = BNX2X_RECOVERY_DONE; - return bnx2x_nic_load(bp, LOAD_OPEN); + rc = bnx2x_nic_load(bp, LOAD_OPEN); + if (rc) + return rc; + return 0; } /* called with rtnl_lock */ -int bnx2x_close(struct net_device *dev) +static int bnx2x_close(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); /* Unload the driver, release IRQs */ - bnx2x_nic_unload(bp, UNLOAD_CLOSE); - - /* Power off */ - bnx2x_set_power_state(bp, PCI_D3hot); + bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); return 0; } -static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp, - struct bnx2x_mcast_ramrod_params *p) +static int bnx2x_init_mcast_macs_list(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p) { int mc_count = netdev_mc_count(bp->dev); struct bnx2x_mcast_list_elem *mc_mac = - kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC); + kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC); struct netdev_hw_addr *ha; if (!mc_mac) @@ -10263,7 +12126,7 @@ static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp, return 0; } -static inline void bnx2x_free_mcast_macs_list( +static void bnx2x_free_mcast_macs_list( struct bnx2x_mcast_ramrod_params *p) { struct bnx2x_mcast_list_elem *mc_mac = @@ -10281,12 +12144,12 @@ static inline void bnx2x_free_mcast_macs_list( * * We will use zero (0) as a MAC type for these MACs. 
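/* Reviewer sketch: the unicast-list replay just below deliberately maps -EEXIST from an ADD to success, since re-programming an address that is already in the CAM is harmless. The pattern in isolation; add_one_ex is a hypothetical ADD primitive, not a driver function: */
#include <errno.h>
#include <stdint.h>

int add_one_ex(const uint8_t mac[6]);	/* hypothetical ADD primitive */

static int replay_addr_list_ex(const uint8_t (*addrs)[6], int n)
{
	int i, rc;

	for (i = 0; i < n; i++) {
		rc = add_one_ex(addrs[i]);
		if (rc == -EEXIST)
			rc = 0;		/* duplicate add: not an error */
		else if (rc < 0)
			return rc;	/* real failure: stop and report */
	}
	return 0;
}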
*/ -static inline int bnx2x_set_uc_list(struct bnx2x *bp) +static int bnx2x_set_uc_list(struct bnx2x *bp) { int rc; struct net_device *dev = bp->dev; struct netdev_hw_addr *ha; - struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj; + struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; unsigned long ramrod_flags = 0; /* First schedule a cleanup up of old configuration */ @@ -10299,7 +12162,14 @@ static inline int bnx2x_set_uc_list(struct bnx2x *bp) netdev_for_each_uc_addr(ha, dev) { rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true, BNX2X_UC_LIST_MAC, &ramrod_flags); - if (rc < 0) { + if (rc == -EEXIST) { + DP(BNX2X_MSG_SP, + "Failed to schedule ADD operations: %d\n", rc); + /* do not treat adding same MAC as error */ + rc = 0; + + } else if (rc < 0) { + BNX2X_ERR("Failed to schedule ADD operations: %d\n", rc); return rc; @@ -10312,10 +12182,10 @@ static inline int bnx2x_set_uc_list(struct bnx2x *bp) BNX2X_UC_LIST_MAC, &ramrod_flags); } -static inline int bnx2x_set_mc_list(struct bnx2x *bp) +static int bnx2x_set_mc_list(struct bnx2x *bp) { struct net_device *dev = bp->dev; - struct bnx2x_mcast_ramrod_params rparam = {0}; + struct bnx2x_mcast_ramrod_params rparam = {NULL}; int rc = 0; rparam.mcast_obj = &bp->mcast_obj; @@ -10323,8 +12193,7 @@ static inline int bnx2x_set_mc_list(struct bnx2x *bp) /* first, clear all configured multicast MACs */ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); if (rc < 0) { - BNX2X_ERR("Failed to clear multicast " - "configuration: %d\n", rc); + BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc); return rc; } @@ -10332,8 +12201,8 @@ static inline int bnx2x_set_mc_list(struct bnx2x *bp) if (netdev_mc_count(dev)) { rc = bnx2x_init_mcast_macs_list(bp, &rparam); if (rc) { - BNX2X_ERR("Failed to create multicast MACs " - "list: %d\n", rc); + BNX2X_ERR("Failed to create multicast MACs list: %d\n", + rc); return rc; } @@ -10341,8 +12210,8 @@ static inline int bnx2x_set_mc_list(struct bnx2x *bp) rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_ADD); if (rc < 0) - BNX2X_ERR("Failed to set a new multicast " - "configuration: %d\n", rc); + BNX2X_ERR("Failed to set a new multicast configuration: %d\n", + rc); bnx2x_free_mcast_macs_list(&rparam); } @@ -10350,49 +12219,78 @@ static inline int bnx2x_set_mc_list(struct bnx2x *bp) return rc; } - /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */ -void bnx2x_set_rx_mode(struct net_device *dev) +static void bnx2x_set_rx_mode(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); - u32 rx_mode = BNX2X_RX_MODE_NORMAL; if (bp->state != BNX2X_STATE_OPEN) { DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); return; + } else { + /* Schedule an SP task to handle rest of change */ + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE, + NETIF_MSG_IFUP); } +} + +void bnx2x_set_rx_mode_inner(struct bnx2x *bp) +{ + u32 rx_mode = BNX2X_RX_MODE_NORMAL; DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags); - if (dev->flags & IFF_PROMISC) + netif_addr_lock_bh(bp->dev); + + if (bp->dev->flags & IFF_PROMISC) { rx_mode = BNX2X_RX_MODE_PROMISC; - else if ((dev->flags & IFF_ALLMULTI) || - ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) && - CHIP_IS_E1(bp))) + } else if ((bp->dev->flags & IFF_ALLMULTI) || + ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) && + CHIP_IS_E1(bp))) { rx_mode = BNX2X_RX_MODE_ALLMULTI; - else { - /* some multicasts */ - if (bnx2x_set_mc_list(bp) < 0) - rx_mode = BNX2X_RX_MODE_ALLMULTI; - - if (bnx2x_set_uc_list(bp) < 0) - rx_mode = 
BNX2X_RX_MODE_PROMISC; + } else { + if (IS_PF(bp)) { + /* some multicasts */ + if (bnx2x_set_mc_list(bp) < 0) + rx_mode = BNX2X_RX_MODE_ALLMULTI; + + /* release bh lock, as bnx2x_set_uc_list might sleep */ + netif_addr_unlock_bh(bp->dev); + if (bnx2x_set_uc_list(bp) < 0) + rx_mode = BNX2X_RX_MODE_PROMISC; + netif_addr_lock_bh(bp->dev); + } else { + /* configuring mcast to a vf involves sleeping (when we + * wait for the pf's response). + */ + bnx2x_schedule_sp_rtnl(bp, + BNX2X_SP_RTNL_VFPF_MCAST, 0); + } } bp->rx_mode = rx_mode; -#ifdef BCM_CNIC /* handle ISCSI SD mode */ if (IS_MF_ISCSI_SD(bp)) bp->rx_mode = BNX2X_RX_MODE_NONE; -#endif /* Schedule the rx_mode command */ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); + netif_addr_unlock_bh(bp->dev); return; } - bnx2x_set_storm_rx_mode(bp); + if (IS_PF(bp)) { + bnx2x_set_storm_rx_mode(bp); + netif_addr_unlock_bh(bp->dev); + } else { + /* VF will need to request the PF to make this change, and so + * the VF needs to release the bottom-half lock prior to the + * request (as it will likely require sleep on the VF side) + */ + netif_addr_unlock_bh(bp->dev); + bnx2x_vfpf_storm_rx_mode(bp); + } } /* called with rtnl_lock */ @@ -10426,8 +12324,9 @@ static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad, struct bnx2x *bp = netdev_priv(netdev); int rc; - DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x," - " value 0x%x\n", prtad, devad, addr, value); + DP(NETIF_MSG_LINK, + "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n", + prtad, devad, addr, value); /* The HW expects different devad if CL22 is used */ devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad; @@ -10457,10 +12356,12 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static void poll_bnx2x(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); + int i; - disable_irq(bp->pdev->irq); - bnx2x_interrupt(bp->pdev->irq, dev); - enable_irq(bp->pdev->irq); + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + napi_schedule(&bnx2x_fp(bp, fp->index, napi)); + } } #endif @@ -10468,8 +12369,28 @@ static int bnx2x_validate_addr(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); - if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) + /* query the bulletin board for mac address configured by the PF */ + if (IS_VF(bp)) + bnx2x_sample_bulletin(bp); + + if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) { + BNX2X_ERR("Non-valid Ethernet address\n"); return -EADDRNOTAVAIL; + } + return 0; +} + +static int bnx2x_get_phys_port_id(struct net_device *netdev, + struct netdev_phys_port_id *ppid) +{ + struct bnx2x *bp = netdev_priv(netdev); + + if (!(bp->flags & HAS_PHYS_PORT_ID)) + return -EOPNOTSUPP; + + ppid->id_len = sizeof(bp->phys_port_id); + memcpy(ppid->id, bp->phys_port_id, ppid->id_len); + return 0; } @@ -10490,24 +12411,27 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_poll_controller = poll_bnx2x, #endif .ndo_setup_tc = bnx2x_setup_tc, - -#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) +#ifdef CONFIG_BNX2X_SRIOV + .ndo_set_vf_mac = bnx2x_set_vf_mac, + .ndo_set_vf_vlan = bnx2x_set_vf_vlan, + .ndo_get_vf_config = bnx2x_get_vf_config, +#endif +#ifdef NETDEV_FCOE_WWNN .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, #endif + +#ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = bnx2x_low_latency_recv, +#endif + .ndo_get_phys_port_id = bnx2x_get_phys_port_id, }; -static inline int 
bnx2x_set_coherency_mask(struct bnx2x *bp) +static int bnx2x_set_coherency_mask(struct bnx2x *bp) { struct device *dev = &bp->pdev->dev; - if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { - bp->flags |= USING_DAC_FLAG; - if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { - dev_err(dev, "dma_set_coherent_mask failed, " - "aborting\n"); - return -EIO; - } - } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) { + if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 && + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) { dev_err(dev, "System does not support DMA, aborting\n"); return -EIO; } @@ -10515,23 +12439,27 @@ static inline int bnx2x_set_coherency_mask(struct bnx2x *bp) return 0; } -static int __devinit bnx2x_init_dev(struct pci_dev *pdev, - struct net_device *dev, - unsigned long board_type) +static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp) +{ + if (bp->flags & AER_ENABLED) { + pci_disable_pcie_error_reporting(bp->pdev); + bp->flags &= ~AER_ENABLED; + } +} + +static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, + struct net_device *dev, unsigned long board_type) { - struct bnx2x *bp; int rc; + u32 pci_cfg_dword; bool chip_is_e1x = (board_type == BCM57710 || board_type == BCM57711 || board_type == BCM57711E); SET_NETDEV_DEV(dev, &pdev->dev); - bp = netdev_priv(dev); bp->dev = dev; bp->pdev = pdev; - bp->flags = 0; - bp->pf_num = PCI_FUNC(pdev->devfn); rc = pci_enable_device(pdev); if (rc) { @@ -10547,9 +12475,16 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, goto err_out_disable; } - if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { - dev_err(&bp->pdev->dev, "Cannot find second PCI device" - " base address, aborting\n"); + if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { + dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n"); + rc = -ENODEV; + goto err_out_disable; + } + + pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword); + if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) == + PCICFG_REVESION_ID_ERROR_VAL) { + pr_err("PCI device error, probably due to fan failure, aborting\n"); rc = -ENODEV; goto err_out_disable; } @@ -10566,16 +12501,17 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, pci_save_state(pdev); } - bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); - if (bp->pm_cap == 0) { - dev_err(&bp->pdev->dev, - "Cannot find power management capability, aborting\n"); - rc = -EIO; - goto err_out_release; + if (IS_PF(bp)) { + if (!pdev->pm_cap) { + dev_err(&bp->pdev->dev, + "Cannot find power management capability, aborting\n"); + rc = -EIO; + goto err_out_release; + } } if (!pci_is_pcie(pdev)) { - dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n"); + dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n"); rc = -EIO; goto err_out_release; } @@ -10598,54 +12534,86 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, goto err_out_release; } - bnx2x_set_power_state(bp, PCI_D0); + /* In E1/E1H use pci device function given by kernel. + * In E2/E3 read physical function from ME register since these chips + * support Physical Device Assignment where kernel BDF maybe arbitrary + * (depending on hypervisor). 
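/* Reviewer sketch: bnx2x_set_coherency_mask() above collapses the old two-step streaming/coherent mask dance into dma_set_mask_and_coherent() with a 64-then-32 bit fallback. The same shape as it would sit in a generic probe path; assumes <linux/dma-mapping.h>, a sketch rather than the driver's code: */
static int set_dma_masks_ex(struct device *dev)
{
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;			/* prefer 64-bit DMA */
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return 0;			/* fall back to 32-bit */
	dev_err(dev, "no usable DMA configuration\n");
	return -EIO;
}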
+ */ + if (chip_is_e1x) { + bp->pf_num = PCI_FUNC(pdev->devfn); + } else { + /* chip is E2/3 */ + pci_read_config_dword(bp->pdev, + PCICFG_ME_REGISTER, &pci_cfg_dword); + bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >> + ME_REG_ABS_PF_NUM_SHIFT); + } + BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num); /* clean indirect addresses */ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, PCICFG_VENDOR_ID_OFFSET); + + /* AER (Advanced Error Reporting) configuration */ + rc = pci_enable_pcie_error_reporting(pdev); + if (!rc) + bp->flags |= AER_ENABLED; + else + BNX2X_DEV_INFO("Failed to configure PCIe AER [%d]\n", rc); + /* * Clean the following indirect addresses for all functions since they * are not used by the driver. */ - REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); - REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); - REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); - REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); + if (IS_PF(bp)) { + REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); + + if (chip_is_e1x) { + REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); + } - if (chip_is_e1x) { - REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); - REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); - REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); - REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); + /* Enable internal target-read (in case we are probed after PF + * FLR). Must be done prior to any BAR read access. Only for + * 57712 and up + */ + if (!chip_is_e1x) + REG_WR(bp, + PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); } - /* - * Enable internal target-read (in case we are probed after PF FLR). - * Must be done prior to any BAR read access.
Only for 57712 and up - */ - if (!chip_is_e1x) - REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); - - /* Reset the load counter */ - bnx2x_clear_load_cnt(bp); - dev->watchdog_timeo = TX_TIMEOUT; dev->netdev_ops = &bnx2x_netdev_ops; - bnx2x_set_ethtool_ops(dev); + bnx2x_set_ethtool_ops(bp, dev); dev->priv_flags |= IFF_UNICAST_FLT; dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_LRO | - NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX; + NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | + NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | + NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX; + if (!CHIP_IS_E1x(bp)) { + dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT; + dev->hw_enc_features = + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | + NETIF_F_GSO_IPIP | + NETIF_F_GSO_SIT | + NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL; + } dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; - dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX; - if (bp->flags & USING_DAC_FLAG) - dev->features |= NETIF_F_HIGHDMA; + dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX; + dev->features |= NETIF_F_HIGHDMA; /* Add Loopback capability to the device */ dev->hw_features |= NETIF_F_LOOPBACK; @@ -10670,35 +12638,25 @@ err_out_release: err_out_disable: pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); err_out: return rc; } -static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp, - int *width, int *speed) -{ - u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL); - - *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT; - - /* return value of 1=2.5GHz 2=5GHz */ - *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT; -} - static int bnx2x_check_firmware(struct bnx2x *bp) { const struct firmware *firmware = bp->firmware; struct bnx2x_fw_file_hdr *fw_hdr; struct bnx2x_fw_file_section *sections; u32 offset, len, num_ops; - u16 *ops_offsets; + __be16 *ops_offsets; int i; const u8 *fw_ver; - if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) + if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) { + BNX2X_ERR("Wrong FW size\n"); return -EINVAL; + } fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data; sections = (struct bnx2x_fw_file_section *)fw_hdr; @@ -10709,21 +12667,19 @@ static int bnx2x_check_firmware(struct bnx2x *bp) offset = be32_to_cpu(sections[i].offset); len = be32_to_cpu(sections[i].len); if (offset + len > firmware->size) { - dev_err(&bp->pdev->dev, - "Section %d length is out of bounds\n", i); + BNX2X_ERR("Section %d length is out of bounds\n", i); return -EINVAL; } } /* Likewise for the init_ops offsets */ offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset); - ops_offsets = (u16 *)(firmware->data + offset); + ops_offsets = (__force __be16 *)(firmware->data + offset); num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op); for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) { if (be16_to_cpu(ops_offsets[i]) > num_ops) { - dev_err(&bp->pdev->dev, - "Section offset %d is out of bounds\n", i); + BNX2X_ERR("Section offset %d is out of bounds\n", i); return -EINVAL; } } @@ -10735,10 +12691,9 @@ static int bnx2x_check_firmware(struct bnx2x *bp) (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) || (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) || (fw_ver[3] != 
BCM_5710_FW_ENGINEERING_VERSION)) { - dev_err(&bp->pdev->dev, - "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n", - fw_ver[0], fw_ver[1], fw_ver[2], - fw_ver[3], BCM_5710_FW_MAJOR_VERSION, + BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n", + fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3], + BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION, BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_ENGINEERING_VERSION); @@ -10748,7 +12703,7 @@ static int bnx2x_check_firmware(struct bnx2x *bp) return 0; } -static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n) +static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n) { const __be32 *source = (const __be32 *)_source; u32 *target = (u32 *)_target; @@ -10762,7 +12717,7 @@ static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n) Ops array is stored in the following format: {op(8bit), offset(24bit, big endian), data(32bit, big endian)} */ -static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n) +static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n) { const __be32 *source = (const __be32 *)_source; struct raw_op *target = (struct raw_op *)_target; @@ -10776,11 +12731,10 @@ static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n) } } -/** - * IRO array is stored in the following format: +/* IRO array is stored in the following format: * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) } */ -static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n) +static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n) { const __be32 *source = (const __be32 *)_source; struct iro *target = (struct iro *)_target; @@ -10800,7 +12754,7 @@ static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n) } } -static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n) +static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n) { const __be16 *source = (const __be16 *)_source; u16 *target = (u16 *)_target; @@ -10814,48 +12768,44 @@ static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n) do { \ u32 len = be32_to_cpu(fw_hdr->arr.len); \ bp->arr = kmalloc(len, GFP_KERNEL); \ - if (!bp->arr) { \ - pr_err("Failed to allocate %d bytes for "#arr"\n", len); \ + if (!bp->arr) \ goto lbl; \ - } \ func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \ (u8 *)bp->arr, len); \ } while (0) -int bnx2x_init_firmware(struct bnx2x *bp) +static int bnx2x_init_firmware(struct bnx2x *bp) { + const char *fw_file_name; struct bnx2x_fw_file_hdr *fw_hdr; int rc; + if (bp->firmware) + return 0; - if (!bp->firmware) { - const char *fw_file_name; - - if (CHIP_IS_E1(bp)) - fw_file_name = FW_FILE_NAME_E1; - else if (CHIP_IS_E1H(bp)) - fw_file_name = FW_FILE_NAME_E1H; - else if (!CHIP_IS_E1x(bp)) - fw_file_name = FW_FILE_NAME_E2; - else { - BNX2X_ERR("Unsupported chip revision\n"); - return -EINVAL; - } - BNX2X_DEV_INFO("Loading %s\n", fw_file_name); + if (CHIP_IS_E1(bp)) + fw_file_name = FW_FILE_NAME_E1; + else if (CHIP_IS_E1H(bp)) + fw_file_name = FW_FILE_NAME_E1H; + else if (!CHIP_IS_E1x(bp)) + fw_file_name = FW_FILE_NAME_E2; + else { + BNX2X_ERR("Unsupported chip revision\n"); + return -EINVAL; + } + BNX2X_DEV_INFO("Loading %s\n", fw_file_name); - rc = request_firmware(&bp->firmware, fw_file_name, - &bp->pdev->dev); - if (rc) { - BNX2X_ERR("Can't load firmware file %s\n", - fw_file_name); - goto request_firmware_exit; - } + rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev); + if (rc) { + BNX2X_ERR("Can't load 
firmware file %s\n", + fw_file_name); + goto request_firmware_exit; + } - rc = bnx2x_check_firmware(bp); - if (rc) { - BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name); - goto request_firmware_exit; - } + rc = bnx2x_check_firmware(bp); + if (rc) { + BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name); + goto request_firmware_exit; } fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data; @@ -10901,6 +12851,7 @@ init_ops_alloc_err: kfree(bp->init_data); request_firmware_exit: release_firmware(bp->firmware); + bp->firmware = NULL; return rc; } @@ -10914,7 +12865,6 @@ static void bnx2x_release_firmware(struct bnx2x *bp) bp->firmware = NULL; } - static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = { .init_hw_cmn_chip = bnx2x_init_hw_common_chip, .init_hw_cmn = bnx2x_init_hw_common, @@ -10940,17 +12890,22 @@ void bnx2x__init_func_obj(struct bnx2x *bp) bnx2x_init_func_obj(bp, &bp->func_obj, bnx2x_sp(bp, func_rdata), bnx2x_sp_mapping(bp, func_rdata), + bnx2x_sp(bp, func_afex_rdata), + bnx2x_sp_mapping(bp, func_afex_rdata), &bnx2x_func_sp_drv); } /* must be called after sriov-enable */ -static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp) +static int bnx2x_set_qm_cid_count(struct bnx2x *bp) { - int cid_count = BNX2X_L2_CID_COUNT(bp); + int cid_count = BNX2X_L2_MAX_CID(bp); + + if (IS_SRIOV(bp)) + cid_count += BNX2X_VF_CIDS; + + if (CNIC_SUPPORT(bp)) + cid_count += CNIC_CID_MAX; -#ifdef BCM_CNIC - cid_count += CNIC_CID_MAX; -#endif return roundup(cid_count, QM_CID_ROUND); } @@ -10960,134 +12915,185 @@ static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp) * @dev: pci device * */ -static inline int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev) +static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt) { - int pos; - u16 control; - - pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); + int index; + u16 control = 0; /* * If MSI-X is not supported - return number of SBs needed to support * one fast path queue: one FP queue + SB for CNIC */ - if (!pos) - return 1 + CNIC_PRESENT; + if (!pdev->msix_cap) { + dev_info(&pdev->dev, "no msix capability found\n"); + return 1 + cnic_cnt; + } + dev_info(&pdev->dev, "msix capability found\n"); /* * The value in the PCI configuration space is the index of the last * entry, namely one less than the actual size of the table, which is * exactly what we want to return from this function: number of all SBs * without the default SB. + * For VFs there is no default SB, then we return (index+1). */ - pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control); - return control & PCI_MSIX_FLAGS_QSIZE; + pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control); + + index = control & PCI_MSIX_FLAGS_QSIZE; + + return index; } -static int __devinit bnx2x_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) +static int set_max_cos_est(int chip_id) { - struct net_device *dev = NULL; - struct bnx2x *bp; - int pcie_width, pcie_speed; - int rc, max_non_def_sbs; - int rx_count, tx_count, rss_count; - /* - * An estimated maximum supported CoS number according to the chip - * version. - * We will try to roughly estimate the maximum number of CoSes this chip - * may support in order to minimize the memory allocated for Tx - * netdev_queue's. This number will be accurately calculated during the - * initialization of bp->max_cos based on the chip versions AND chip - * revision in the bnx2x_init_bp(). 
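/* Reviewer sketch: bnx2x_init_firmware() above caches the blob on first use, picks the file by chip family, and clears bp->firmware on any validation failure so no stale pointer survives. Condensed to its control flow as a usage-style restatement (the unsupported-revision error path is elided); not a replacement for the driver code: */
static int load_fw_ex(struct bnx2x *bp)
{
	const char *name;
	int rc;

	if (bp->firmware)
		return 0;			/* already loaded */

	name = CHIP_IS_E1(bp) ? FW_FILE_NAME_E1 :
	       CHIP_IS_E1H(bp) ? FW_FILE_NAME_E1H : FW_FILE_NAME_E2;

	rc = request_firmware(&bp->firmware, name, &bp->pdev->dev);
	if (rc)
		return rc;

	rc = bnx2x_check_firmware(bp);		/* size/offset/version checks */
	if (rc) {
		release_firmware(bp->firmware);
		bp->firmware = NULL;		/* never keep a stale blob */
	}
	return rc;
}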
- */ - u8 max_cos_est = 0; - - switch (ent->driver_data) { + switch (chip_id) { case BCM57710: case BCM57711: case BCM57711E: - max_cos_est = BNX2X_MULTI_TX_COS_E1X; - break; - + return BNX2X_MULTI_TX_COS_E1X; case BCM57712: case BCM57712_MF: - max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0; - break; - + return BNX2X_MULTI_TX_COS_E2_E3A0; case BCM57800: case BCM57800_MF: case BCM57810: case BCM57810_MF: - case BCM57840: + case BCM57840_4_10: + case BCM57840_2_20: + case BCM57840_O: + case BCM57840_MFO: case BCM57840_MF: - max_cos_est = BNX2X_MULTI_TX_COS_E3B0; - break; - + case BCM57811: + case BCM57811_MF: + return BNX2X_MULTI_TX_COS_E3B0; + case BCM57712_VF: + case BCM57800_VF: + case BCM57810_VF: + case BCM57840_VF: + case BCM57811_VF: + return 1; default: - pr_err("Unknown board_type (%ld), aborting\n", - ent->driver_data); + pr_err("Unknown board_type (%d), aborting\n", chip_id); return -ENODEV; } +} + +static int set_is_vf(int chip_id) +{ + switch (chip_id) { + case BCM57712_VF: + case BCM57800_VF: + case BCM57810_VF: + case BCM57840_VF: + case BCM57811_VF: + return true; + default: + return false; + } +} - max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev); +static int bnx2x_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *dev = NULL; + struct bnx2x *bp; + enum pcie_link_width pcie_width; + enum pci_bus_speed pcie_speed; + int rc, max_non_def_sbs; + int rx_count, tx_count, rss_count, doorbell_size; + int max_cos_est; + bool is_vf; + int cnic_cnt; - /* !!! FIXME !!! - * Do not allow the maximum SB count to grow above 16 - * since Special CIDs starts from 16*BNX2X_MULTI_TX_COS=48. - * We will use the FP_SB_MAX_E1x macro for this matter. + /* An estimated maximum supported CoS number according to the chip + * version. + * We will try to roughly estimate the maximum number of CoSes this chip + * may support in order to minimize the memory allocated for Tx + * netdev_queue's. This number will be accurately calculated during the + * initialization of bp->max_cos based on the chip versions AND chip + * revision in the bnx2x_init_bp(). */ - max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs); + max_cos_est = set_max_cos_est(ent->driver_data); + if (max_cos_est < 0) + return max_cos_est; + is_vf = set_is_vf(ent->driver_data); + cnic_cnt = is_vf ? 0 : 1; - WARN_ON(!max_non_def_sbs); + max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt); + + /* add another SB for VF as it has no default SB */ + max_non_def_sbs += is_vf ? 
1 : 0; /* Maximum number of RSS queues: one IGU SB goes to CNIC */ - rss_count = max_non_def_sbs - CNIC_PRESENT; + rss_count = max_non_def_sbs - cnic_cnt; + + if (rss_count < 1) + return -EINVAL; /* Maximum number of netdev Rx queues: RSS + FCoE L2 */ - rx_count = rss_count + FCOE_PRESENT; + rx_count = rss_count + cnic_cnt; - /* - * Maximum number of netdev Tx queues: - * Maximum TSS queues * Maximum supported number of CoS + FCoE L2 + /* Maximum number of netdev Tx queues: + * Maximum TSS queues * Maximum supported number of CoS + FCoE L2 */ - tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT; + tx_count = rss_count * max_cos_est + cnic_cnt; /* dev zeroed in init_etherdev */ dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count); - if (!dev) { - dev_err(&pdev->dev, "Cannot allocate net device\n"); + if (!dev) return -ENOMEM; - } bp = netdev_priv(dev); - DP(NETIF_MSG_DRV, "Allocated netdev with %d tx and %d rx queues\n", - tx_count, rx_count); + bp->flags = 0; + if (is_vf) + bp->flags |= IS_VF_FLAG; bp->igu_sb_cnt = max_non_def_sbs; + bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM; bp->msg_enable = debug; + bp->cnic_support = cnic_cnt; + bp->cnic_probe = bnx2x_cnic_probe; + pci_set_drvdata(pdev, dev); - rc = bnx2x_init_dev(pdev, dev, ent->driver_data); + rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data); if (rc < 0) { free_netdev(dev); return rc; } - DP(NETIF_MSG_DRV, "max_non_def_sbs %d\n", max_non_def_sbs); + BNX2X_DEV_INFO("This is a %s function\n", + IS_PF(bp) ? "physical" : "virtual"); + BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off"); + BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs); + BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n", + tx_count, rx_count); rc = bnx2x_init_bp(bp); if (rc) goto init_one_exit; - /* - * Map doorbels here as we need the real value of bp->max_cos which - * is initialized in bnx2x_init_bp(). + /* Map doorbells here as we need the real value of bp->max_cos which + * is initialized in bnx2x_init_bp() to determine the number of + * l2 connections. 
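/* Reviewer arithmetic check of the queue sizing above, for a hypothetical PF whose MSI-X capability reports a QSIZE field of 16 (i.e. a 17-entry table), with CNIC support and an estimated max_cos of 3; the numbers are illustrative only: */
int max_non_def_sbs_ex = 16;		/* QSIZE field = table size - 1 */
int cnic_cnt_ex = 1;			/* PF with CNIC support */
int rss_count_ex = 16 - 1;		/* = 15 RSS queues */
int rx_count_ex = 15 + 1;		/* = 16 netdev Rx queues */
int tx_count_ex = 15 * 3 + 1;		/* = 46 netdev Tx queues */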
*/ - bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), - min_t(u64, BNX2X_DB_SIZE(bp), - pci_resource_len(pdev, 2))); + if (IS_VF(bp)) { + bp->doorbells = bnx2x_vf_doorbells(bp); + rc = bnx2x_vf_pci_alloc(bp); + if (rc) + goto init_one_exit; + } else { + doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT); + if (doorbell_size > pci_resource_len(pdev, 2)) { + dev_err(&bp->pdev->dev, + "Cannot map doorbells, bar size too small, aborting\n"); + rc = -ENOMEM; + goto init_one_exit; + } + bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), + doorbell_size); + } if (!bp->doorbells) { dev_err(&bp->pdev->dev, "Cannot map doorbell space, aborting\n"); @@ -11095,57 +13101,77 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, goto init_one_exit; } + if (IS_VF(bp)) { + rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count); + if (rc) + goto init_one_exit; + } + + /* Enable SRIOV if capability found in configuration space */ + rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS); + if (rc) + goto init_one_exit; + /* calc qm_cid_count */ bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); + BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count); -#ifdef BCM_CNIC - /* disable FCOE L2 queue for E1x */ + /* disable FCOE L2 queue for E1x*/ if (CHIP_IS_E1x(bp)) bp->flags |= NO_FCOE_FLAG; -#endif + /* Set bp->num_queues for MSI-X mode*/ + bnx2x_set_num_queues(bp); /* Configure interrupt mode: try to enable MSI-X/MSI if - * needed, set bp->num_queues appropriately. + * needed. */ - bnx2x_set_int_mode(bp); - - /* Add all NAPI objects */ - bnx2x_add_all_napi(bp); + rc = bnx2x_set_int_mode(bp); + if (rc) { + dev_err(&pdev->dev, "Cannot set interrupts\n"); + goto init_one_exit; + } + BNX2X_DEV_INFO("set interrupts successfully\n"); + /* register the net device */ rc = register_netdev(dev); if (rc) { dev_err(&pdev->dev, "Cannot register net device\n"); goto init_one_exit; } + BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name); -#ifdef BCM_CNIC if (!NO_FCOE(bp)) { /* Add storage MAC address */ rtnl_lock(); dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); rtnl_unlock(); } -#endif - - bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); - - netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", - board_info[ent->driver_data].name, - (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), - pcie_width, - ((!CHIP_IS_E2(bp) && pcie_speed == 2) || - (CHIP_IS_E2(bp) && pcie_speed == 1)) ? - "5GHz (Gen2)" : "2.5GHz", - dev->base_addr, bp->pdev->irq, dev->dev_addr); + if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) || + pcie_speed == PCI_SPEED_UNKNOWN || + pcie_width == PCIE_LNK_WIDTH_UNKNOWN) + BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n"); + else + BNX2X_DEV_INFO( + "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", + board_info[ent->driver_data].name, + (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), + pcie_width, + pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" : + pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" : + pcie_speed == PCIE_SPEED_8_0GT ? 
"8.0GHz" : + "Unknown", + dev->base_addr, bp->pdev->irq, dev->dev_addr); return 0; init_one_exit: + bnx2x_disable_pcie_error_reporting(bp); + if (bp->regview) iounmap(bp->regview); - if (bp->doorbells) + if (IS_PF(bp) && bp->doorbells) iounmap(bp->doorbells); free_netdev(dev); @@ -11154,141 +13180,140 @@ init_one_exit: pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); return rc; } -static void __devexit bnx2x_remove_one(struct pci_dev *pdev) +static void __bnx2x_remove(struct pci_dev *pdev, + struct net_device *dev, + struct bnx2x *bp, + bool remove_netdev) { - struct net_device *dev = pci_get_drvdata(pdev); - struct bnx2x *bp; - - if (!dev) { - dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); - return; - } - bp = netdev_priv(dev); - -#ifdef BCM_CNIC /* Delete storage MAC address */ if (!NO_FCOE(bp)) { rtnl_lock(); dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); rtnl_unlock(); } -#endif #ifdef BCM_DCBNL /* Delete app tlvs from dcbnl */ bnx2x_dcbnl_update_applist(bp, true); #endif - unregister_netdev(dev); + if (IS_PF(bp) && + !BP_NOMCP(bp) && + (bp->flags & BC_SUPPORTS_RMMOD_CMD)) + bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0); - /* Delete all NAPI objects */ - bnx2x_del_all_napi(bp); + /* Close the interface - either directly or implicitly */ + if (remove_netdev) { + unregister_netdev(dev); + } else { + rtnl_lock(); + dev_close(dev); + rtnl_unlock(); + } + + bnx2x_iov_remove_one(bp); /* Power on: we can't let PCI layer write to us while we are in D3 */ - bnx2x_set_power_state(bp, PCI_D0); + if (IS_PF(bp)) + bnx2x_set_power_state(bp, PCI_D0); /* Disable MSI/MSI-X */ bnx2x_disable_msi(bp); /* Power off */ - bnx2x_set_power_state(bp, PCI_D3hot); + if (IS_PF(bp)) + bnx2x_set_power_state(bp, PCI_D3hot); /* Make sure RESET task is not scheduled before continuing */ cancel_delayed_work_sync(&bp->sp_rtnl_task); - if (bp->regview) - iounmap(bp->regview); + /* send message via vfpf channel to release the resources of this vf */ + if (IS_VF(bp)) + bnx2x_vfpf_release(bp); - if (bp->doorbells) - iounmap(bp->doorbells); + /* Assumes no further PCIe PM changes will occur */ + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, bp->wol); + pci_set_power_state(pdev, PCI_D3hot); + } - bnx2x_release_firmware(bp); + bnx2x_disable_pcie_error_reporting(bp); + if (remove_netdev) { + if (bp->regview) + iounmap(bp->regview); - bnx2x_free_mem_bp(bp); + /* For vfs, doorbells are part of the regview and were unmapped + * along with it. FW is only loaded by PF. 
+ */ + if (IS_PF(bp)) { + if (bp->doorbells) + iounmap(bp->doorbells); - free_netdev(dev); + bnx2x_release_firmware(bp); + } else { + bnx2x_vf_pci_dealloc(bp); + } + bnx2x_free_mem_bp(bp); - if (atomic_read(&pdev->enable_cnt) == 1) - pci_release_regions(pdev); + free_netdev(dev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); + if (atomic_read(&pdev->enable_cnt) == 1) + pci_release_regions(pdev); + + pci_disable_device(pdev); + } } -static int bnx2x_eeh_nic_unload(struct bnx2x *bp) +static void bnx2x_remove_one(struct pci_dev *pdev) { - int i; + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp; - bp->state = BNX2X_STATE_ERROR; + if (!dev) { + dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); + return; + } + bp = netdev_priv(dev); + + __bnx2x_remove(pdev, dev, bp, true); +} + +static int bnx2x_eeh_nic_unload(struct bnx2x *bp) +{ + bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; bp->rx_mode = BNX2X_RX_MODE_NONE; -#ifdef BCM_CNIC - bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); -#endif + if (CNIC_LOADED(bp)) + bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); + /* Stop Tx */ bnx2x_tx_disable(bp); - - bnx2x_netif_stop(bp, 0); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); + if (CNIC_LOADED(bp)) + bnx2x_del_all_napi_cnic(bp); + netdev_reset_tc(bp->dev); del_timer_sync(&bp->timer); + cancel_delayed_work_sync(&bp->sp_task); + cancel_delayed_work_sync(&bp->period_task); - bnx2x_stats_handle(bp, STATS_EVENT_STOP); - - /* Release IRQs */ - bnx2x_free_irq(bp); - - /* Free SKBs, SGEs, TPA pool and driver internals */ - bnx2x_free_skbs(bp); - - for_each_rx_queue(bp, i) - bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); - - bnx2x_free_mem(bp); + spin_lock_bh(&bp->stats_lock); + bp->stats_state = STATS_STATE_DISABLED; + spin_unlock_bh(&bp->stats_lock); - bp->state = BNX2X_STATE_CLOSED; + bnx2x_save_statistics(bp); netif_carrier_off(bp->dev); return 0; } -static void bnx2x_eeh_recover(struct bnx2x *bp) -{ - u32 val; - - mutex_init(&bp->port.phy_mutex); - - bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); - bp->link_params.shmem_base = bp->common.shmem_base; - BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base); - - if (!bp->common.shmem_base || - (bp->common.shmem_base < 0xA0000) || - (bp->common.shmem_base >= 0xC0000)) { - BNX2X_DEV_INFO("MCP not active\n"); - bp->flags |= NO_MCP_FLAG; - return; - } - - val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); - if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) - != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) - BNX2X_ERR("BAD MCP validity signature\n"); - - if (!BP_NOMCP(bp)) { - bp->fw_seq = - (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & - DRV_MSG_SEQ_NUMBER_MASK); - BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); - } -} - /** * bnx2x_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device @@ -11305,6 +13330,8 @@ static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev, rtnl_lock(); + BNX2X_ERR("IO error detected\n"); + netif_device_detach(dev); if (state == pci_channel_io_perm_failure) { @@ -11315,6 +13342,8 @@ static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev, if (netif_running(dev)) bnx2x_eeh_nic_unload(bp); + bnx2x_prev_path_mark_eeh(bp); + pci_disable_device(pdev); rtnl_unlock(); @@ -11333,9 +13362,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct bnx2x *bp = netdev_priv(dev); + int i; rtnl_lock(); - + BNX2X_ERR("IO slot reset 
initializing...\n"); if (pci_enable_device(pdev)) { dev_err(&pdev->dev, "Cannot re-enable PCI device after reset\n"); @@ -11345,12 +13375,61 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) pci_set_master(pdev); pci_restore_state(pdev); + pci_save_state(pdev); if (netif_running(dev)) bnx2x_set_power_state(bp, PCI_D0); + if (netif_running(dev)) { + BNX2X_ERR("IO slot reset --> driver unload\n"); + + /* MCP should have been reset; Need to wait for validity */ + bnx2x_init_shmem(bp); + + if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { + u32 v; + + v = SHMEM2_RD(bp, + drv_capabilities_flag[BP_FW_MB_IDX(bp)]); + SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], + v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2); + } + bnx2x_drain_tx_queues(bp); + bnx2x_send_unload_req(bp, UNLOAD_RECOVERY); + bnx2x_netif_stop(bp, 1); + bnx2x_free_irq(bp); + + /* Report UNLOAD_DONE to MCP */ + bnx2x_send_unload_done(bp, true); + + bp->sp_state = 0; + bp->port.pmf = 0; + + bnx2x_prev_unload(bp); + + /* We should have reset the engine, so it's fair to + * assume the FW will no longer write to the bnx2x driver. + */ + bnx2x_squeeze_objects(bp); + bnx2x_free_skbs(bp); + for_each_rx_queue(bp, i) + bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); + bnx2x_free_fp_mem(bp); + bnx2x_free_mem(bp); + + bp->state = BNX2X_STATE_CLOSED; + } + rtnl_unlock(); + /* If AER, perform cleanup of the PCIe registers */ + if (bp->flags & AER_ENABLED) { + if (pci_cleanup_aer_uncorrect_error_status(pdev)) + BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n"); + else + DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n"); + } + return PCI_ERS_RESULT_RECOVERED; } @@ -11367,14 +13446,14 @@ static void bnx2x_io_resume(struct pci_dev *pdev) struct bnx2x *bp = netdev_priv(dev); if (bp->recovery_state != BNX2X_RECOVERY_DONE) { - netdev_err(bp->dev, "Handling parity error recovery. " - "Try again later\n"); + netdev_err(bp->dev, "Handling parity error recovery. Try again later\n"); return; } rtnl_lock(); - bnx2x_eeh_recover(bp); + bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK; if (netif_running(dev)) bnx2x_nic_load(bp, LOAD_NORMAL); @@ -11384,20 +13463,47 @@ static void bnx2x_io_resume(struct pci_dev *pdev) rtnl_unlock(); } -static struct pci_error_handlers bnx2x_err_handler = { +static const struct pci_error_handlers bnx2x_err_handler = { .error_detected = bnx2x_io_error_detected, .slot_reset = bnx2x_io_slot_reset, .resume = bnx2x_io_resume, }; +static void bnx2x_shutdown(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp; + + if (!dev) + return; + + bp = netdev_priv(dev); + if (!bp) + return; + + rtnl_lock(); + netif_device_detach(dev); + rtnl_unlock(); + + /* Don't remove the netdevice, as there are scenarios which will cause + * the kernel to hang, e.g., when trying to remove bnx2i while the + * rootfs is mounted from SAN.
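For illustration, the teardown split this patch introduces - bnx2x_remove_one() and bnx2x_shutdown() both funneling into __bnx2x_remove(), differing only in whether the netdev is unregistered - is a common shape for PCI drivers whose .shutdown hook must be gentler than .remove. A condensed, hypothetical model (names invented, shared hardware teardown elided):

	static void __example_teardown(struct pci_dev *pdev,
				       struct net_device *dev,
				       bool remove_netdev)
	{
		if (remove_netdev) {
			unregister_netdev(dev);	/* full driver removal */
		} else {
			rtnl_lock();		/* system shutdown: just quiesce */
			dev_close(dev);
			rtnl_unlock();
		}
		/* ...shared HW/PCI teardown would continue here... */
	}

	static void example_remove(struct pci_dev *pdev)
	{
		__example_teardown(pdev, pci_get_drvdata(pdev), true);
	}

	static void example_shutdown(struct pci_dev *pdev)
	{
		__example_teardown(pdev, pci_get_drvdata(pdev), false);
	}
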
+ */ + __bnx2x_remove(pdev, dev, bp, false); +} + static struct pci_driver bnx2x_pci_driver = { .name = DRV_MODULE_NAME, .id_table = bnx2x_pci_tbl, .probe = bnx2x_init_one, - .remove = __devexit_p(bnx2x_remove_one), + .remove = bnx2x_remove_one, .suspend = bnx2x_suspend, .resume = bnx2x_resume, .err_handler = &bnx2x_err_handler, +#ifdef CONFIG_BNX2X_SRIOV + .sriov_configure = bnx2x_sriov_configure, +#endif + .shutdown = bnx2x_shutdown, }; static int __init bnx2x_init(void) @@ -11411,20 +13517,38 @@ static int __init bnx2x_init(void) pr_err("Cannot create workqueue\n"); return -ENOMEM; } + bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov"); + if (!bnx2x_iov_wq) { + pr_err("Cannot create iov workqueue\n"); + destroy_workqueue(bnx2x_wq); + return -ENOMEM; + } ret = pci_register_driver(&bnx2x_pci_driver); if (ret) { pr_err("Cannot register driver\n"); destroy_workqueue(bnx2x_wq); + destroy_workqueue(bnx2x_iov_wq); } return ret; } static void __exit bnx2x_cleanup(void) { + struct list_head *pos, *q; + pci_unregister_driver(&bnx2x_pci_driver); destroy_workqueue(bnx2x_wq); + destroy_workqueue(bnx2x_iov_wq); + + /* Free globally allocated resources */ + list_for_each_safe(pos, q, &bnx2x_prev_list) { + struct bnx2x_prev_path_list *tmp = + list_entry(pos, struct bnx2x_prev_path_list, list); + list_del(pos); + kfree(tmp); + } } void bnx2x_notify_link_changed(struct bnx2x *bp) @@ -11435,17 +13559,16 @@ void bnx2x_notify_link_changed(struct bnx2x *bp) module_init(bnx2x_init); module_exit(bnx2x_cleanup); -#ifdef BCM_CNIC /** * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s). * * @bp: driver handle * @set: set or clear the CAM entry * - * This function will wait until the ramdord completion returns. + * This function will wait until the ramrod completion returns. * Return 0 if success, -ENODEV if ramrod doesn't return. */ -static inline int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) +static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) { unsigned long ramrod_flags = 0; @@ -11459,6 +13582,7 @@ static inline int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) { struct eth_spe *spe; + int cxt_index, cxt_offset; #ifdef BNX2X_STOP_ON_ERROR if (unlikely(bp->panic)) @@ -11469,7 +13593,6 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) BUG_ON(bp->cnic_spq_pending < count); bp->cnic_spq_pending -= count; - for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) { u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type) & SPE_HDR_CONN_TYPE) >> @@ -11481,10 +13604,16 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) * ramrod */ if (type == ETH_CONNECTION_TYPE) { - if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) - bnx2x_set_ctx_validation(bp, &bp->context. - vcxt[BNX2X_ISCSI_ETH_CID].eth, - BNX2X_ISCSI_ETH_CID); + if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) { + cxt_index = BNX2X_ISCSI_ETH_CID(bp) / + ILT_PAGE_CIDS; + cxt_offset = BNX2X_ISCSI_ETH_CID(bp) - + (cxt_index * ILT_PAGE_CIDS); + bnx2x_set_ctx_validation(bp, + &bp->context[cxt_index]. 
+ vcxt[cxt_offset].eth, + BNX2X_ISCSI_ETH_CID(bp)); + } } /* @@ -11519,7 +13648,7 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) spe = bnx2x_sp_get_next(bp); *spe = *bp->cnic_kwq_cons; - DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n", + DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n", bp->cnic_spq_pending, bp->cnic_kwq_pending, count); if (bp->cnic_kwq_cons == bp->cnic_kwq_last) @@ -11538,10 +13667,18 @@ static int bnx2x_cnic_sp_queue(struct net_device *dev, int i; #ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) + if (unlikely(bp->panic)) { + BNX2X_ERR("Can't post to SP queue while panic\n"); return -EIO; + } #endif + if ((bp->recovery_state != BNX2X_RECOVERY_DONE) && + (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) { + BNX2X_ERR("Handling parity error recovery. Try again later\n"); + return -EAGAIN; + } + spin_lock_bh(&bp->spq_lock); for (i = 0; i < count; i++) { @@ -11554,7 +13691,7 @@ static int bnx2x_cnic_sp_queue(struct net_device *dev, bp->cnic_kwq_pending++; - DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n", + DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n", spe->hdr.conn_and_cmd_data, spe->hdr.type, spe->data.update_data_addr.hi, spe->data.update_data_addr.lo, @@ -11628,7 +13765,6 @@ static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err) bnx2x_cnic_sp_post(bp, 0); } - /* Called with netif_addr_lock_bh() taken. * Sets an rx_mode config for an iSCSI ETH client. * Doesn't block. @@ -11669,7 +13805,6 @@ static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start) } } - static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) { struct bnx2x *bp = netdev_priv(dev); @@ -11757,27 +13892,52 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: { int count = ctl->data.credit.credit_count; - smp_mb__before_atomic_inc(); + smp_mb__before_atomic(); atomic_add(count, &bp->cq_spq_left); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); break; } case DRV_CTL_ULP_REGISTER_CMD: { - int ulp_type = ctl->data.ulp_type; + int ulp_type = ctl->data.register_data.ulp_type; if (CHIP_IS_E3(bp)) { int idx = BP_FW_MB_IDX(bp); - u32 cap; + u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]); + int path = BP_PATH(bp); + int port = BP_PORT(bp); + int i; + u32 scratch_offset; + u32 *host_addr; - cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]); + /* first write capability to shmem2 */ if (ulp_type == CNIC_ULP_ISCSI) cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI; else if (ulp_type == CNIC_ULP_FCOE) cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE; SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); + + if ((ulp_type != CNIC_ULP_FCOE) || + (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) || + (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES))) + break; + + /* if reached here - should write fcoe capabilities */ + scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr); + if (!scratch_offset) + break; + scratch_offset += offsetof(struct glob_ncsi_oem_data, + fcoe_features[path][port]); + host_addr = (u32 *) &(ctl->data.register_data. 
+ fcoe_features); + for (i = 0; i < sizeof(struct fcoe_capabilities); + i += 4) + REG_WR(bp, scratch_offset + i, + *(host_addr + i/4)); } + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); break; } + case DRV_CTL_ULP_UNREGISTER_CMD: { int ulp_type = ctl->data.ulp_type; @@ -11792,6 +13952,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE; SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); } + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); break; } @@ -11829,14 +13990,52 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) cp->num_irq = 2; } +void bnx2x_setup_cnic_info(struct bnx2x *bp) +{ + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + + bnx2x_cid_ilt_lines(bp); + cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; + cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); + cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); + + DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n", + BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid, + cp->iscsi_l2_cid); + + if (NO_ISCSI_OOO(bp)) + cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; +} + static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, void *data) { struct bnx2x *bp = netdev_priv(dev); struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + int rc; - if (ops == NULL) + DP(NETIF_MSG_IFUP, "Register_cnic called\n"); + + if (ops == NULL) { + BNX2X_ERR("NULL ops received\n"); return -EINVAL; + } + + if (!CNIC_SUPPORT(bp)) { + BNX2X_ERR("Can't register CNIC when not supported\n"); + return -EOPNOTSUPP; + } + + if (!CNIC_LOADED(bp)) { + rc = bnx2x_load_cnic(bp); + if (rc) { + BNX2X_ERR("CNIC-related load failed\n"); + return rc; + } + } + + bp->cnic_enabled = true; bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!bp->cnic_kwq) @@ -11859,6 +14058,9 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, rcu_assign_pointer(bp->cnic_ops, ops); + /* Schedule driver to read CNIC driver versions */ + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); + return 0; } @@ -11872,13 +14074,14 @@ static int bnx2x_unregister_cnic(struct net_device *dev) RCU_INIT_POINTER(bp->cnic_ops, NULL); mutex_unlock(&bp->cnic_mutex); synchronize_rcu(); + bp->cnic_enabled = false; kfree(bp->cnic_kwq); bp->cnic_kwq = NULL; return 0; } -struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) +static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); struct cnic_eth_dev *cp = &bp->cnic_eth_dev; @@ -11905,10 +14108,10 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) cp->drv_ctl = bnx2x_drv_ctl; cp->drv_register_cnic = bnx2x_register_cnic; cp->drv_unregister_cnic = bnx2x_unregister_cnic; - cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID; + cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); cp->iscsi_l2_client_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); - cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID; + cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); if (NO_ISCSI_OOO(bp)) cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; @@ -11919,15 +14122,45 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) if (NO_FCOE(bp)) cp->drv_state |= CNIC_DRV_STATE_NO_FCOE; - DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, " - "starting cid %d\n", + BNX2X_DEV_INFO( + "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n", cp->ctx_blk_size, 
cp->ctx_tbl_offset, cp->ctx_tbl_len, cp->starting_cid); return cp; } -EXPORT_SYMBOL(bnx2x_cnic_probe); -#endif /* BCM_CNIC */ +static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp) +{ + struct bnx2x *bp = fp->bp; + u32 offset = BAR_USTRORM_INTMEM; + + if (IS_VF(bp)) + return bnx2x_vf_ustorm_prods_offset(bp, fp); + else if (!CHIP_IS_E1x(bp)) + offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); + else + offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); + + return offset; +} +/* called only on E1H or E2. + * When pretending to be PF, the pretend value is the function number 0...7 + * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID + * combination + */ +int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val) +{ + u32 pretend_reg; + + if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX) + return -1; + + /* get my own pretend register */ + pretend_reg = bnx2x_get_pretend_reg(bp); + REG_WR(bp, pretend_reg, pretend_func_val); + REG_RD(bp, pretend_reg); + return 0; +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h new file mode 100644 index 00000000000..caf1aef651e --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h @@ -0,0 +1,168 @@ +/* bnx2x_mfw_req.h: Broadcom Everest network driver. + * + * Copyright (c) 2012-2013 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNX2X_MFW_REQ_H +#define BNX2X_MFW_REQ_H + +#define PORT_0 0 +#define PORT_1 1 +#define PORT_MAX 2 +#define NVM_PATH_MAX 2 + +/* FCoE capabilities required from the driver */ +struct fcoe_capabilities { + u32 capability1; + /* Maximum number of I/Os per connection */ + #define FCOE_IOS_PER_CONNECTION_MASK 0x0000ffff + #define FCOE_IOS_PER_CONNECTION_SHIFT 0 + /* Maximum number of Logins per port */ + #define FCOE_LOGINS_PER_PORT_MASK 0xffff0000 + #define FCOE_LOGINS_PER_PORT_SHIFT 16 + + u32 capability2; + /* Maximum number of exchanges */ + #define FCOE_NUMBER_OF_EXCHANGES_MASK 0x0000ffff + #define FCOE_NUMBER_OF_EXCHANGES_SHIFT 0 + /* Maximum NPIV WWN per port */ + #define FCOE_NPIV_WWN_PER_PORT_MASK 0xffff0000 + #define FCOE_NPIV_WWN_PER_PORT_SHIFT 16 + + u32 capability3; + /* Maximum number of targets supported */ + #define FCOE_TARGETS_SUPPORTED_MASK 0x0000ffff + #define FCOE_TARGETS_SUPPORTED_SHIFT 0 + /* Maximum number of outstanding commands across all connections */ + #define FCOE_OUTSTANDING_COMMANDS_MASK 0xffff0000 + #define FCOE_OUTSTANDING_COMMANDS_SHIFT 16 + + u32 capability4; + #define FCOE_CAPABILITY4_STATEFUL 0x00000001 + #define FCOE_CAPABILITY4_STATELESS 0x00000002 + #define FCOE_CAPABILITY4_CAPABILITIES_REPORTED_VALID 0x00000004 +}; + +struct glob_ncsi_oem_data { + u32 driver_version; + u32 unused[3]; + struct fcoe_capabilities fcoe_features[NVM_PATH_MAX][PORT_MAX]; +}; + +/* current drv_info version */ +#define DRV_INFO_CUR_VER 2 + +/* drv_info op codes supported */ +enum drv_info_opcode { + ETH_STATS_OPCODE, + FCOE_STATS_OPCODE, + ISCSI_STATS_OPCODE +}; + +#define ETH_STAT_INFO_VERSION_LEN 12 +/* Per PCI Function Ethernet Statistics required from the driver */ +struct eth_stats_info { + /* Function's Driver Version. padded to 12 */ + u8 version[ETH_STAT_INFO_VERSION_LEN]; + /* Locally Admin Addr. BigEndian EUI48.
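An aside on the fcoe_capabilities layout defined above: the *_MASK/*_SHIFT pairs pack two 16-bit limits per dword, and the DRV_CTL_ULP_REGISTER_CMD handler earlier in this patch copies the filled structure into the NC-SI scratchpad one dword at a time. A sketch of the packing side (all values invented for illustration):

	/* Illustrative values only. */
	static void example_fill_fcoe_caps(struct fcoe_capabilities *caps)
	{
		u32 ios_per_conn = 64, logins_per_port = 8;

		caps->capability1 =
			((ios_per_conn << FCOE_IOS_PER_CONNECTION_SHIFT) &
			 FCOE_IOS_PER_CONNECTION_MASK) |
			((logins_per_port << FCOE_LOGINS_PER_PORT_SHIFT) &
			 FCOE_LOGINS_PER_PORT_MASK);
		caps->capability4 = FCOE_CAPABILITY4_STATEFUL |
				    FCOE_CAPABILITY4_CAPABILITIES_REPORTED_VALID;
	}
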
Actual size is 6 bytes */ + u8 mac_local[8]; + u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */ + u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */ + u32 mtu_size; /* MTU Size. Note : Negotiated MTU */ + u32 feature_flags; /* Feature_Flags. */ +#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01 +#define FEATURE_ETH_LSO_MASK 0x02 +#define FEATURE_ETH_BOOTMODE_MASK 0x1C +#define FEATURE_ETH_BOOTMODE_SHIFT 2 +#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2) +#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2) +#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2) +#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2) +#define FEATURE_ETH_TOE_MASK 0x20 + u32 lso_max_size; /* LSO MaxOffloadSize. */ + u32 lso_min_seg_cnt; /* LSO MinSegmentCount. */ + /* Num Offloaded Connections TCP_IPv4. */ + u32 ipv4_ofld_cnt; + /* Num Offloaded Connections TCP_IPv6. */ + u32 ipv6_ofld_cnt; + u32 promiscuous_mode; /* Promiscuous Mode. non-zero true */ + u32 txq_size; /* TX Descriptors Queue Size */ + u32 rxq_size; /* RX Descriptors Queue Size */ + /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */ + u32 txq_avg_depth; + /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */ + u32 rxq_avg_depth; + /* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/ + u32 iov_offload; + /* Number of NetQueue/VMQ Config'd. */ + u32 netq_cnt; + u32 vf_cnt; /* Num VF assigned to this PF. */ +}; + +/* Per PCI Function FCOE Statistics required from the driver */ +struct fcoe_stats_info { + u8 version[12]; /* Function's Driver Version. */ + u8 mac_local[8]; /* Locally Admin Addr. */ + u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */ + u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */ + /* QoS Priority (per 802.1p). 0-7255 */ + u32 qos_priority; + u32 txq_size; /* FCoE TX Descriptors Queue Size. */ + u32 rxq_size; /* FCoE RX Descriptors Queue Size. */ + /* FCoE TX Descriptor Queue Avg Depth. */ + u32 txq_avg_depth; + /* FCoE RX Descriptors Queue Avg Depth. */ + u32 rxq_avg_depth; + u32 rx_frames_lo; /* FCoE RX Frames received. */ + u32 rx_frames_hi; /* FCoE RX Frames received. */ + u32 rx_bytes_lo; /* FCoE RX Bytes received. */ + u32 rx_bytes_hi; /* FCoE RX Bytes received. */ + u32 tx_frames_lo; /* FCoE TX Frames sent. */ + u32 tx_frames_hi; /* FCoE TX Frames sent. */ + u32 tx_bytes_lo; /* FCoE TX Bytes sent. */ + u32 tx_bytes_hi; /* FCoE TX Bytes sent. */ +}; + +/* Per PCI Function iSCSI Statistics required from the driver*/ +struct iscsi_stats_info { + u8 version[12]; /* Function's Driver Version. */ + u8 mac_local[8]; /* Locally Admin iSCSI MAC Addr. */ + u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */ + /* QoS Priority (per 802.1p). 0-7255 */ + u32 qos_priority; + u8 initiator_name[64]; /* iSCSI Boot Initiator Node name. */ + u8 ww_port_name[64]; /* iSCSI World wide port name */ + u8 boot_target_name[64];/* iSCSI Boot Target Name. */ + u8 boot_target_ip[16]; /* iSCSI Boot Target IP. */ + u32 boot_target_portal; /* iSCSI Boot Target Portal. */ + u8 boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */ + u32 max_frame_size; /* Max Frame Size. bytes */ + u32 txq_size; /* PDU TX Descriptors Queue Size. */ + u32 rxq_size; /* PDU RX Descriptors Queue Size. */ + u32 txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */ + u32 rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */ + u32 rx_pdus_lo; /* iSCSI PDUs received. */ + u32 rx_pdus_hi; /* iSCSI PDUs received. */ + u32 rx_bytes_lo; /* iSCSI RX Bytes received. */ + u32 rx_bytes_hi; /* iSCSI RX Bytes received. 
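An aside on the *_lo/*_hi pairs used throughout these stats structures: they carry 64-bit counters as two dwords, presumably because the shared-memory interface is u32-based, so consumers reassemble them with the usual shift-and-or. A sketch:

	static inline u64 example_stat64(u32 lo, u32 hi)
	{
		return ((u64)hi << 32) | lo;
	}
	/* e.g. total iSCSI RX bytes:
	 * example_stat64(st->rx_bytes_lo, st->rx_bytes_hi)
	 */
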
*/ + u32 tx_pdus_lo; /* iSCSI PDUs sent. */ + u32 tx_pdus_hi; /* iSCSI PDUs sent. */ + u32 tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */ + u32 tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */ + u32 pcp_prior_map_tbl; /* C-PCP to S-PCP Priority MapTable. + * 9 nibbles, the position of each nibble + * represents the C-PCP value, the value + * of the nibble = S-PCP value. + */ +}; + +union drv_info_to_mcp { + struct eth_stats_info ether_stat; + struct fcoe_stats_info fcoe_stat; + struct iscsi_stats_info iscsi_stat; +}; +#endif /* BNX2X_MFW_REQ_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index dddbcf6e154..2beb5430b87 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -1,6 +1,6 @@ /* bnx2x_reg.h: Broadcom Everest network driver. * - * Copyright (c) 2007-2011 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -35,6 +35,8 @@ #define ATC_REG_ATC_INT_STS_CLR 0x1101c0 /* [RW 5] Parity mask register #0 read/write */ #define ATC_REG_ATC_PRTY_MASK 0x1101d8 +/* [R 5] Parity register #0 read */ +#define ATC_REG_ATC_PRTY_STS 0x1101cc /* [RC 5] Parity register #0 read clear */ #define ATC_REG_ATC_PRTY_STS_CLR 0x1101d0 /* [RW 19] Interrupt mask register #0 read/write */ @@ -825,6 +827,7 @@ /* [RW 28] The value sent to CM header in the case of CFC load error. */ #define DORQ_REG_ERR_CMHEAD 0x170058 #define DORQ_REG_IF_EN 0x170004 +#define DORQ_REG_MAX_RVFID_SIZE 0x1701ec #define DORQ_REG_MODE_ACT 0x170008 /* [RW 5] The normal mode CID extraction offset. */ #define DORQ_REG_NORM_CID_OFST 0x17002c @@ -847,6 +850,22 @@ writes the same initial credit to the rspa_crd_cnt and rspb_crd_cnt. The read reads this written value. */ #define DORQ_REG_RSP_INIT_CRD 0x170048 +#define DORQ_REG_RSPB_CRD_CNT 0x1700b0 +#define DORQ_REG_VF_NORM_CID_BASE 0x1701a0 +#define DORQ_REG_VF_NORM_CID_OFST 0x1701f4 +#define DORQ_REG_VF_NORM_CID_WND_SIZE 0x1701a4 +#define DORQ_REG_VF_NORM_MAX_CID_COUNT 0x1701e4 +#define DORQ_REG_VF_NORM_VF_BASE 0x1701a8 +/* [RW 10] VF type validation mask value */ +#define DORQ_REG_VF_TYPE_MASK_0 0x170218 +/* [RW 17] VF type validation Max MCID value */ +#define DORQ_REG_VF_TYPE_MAX_MCID_0 0x1702d8 +/* [RW 17] VF type validation Min MCID value */ +#define DORQ_REG_VF_TYPE_MIN_MCID_0 0x170298 +/* [RW 10] VF type validation comp value */ +#define DORQ_REG_VF_TYPE_VALUE_0 0x170258 +#define DORQ_REG_VF_USAGE_CT_LIMIT 0x170340 + /* [RW 4] Initial activity counter value on the load request; when the shortcut is done. */ #define DORQ_REG_SHRT_ACT_CNT 0x170070 @@ -859,6 +878,7 @@ #define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 (0x1<<2) #define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1) #define HC_CONFIG_1_REG_BLOCK_DISABLE_1 (0x1<<0) +#define DORQ_REG_VF_USAGE_CNT 0x170320 #define HC_REG_AGG_INT_0 0x108050 #define HC_REG_AGG_INT_1 0x108054 #define HC_REG_ATTN_BIT 0x108120 @@ -987,6 +1007,7 @@ * clear; 1 = set. Data valid only in addresses 0-4. all the rest are zero. */ #define IGU_REG_WRITE_DONE_PENDING 0x130480 #define MCP_A_REG_MCPR_SCRATCH 0x3a0000 +#define MCP_REG_MCPR_ACCESS_LOCK 0x8009c #define MCP_REG_MCPR_CPU_PROGRAM_COUNTER 0x8501c #define MCP_REG_MCPR_GP_INPUTS 0x800c0 #define MCP_REG_MCPR_GP_OENABLE 0x800c8 @@ -1472,16 +1493,136 @@ /* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1 Port. */
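An aside on consuming such read-only identification registers: they are read with a plain REG_RD and tested against the masks defined beside them; for instance, the 57811 strap bit added just below could be probed as in this illustrative sketch (not the driver's actual chip-detection path):

	static bool example_is_57811(struct bnx2x *bp)
	{
		return !!(REG_RD(bp, MISC_REG_CHIP_TYPE) &
			  MISC_REG_CHIP_TYPE_57811_MASK);
	}
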
#define MISC_REG_BOND_ID 0xa400 -/* [R 8] These bits indicate the metal revision of the chip. This value - starts at 0x00 for each all-layer tape-out and increments by one for each - tape-out. */ -#define MISC_REG_CHIP_METAL 0xa404 /* [R 16] These bits indicate the part number for the chip. */ #define MISC_REG_CHIP_NUM 0xa408 /* [R 4] These bits indicate the base revision of the chip. This value starts at 0x0 for the A0 tape-out and increments by one for each all-layer tape-out. */ #define MISC_REG_CHIP_REV 0xa40c +/* [R 14] otp_misc_do[100:0] spare bits collection: 13:11- + * otp_misc_do[100:98]; 10:7 - otp_misc_do[87:84]; 6:3 - otp_misc_do[75:72]; + * 2:1 - otp_misc_do[51:50]; 0 - otp_misc_do[1]. */ +#define MISC_REG_CHIP_TYPE 0xac60 +#define MISC_REG_CHIP_TYPE_57811_MASK (1<<1) +#define MISC_REG_CPMU_LP_DR_ENABLE 0xa858 +/* [RW 1] FW EEE LPI Enable. When 1 indicates that EEE LPI mode is enabled + * by FW. When 0 indicates that the EEE LPI mode is disabled by FW. Clk + * 25MHz. Reset on hard reset. */ +#define MISC_REG_CPMU_LP_FW_ENABLE_P0 0xa84c +/* [RW 32] EEE LPI Idle Threshold. The threshold value for the idle EEE LPI + * counter. Timer tick is 1 us. Clock 25MHz. Reset on hard reset. */ +#define MISC_REG_CPMU_LP_IDLE_THR_P0 0xa8a0 +/* [RW 18] LPI entry events mask. [0] - Vmain SM Mask. When 1 indicates that + * the Vmain SM end state is disabled. When 0 indicates that the Vmain SM + * end state is enabled. [1] - FW Queues Empty Mask. When 1 indicates that + * the FW command that all Queues are empty is disabled. When 0 indicates + * that the FW command that all Queues are empty is enabled. [2] - FW Early + * Exit Mask / Reserved (Entry mask). When 1 indicates that the FW Early + * Exit command is disabled. When 0 indicates that the FW Early Exit command + * is enabled. This bit is applicable only in the EXIT Events Mask registers. + * [3] - PBF Request Mask. When 1 indicates that the PBF Request indication + * is disabled. When 0 indicates that the PBF Request indication is enabled. + * [4] - Tx Request Mask. When =1 indicates that the Tx Other Than PBF + * Request indication is disabled. When 0 indicates that the Tx Other Than + * PBF Request indication is enabled. [5] - Rx EEE LPI Status Mask. When 1 + * indicates that the RX EEE LPI Status indication is disabled. When 0 + * indicates that the RX EEE LPI Status indication is enabled. In the EXIT + * Events Masks registers; this bit masks the falling edge detect of the LPI + * Status (Rx LPI is on - off). [6] - Tx Pause Mask. When 1 indicates that + * the Tx Pause indication is disabled. When 0 indicates that the Tx Pause + * indication is enabled. [7] - BRB1 Empty Mask. When 1 indicates that the + * BRB1 EMPTY indication is disabled. When 0 indicates that the BRB1 EMPTY + * indication is enabled. [8] - QM Idle Mask. When 1 indicates that the QM + * IDLE indication is disabled. When 0 indicates that the QM IDLE indication + * is enabled. (One bit for both VOQ0 and VOQ1). [9] - QM LB Idle Mask. When + * 1 indicates that the QM IDLE indication for LOOPBACK is disabled. When 0 + * indicates that the QM IDLE indication for LOOPBACK is enabled. [10] - L1 + * Status Mask. When 1 indicates that the L1 Status indication from the PCIE + * CORE is disabled. When 0 indicates that the L1 Status indication + * from the PCIE CORE is enabled. In the EXIT Events Masks registers; this + * bit masks the falling edge detect of the L1 status (L1 is on - off). [11] + * - P0 E0 EEE LPI REQ Mask.
When =1 indicates that the P0 E0 EEE + * LPI REQ indication is disabled. When =0 indicates that the P0 E0 EEE LPI + * REQ indication is enabled. [12] - P1 E0 EEE LPI REQ Mask. When =1 + * indicates that the P1 E0 EEE LPI REQ indication is disabled. When =0 + * indicates that the P1 E0 EEE LPI REQ indication is enabled. [13] - P0 E1 EEE + * LPI REQ Mask. When =1 indicates that the P0 E1 EEE LPI REQ indication is + * disabled. When =0 indicates that the P0 E1 EEE LPI REQ indication is + * enabled. [14] - P1 E1 EEE LPI REQ Mask. When =1 indicates that the P1 E1 EEE + * LPI REQ indication is disabled. When =0 indicates that the P1 E1 EEE LPI REQ + * indication is enabled. [15] - L1 REQ Mask. When =1 indicates that the L1 + * REQ indication is disabled. When =0 indicates that the L1 indication is + * enabled. [16] - Rx EEE LPI Status Edge Detect Mask. When =1 indicates + * that the RX EEE LPI Status Falling Edge Detect indication is disabled (Rx + * EEE LPI is on - off). When =0 indicates that the RX EEE LPI Status + * Falling Edge Detect indication is enabled (Rx EEE LPI is on - off). This + * bit is applicable only in the EXIT Events Masks registers. [17] - L1 + * Status Edge Detect Mask. When =1 indicates that the L1 Status Falling + * Edge Detect indication from the PCIE CORE is disabled (L1 is on - off). + * When =0 indicates that the L1 Status Falling Edge Detect indication from + * the PCIE CORE is enabled (L1 is on - off). This bit is applicable only in + * the EXIT Events Masks registers. Clock 25MHz. Reset on hard reset. */ +#define MISC_REG_CPMU_LP_MASK_ENT_P0 0xa880 +/* [RW 18] EEE LPI exit events mask. [0] - Vmain SM Mask. When 1 indicates + * that the Vmain SM end state is disabled. When 0 indicates that the Vmain + * SM end state is enabled. [1] - FW Queues Empty Mask. When 1 indicates + * that the FW command that all Queues are empty is disabled. When 0 + * indicates that the FW command that all Queues are empty is enabled. [2] - + * FW Early Exit Mask / Reserved (Entry mask). When 1 indicates that the FW + * Early Exit command is disabled. When 0 indicates that the FW Early Exit + * command is enabled. This bit is applicable only in the EXIT Events Mask + * registers. [3] - PBF Request Mask. When 1 indicates that the PBF Request + * indication is disabled. When 0 indicates that the PBF Request indication + * is enabled. [4] - Tx Request Mask. When =1 indicates that the Tx Other + * Than PBF Request indication is disabled. When 0 indicates that the Tx + * Other Than PBF Request indication is enabled. [5] - Rx EEE LPI Status + * Mask. When 1 indicates that the RX EEE LPI Status indication is disabled. + * When 0 indicates that the RX EEE LPI Status indication is enabled. In the + * EXIT Events Masks registers; this bit masks the falling edge detect of + * the EEE LPI Status (Rx EEE LPI is on - off). [6] - Tx Pause Mask. When 1 + * indicates that the Tx Pause indication is disabled. When 0 indicates that + * the Tx Pause indication is enabled. [7] - BRB1 Empty Mask. When 1 + * indicates that the BRB1 EMPTY indication is disabled. When 0 indicates + * that the BRB1 EMPTY indication is enabled. [8] - QM Idle Mask. When 1 + * indicates that the QM IDLE indication is disabled. When 0 indicates that + * the QM IDLE indication is enabled. (One bit for both VOQ0 and VOQ1). [9] + * - QM LB Idle Mask. When 1 indicates that the QM IDLE indication for + * LOOPBACK is disabled. When 0 indicates that the QM IDLE indication for + * LOOPBACK is enabled. [10] - L1 Status Mask.
When 1 indicates that the L1 + * Status indication from the PCIE CORE is disabled. When 0 indicates that + * the L1 Status indication from the PCIE CORE is enabled. In the + * EXIT Events Masks registers; this bit masks the falling edge detect of + * the L1 status (L1 is on - off). [11] - P0 E0 EEE LPI REQ Mask. When + * =1 indicates that the P0 E0 EEE LPI REQ indication is disabled. When + * =0 indicates that the P0 E0 EEE LPI REQ indication is enabled. [12] - P1 + * E0 EEE LPI REQ Mask. When =1 indicates that the P1 E0 EEE LPI REQ indication + * is disabled. When =0 indicates that the P1 E0 EEE LPI REQ indication is + * enabled. [13] - P0 E1 EEE LPI REQ Mask. When =1 indicates that the P0 E1 EEE + * LPI REQ indication is disabled. When =0 indicates that the P0 E1 EEE LPI REQ + * indication is enabled. [14] - P1 E1 EEE LPI REQ Mask. When =1 indicates + * that the P1 E1 EEE LPI REQ indication is disabled. When =0 indicates that + * the P1 E1 EEE LPI REQ indication is enabled. [15] - L1 REQ Mask. When =1 + * indicates that the L1 REQ indication is disabled. When =0 indicates that + * the L1 indication is enabled. [16] - Rx EEE LPI Status Edge Detect Mask. + * When =1 indicates that the RX EEE LPI Status Falling Edge Detect + * indication is disabled (Rx EEE LPI is on - off). When =0 indicates that + * the RX EEE LPI Status Falling Edge Detect indication is enabled (Rx EEE + * LPI is on - off). This bit is applicable only in the EXIT Events Masks + * registers. [17] - L1 Status Edge Detect Mask. When =1 indicates that the + * L1 Status Falling Edge Detect indication from the PCIE CORE is disabled + * (L1 is on - off). When =0 indicates that the L1 Status Falling Edge + * Detect indication from the PCIE CORE is enabled (L1 is on - off). This + * bit is applicable only in the EXIT Events Masks registers. Clock 25MHz. + * Reset on hard reset. */ +#define MISC_REG_CPMU_LP_MASK_EXT_P0 0xa888 +/* [RW 16] EEE LPI Entry Events Counter. A statistic counter with the number + * of counts that the SM entered the EEE LPI state. Clock 25MHz. Read only + * register. Reset on hard reset. */ +#define MISC_REG_CPMU_LP_SM_ENT_CNT_P0 0xa8b8 +/* [RW 16] EEE LPI Entry Events Counter. A statistic counter with the number + * of counts that the SM entered the EEE LPI state. Clock 25MHz. Read only + * register. Reset on hard reset. */ +#define MISC_REG_CPMU_LP_SM_ENT_CNT_P1 0xa8bc /* [RW 32] The following driver registers (1...16) represent 16 drivers and 32 clients. Each client can be controlled by one driver only.
One in each bit represents that this driver controls the appropriate client (Ex: bit 5 @@ -1686,6 +1827,7 @@ [10] rst_dbg; [11] rst_misc_core; [12] rst_dbue (UART); [13] Pci_resetmdio_n; [14] rst_emac0_hard_core; [15] rst_emac1_hard_core; [16] rst_pxp_rq_rd_wr; [31:17] reserved */ +#define MISC_REG_RESET_REG_1 0xa580 #define MISC_REG_RESET_REG_2 0xa590 /* [RW 20] 20 bit GRC address where the scratch-pad of the MCP that is shared with the driver resides */ @@ -1981,6 +2123,7 @@ #define NIG_REG_LLH1_ERROR_MASK 0x10090 /* [RW 8] event id for llh1 */ #define NIG_REG_LLH1_EVENT_ID 0x10088 +#define NIG_REG_LLH1_FUNC_EN 0x16104 #define NIG_REG_LLH1_FUNC_MEM 0x161c0 #define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160 #define NIG_REG_LLH1_FUNC_MEM_SIZE 16 @@ -2009,6 +2152,8 @@ /* [R 32] Interrupt register #0 read */ #define NIG_REG_NIG_INT_STS_0 0x103b0 #define NIG_REG_NIG_INT_STS_1 0x103c0 +/* [RC 32] Interrupt register #0 read clear */ +#define NIG_REG_NIG_INT_STS_CLR_0 0x103b4 /* [R 32] Legacy E1 and E1H location for parity error mask register. */ #define NIG_REG_NIG_PRTY_MASK 0x103dc /* [RW 32] Parity mask register #0 read/write */ @@ -2176,6 +2321,15 @@ * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to * accommodate the 9 input clients to ETS arbiter. */ #define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB 0x18684 +/* [RW 1] MCP-to-host path enable. Set this bit to enable the routing of MCP + * packets to BRB LB interface to forward the packet to the host. All + * packets from MCP are forwarded to the network when this bit is cleared - + * regardless of the configured destination in tx_mng_destination register. + * When MCP-to-host paths for both ports 0 and 1 are disabled - the arbiter + * for BRB LB interface is bypassed and PBF LB traffic is always selected to + * send to BRB LB. + */ +#define NIG_REG_P0_TX_MNG_HOST_ENABLE 0x182f4 #define NIG_REG_P1_HWPFC_ENABLE 0x181d0 #define NIG_REG_P1_MAC_IN_EN 0x185c0 /* [RW 1] Output enable for TX MAC interface */ @@ -2292,6 +2446,12 @@ #define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB 0x186e4 /* [R 1] TX FIFO for transmitting data to MAC is empty. */ #define NIG_REG_P1_TX_MACFIFO_EMPTY 0x18594 +/* [RW 1] MCP-to-host path enable. Set this bit to enable the routing of MCP + * packets to BRB LB interface to forward the packet to the host. All + * packets from MCP are forwarded to the network when this bit is cleared - + * regardless of the configured destination in tx_mng_destination register. + */ +#define NIG_REG_P1_TX_MNG_HOST_ENABLE 0x182f8 /* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets forwarded to the host. */ #define NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY 0x182b8 @@ -2429,6 +2589,7 @@ current task in process). */ #define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c #define PBF_REG_DISABLE_PF 0x1402e8 +#define PBF_REG_DISABLE_VF 0x1402ec /* [RW 18] For port 0: For each client that is subject to WFQ (the * corresponding bit is 1); indicates to which of the credit registers this * client is mapped.
For clients which are not credit blocked; their mapping @@ -2591,6 +2752,8 @@ #define PBF_REG_PBF_INT_STS 0x1401c8 /* [RW 20] Parity mask register #0 read/write */ #define PBF_REG_PBF_PRTY_MASK 0x1401e4 +/* [R 28] Parity register #0 read */ +#define PBF_REG_PBF_PRTY_STS 0x1401d8 /* [RC 20] Parity register #0 read clear */ #define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc /* [RW 16] The Ethernet type value for L2 tag 0 */ @@ -2701,6 +2864,17 @@ #define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430 #define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434 #define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438 +/* [W 7] Writing 1 to each bit in this register clears a corresponding error + * details register and enables logging new error details. Bit 0 - clears + * INCORRECT_RCV_DETAILS; Bit 1 - clears RX_ERR_DETAILS; Bit 2 - clears + * TX_ERR_WR_ADD_31_0 TX_ERR_WR_ADD_63_32 TX_ERR_WR_DETAILS + * TX_ERR_WR_DETAILS2 TX_ERR_RD_ADD_31_0 TX_ERR_RD_ADD_63_32 + * TX_ERR_RD_DETAILS TX_ERR_RD_DETAILS2 TX_ERR_WR_DETAILS_ICPL; Bit 3 - + * clears VF_LENGTH_VIOLATION_DETAILS. Bit 4 - clears + * VF_GRC_SPACE_VIOLATION_DETAILS. Bit 5 - clears RX_TCPL_ERR_DETAILS. Bit 6 + * - clears TCPL_IN_TWO_RCBS_DETAILS. */ +#define PGLUE_B_REG_LATCHED_ERRORS_CLR 0x943c + /* [R 9] Interrupt register #0 read */ #define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298 /* [RC 9] Interrupt register #0 read clear */ @@ -3566,6 +3740,10 @@ #define PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS 0x10309c /* [WB 160] Used for initialization of the inbound interrupts memory */ #define PXP_REG_HST_INBOUND_INT 0x103800 +/* [RW 7] Indirect access to the permission table. The fields are : {Valid; + * VFID[5:0]} + */ +#define PXP_REG_HST_ZONE_PERMISSION_TABLE 0x103400 /* [RW 32] Interrupt mask register #0 read/write */ #define PXP_REG_PXP_INT_MASK_0 0x103074 #define PXP_REG_PXP_INT_MASK_1 0x103084 @@ -4354,6 +4532,8 @@ #define TM_REG_TM_INT_STS 0x1640f0 /* [RW 7] Parity mask register #0 read/write */ #define TM_REG_TM_PRTY_MASK 0x16410c +/* [R 7] Parity register #0 read */ +#define TM_REG_TM_PRTY_STS 0x164100 /* [RC 7] Parity register #0 read clear */ #define TM_REG_TM_PRTY_STS_CLR 0x164104 /* [RW 8] The event id for aggregated interrupt 0 */ @@ -4812,6 +4992,7 @@ The fields are: [4:0] - tail pointer; 10:5] - Link List size; 15:11] - header pointer. */ #define UCM_REG_XX_TABLE 0xe0300 +#define UMAC_COMMAND_CONFIG_REG_HD_ENA (0x1<<10) #define UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE (0x1<<28) #define UMAC_COMMAND_CONFIG_REG_LOOP_ENA (0x1<<15) #define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK (0x1<<24) @@ -4822,6 +5003,10 @@ #define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13) #define UMAC_COMMAND_CONFIG_REG_TX_ENA (0x1<<0) #define UMAC_REG_COMMAND_CONFIG 0x8 +/* [RW 16] This is the duration for which MAC must wait to go back to ACTIVE + * state from LPI state when it receives packet for transmission. The + * decrement unit is 1 micro-second. */ +#define UMAC_REG_EEE_WAKE_TIMER 0x6c /* [RW 32] Register Bit 0 refers to Bit 16 of the MAC address; Bit 1 refers * to bit 17 of the MAC address etc. */ #define UMAC_REG_MAC_ADDR0 0xc @@ -4831,6 +5016,8 @@ /* [RW 14] Defines a 14-Bit maximum frame length used by the MAC receive * logic to check frames. 
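An aside on applying single-bit MAC controls like those above: they are normally applied read-modify-write against the per-port UMAC block base, e.g. for the EEE enable bit defined just below (a sketch; how umac_base is derived is elided, and this is not the driver's actual EEE sequence):

	static void example_umac_eee_set(struct bnx2x *bp, u32 umac_base, bool en)
	{
		u32 val = REG_RD(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL);

		if (en)
			val |= UMAC_UMAC_EEE_CTRL_REG_EEE_EN;
		else
			val &= ~UMAC_UMAC_EEE_CTRL_REG_EEE_EN;
		REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL, val);
	}
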
*/ #define UMAC_REG_MAXFR 0x14 +#define UMAC_REG_UMAC_EEE_CTRL 0x64 +#define UMAC_UMAC_EEE_CTRL_REG_EEE_EN (0x1<<3) /* [RW 8] The event id for aggregated interrupt 0 */ #define USDM_REG_AGG_INT_EVENT_0 0xc4038 #define USDM_REG_AGG_INT_EVENT_1 0xc403c @@ -5349,8 +5536,10 @@ #define XMAC_CTRL_REG_RX_EN (0x1<<1) #define XMAC_CTRL_REG_SOFT_RESET (0x1<<6) #define XMAC_CTRL_REG_TX_EN (0x1<<0) +#define XMAC_CTRL_REG_XLGMII_ALIGN_ENB (0x1<<7) #define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN (0x1<<18) #define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN (0x1<<17) +#define XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON (0x1<<1) #define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN (0x1<<0) #define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN (0x1<<3) #define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN (0x1<<4) @@ -5363,14 +5552,19 @@ /* [RW 32] Lower 48 bits of ctrl_sa register. Used as the SA in PAUSE/PFC * packets transmitted by the MAC */ #define XMAC_REG_CTRL_SA_LO 0x28 +#define XMAC_REG_EEE_CTRL 0xd8 +#define XMAC_REG_EEE_TIMERS_HI 0xe4 #define XMAC_REG_PAUSE_CTRL 0x68 #define XMAC_REG_PFC_CTRL 0x70 #define XMAC_REG_PFC_CTRL_HI 0x74 +#define XMAC_REG_RX_LSS_CTRL 0x50 #define XMAC_REG_RX_LSS_STATUS 0x58 /* [RW 14] Maximum packet size in receive direction; exclusive of preamble & * CRC in strip mode */ #define XMAC_REG_RX_MAX_SIZE 0x40 #define XMAC_REG_TX_CTRL 0x20 +#define XMAC_RX_LSS_CTRL_REG_LOCAL_FAULT_DISABLE (0x1<<0) +#define XMAC_RX_LSS_CTRL_REG_REMOTE_FAULT_DISABLE (0x1<<1) /* [RW 16] Indirect access to the XX table of the XX protection mechanism. The fields are:[4:0] - tail pointer; 9:5] - Link List size; 14:10] - header pointer. */ @@ -5605,6 +5799,7 @@ /* [RC 32] Parity register #0 read clear */ #define XSEM_REG_XSEM_PRTY_STS_CLR_0 0x280128 #define XSEM_REG_XSEM_PRTY_STS_CLR_1 0x280138 +#define MCPR_ACCESS_LOCK_LOCK (1L<<31) #define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0) #define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1) #define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0) @@ -5731,10 +5926,13 @@ #define MISC_REGISTERS_GPIO_PORT_SHIFT 4 #define MISC_REGISTERS_GPIO_SET_POS 8 #define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588 +#define MISC_REGISTERS_RESET_REG_1_RST_BRB1 (0x1<<0) +#define MISC_REGISTERS_RESET_REG_1_RST_DORQ (0x1<<19) #define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29) #define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7) #define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26) #define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27) +#define MISC_REGISTERS_RESET_REG_1_RST_XSEM (0x1<<22) #define MISC_REGISTERS_RESET_REG_1_SET 0x584 #define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598 #define MISC_REGISTERS_RESET_REG_2_MSTAT0 (0x1<<24) @@ -5783,17 +5981,31 @@ #define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1 #define MISC_REGISTERS_SPIO_OUTPUT_LOW 0 #define MISC_REGISTERS_SPIO_SET_POS 8 -#define HW_LOCK_DRV_FLAGS 10 +#define MISC_SPIO_CLR_POS 16 +#define MISC_SPIO_FLOAT (0xffL<<24) +#define MISC_SPIO_FLOAT_POS 24 +#define MISC_SPIO_INPUT_HI_Z 2 +#define MISC_SPIO_INT_OLD_SET_POS 16 +#define MISC_SPIO_OUTPUT_HIGH 1 +#define MISC_SPIO_OUTPUT_LOW 0 +#define MISC_SPIO_SET_POS 8 +#define MISC_SPIO_SPIO4 0x10 +#define MISC_SPIO_SPIO5 0x20 #define HW_LOCK_MAX_RESOURCE_VALUE 31 +#define HW_LOCK_RESOURCE_DCBX_ADMIN_MIB 13 +#define HW_LOCK_RESOURCE_DRV_FLAGS 10 #define HW_LOCK_RESOURCE_GPIO 1 #define HW_LOCK_RESOURCE_MDIO 0 +#define HW_LOCK_RESOURCE_NVRAM 12 #define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3 #define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8 #define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9 -#define HW_LOCK_RESOURCE_SPIO 2 +#define HW_LOCK_RESOURCE_RECOVERY_REG 11 #define 
HW_LOCK_RESOURCE_RESET 5 +#define HW_LOCK_RESOURCE_SPIO 2 #define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4) #define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5) +#define AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT (0x1<<19) #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18) #define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1<<31) #define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR (0x1<<30) @@ -5988,7 +6200,9 @@ #define PCICFG_COMMAND_INT_DISABLE (1<<10) #define PCICFG_COMMAND_RESERVED (0x1f<<11) #define PCICFG_STATUS_OFFSET 0x06 -#define PCICFG_REVESION_ID_OFFSET 0x08 +#define PCICFG_REVISION_ID_OFFSET 0x08 +#define PCICFG_REVESION_ID_MASK 0xff +#define PCICFG_REVESION_ID_ERROR_VAL 0xff #define PCICFG_CACHE_LINE_SIZE 0x0c #define PCICFG_LATENCY_TIMER 0x0d #define PCICFG_BAR_1_LOW 0x10 @@ -6023,7 +6237,8 @@ #define PCICFG_MSI_CONTROL_64_BIT_ADDR_CAP (0x1<<23) #define PCICFG_MSI_CONTROL_MSI_PVMASK_CAPABLE (0x1<<24) #define PCICFG_GRC_ADDRESS 0x78 -#define PCICFG_GRC_DATA 0x80 +#define PCICFG_GRC_DATA 0x80 +#define PCICFG_ME_REGISTER 0x98 #define PCICFG_MSIX_CAP_ID_OFFSET 0xa0 #define PCICFG_MSIX_CONTROL_TABLE_SIZE (0x7ff<<16) #define PCICFG_MSIX_CONTROL_RESERVED (0x7<<27) @@ -6130,6 +6345,18 @@ #define PCI_PM_DATA_B 0x414 #define PCI_ID_VAL1 0x434 #define PCI_ID_VAL2 0x438 +#define PCI_ID_VAL3 0x43c + +#define GRC_CONFIG_REG_VF_MSIX_CONTROL 0x61C +#define GRC_CONFIG_REG_PF_INIT_VF 0x624 +#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf +/* First VF_NUM for PF is encoded in this register. + * The number of VFs assigned to a PF is assumed to be a multiple of 8. + * Software should program these bits based on Total Number of VFs \ + * programmed for each PF. + * Since registers from 0x000-0x7ff are split across functions, each PF will + * have the same location for the same 4 bits + */ #define PXPCS_TL_CONTROL_5 0x814 #define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN (1 << 29) /*WC*/ @@ -6379,6 +6606,27 @@ (7L<<ME_REG_ABS_PF_NUM_SHIFT) /* Absolute PF Num */ +#define PXP_VF_ADDR_IGU_START 0 +#define PXP_VF_ADDR_IGU_SIZE 0x3000 +#define PXP_VF_ADDR_IGU_END\ + ((PXP_VF_ADDR_IGU_START) + (PXP_VF_ADDR_IGU_SIZE) - 1) + +#define PXP_VF_ADDR_USDM_QUEUES_START 0x3000 +#define PXP_VF_ADDR_USDM_QUEUES_SIZE\ + (PXP_VF_ADRR_NUM_QUEUES * PXP_ADDR_QUEUE_SIZE) +#define PXP_VF_ADDR_USDM_QUEUES_END\ + ((PXP_VF_ADDR_USDM_QUEUES_START) + (PXP_VF_ADDR_USDM_QUEUES_SIZE) - 1) + +#define PXP_VF_ADDR_CSDM_GLOBAL_START 0x7600 +#define PXP_VF_ADDR_CSDM_GLOBAL_SIZE (PXP_ADDR_REG_SIZE) +#define PXP_VF_ADDR_CSDM_GLOBAL_END\ + ((PXP_VF_ADDR_CSDM_GLOBAL_START) + (PXP_VF_ADDR_CSDM_GLOBAL_SIZE) - 1) + +#define PXP_VF_ADDR_DB_START 0x7c00 +#define PXP_VF_ADDR_DB_SIZE 0x200 +#define PXP_VF_ADDR_DB_END\ + ((PXP_VF_ADDR_DB_START) + (PXP_VF_ADDR_DB_SIZE) - 1) + #define MDIO_REG_BANK_CL73_IEEEB0 0x0 #define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0 #define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200 @@ -6401,6 +6649,7 @@ #define MDIO_CL73_IEEEB1_AN_LP_ADV1_ASYMMETRIC 0x0800 #define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_BOTH 0x0C00 #define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK 0x0C00 +#define MDIO_CL73_IEEEB1_AN_LP_ADV2 0x04 #define MDIO_REG_BANK_RX0 0x80b0 #define MDIO_RX0_RX_STATUS 0x10 @@ -6528,6 +6777,7 @@ #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI 0x1B00 #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS 0x1E00 #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI 0x1F00 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2 0x3900 #define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130 @@ -6794,24 
+7044,31 @@ The other bits are reserved and should be zero */ #define MDIO_AN_REG_ADV_PAUSE_MASK 0x0C00 #define MDIO_AN_REG_ADV 0x0011 #define MDIO_AN_REG_ADV2 0x0012 -#define MDIO_AN_REG_LP_AUTO_NEG 0x0013 +#define MDIO_AN_REG_LP_AUTO_NEG 0x0013 +#define MDIO_AN_REG_LP_AUTO_NEG2 0x0014 #define MDIO_AN_REG_MASTER_STATUS 0x0021 +#define MDIO_AN_REG_EEE_ADV 0x003c +#define MDIO_AN_REG_LP_EEE_ADV 0x003d /*bcm*/ #define MDIO_AN_REG_LINK_STATUS 0x8304 #define MDIO_AN_REG_CL37_CL73 0x8370 #define MDIO_AN_REG_CL37_AN 0xffe0 #define MDIO_AN_REG_CL37_FC_LD 0xffe4 -#define MDIO_AN_REG_CL37_FC_LP 0xffe5 +#define MDIO_AN_REG_CL37_FC_LP 0xffe5 +#define MDIO_AN_REG_1000T_STATUS 0xffea #define MDIO_AN_REG_8073_2_5G 0x8329 #define MDIO_AN_REG_8073_BAM 0x8350 #define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL 0x0020 #define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0 +#define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G 0x40 #define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1 #define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4 #define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6 #define MDIO_AN_REG_8481_1000T_CTRL 0xffe9 +#define MDIO_AN_REG_8481_1G_100T_EXT_CTRL 0xfff0 +#define MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF 0x0008 #define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5 #define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7 #define MDIO_AN_REG_8481_AUX_CTRL 0xfff8 @@ -6845,6 +7102,9 @@ The other bits are reserved and should be zero */ #define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080 /* BCM84833 only */ +#define MDIO_84833_TOP_CFG_FW_REV 0x400f +#define MDIO_84833_TOP_CFG_FW_EEE 0x10b1 +#define MDIO_84833_TOP_CFG_FW_NO_EEE 0x1f81 #define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a #define MDIO_84833_SUPER_ISOLATE 0x8000 /* These are mailbox register set used by 84833. */ @@ -6892,7 +7152,8 @@ The other bits are reserved and should be zero */ #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12 #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000 #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000 -#define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96 +#define MDIO_WC_REG_PCS_STATUS2 0x0021 +#define MDIO_WC_REG_PMD_KR_CONTROL 0x0096 #define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000 #define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e #define MDIO_WC_REG_XGXSBLK1_DESKEW 0x8010 @@ -6919,11 +7180,13 @@ The other bits are reserved and should be zero */ #define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca #define MDIO_WC_REG_RX2_PCI_CTRL 0x80da #define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea +#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI 0x80fa #define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104 #define MDIO_WC_REG_XGXS_STATUS3 0x8129 #define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130 #define MDIO_WC_REG_PAR_DET_10G_CTRL 0x8131 #define MDIO_WC_REG_XGXS_X2_CONTROL2 0x8141 +#define MDIO_WC_REG_XGXS_X2_CONTROL3 0x8142 #define MDIO_WC_REG_XGXS_RX_LN_SWAP1 0x816B #define MDIO_WC_REG_XGXS_TX_LN_SWAP1 0x8169 #define MDIO_WC_REG_GP2_STATUS_GP_2_0 0x81d0 @@ -6931,6 +7194,10 @@ The other bits are reserved and should be zero */ #define MDIO_WC_REG_GP2_STATUS_GP_2_2 0x81d2 #define MDIO_WC_REG_GP2_STATUS_GP_2_3 0x81d3 #define MDIO_WC_REG_GP2_STATUS_GP_2_4 0x81d4 +#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL 0x1000 +#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CMPL 0x0100 +#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP 0x0010 +#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CAP 0x1 #define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP 0x81EE #define MDIO_WC_REG_UC_INFO_B1_VERSION 0x81F0 #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE 0x81F2 @@ -6954,6 +7221,7 @@ The other bits are reserved and should be zero */
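An aside on the *_OFFSET/*_MASK pairs in this block: they describe sub-fields of a single 16-bit Warpcore register, updated with the usual clear-then-insert. For the TX FIR post-tap field defined immediately below (illustrative helper, not driver code):

	static u16 example_set_post_tap(u16 reg, u16 tap)
	{
		reg &= ~MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK;
		reg |= (tap << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) &
		       MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK;
		return reg | MDIO_WC_REG_TX_FIR_TAP_ENABLE;
	}
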
#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET 0x0a #define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK 0x7c00 #define MDIO_WC_REG_TX_FIR_TAP_ENABLE 0x8000 +#define MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP 0x82e2 #define MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL 0x82e3 #define MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL 0x82e6 #define MDIO_WC_REG_CL72_USERB0_CL72_BR_DEF_CTRL 0x82e7 @@ -6966,12 +7234,22 @@ The other bits are reserved and should be zero */ #define MDIO_WC_REG_SERDESDIGITAL_MISC1 0x8308 #define MDIO_WC_REG_SERDESDIGITAL_MISC2 0x8309 #define MDIO_WC_REG_DIGITAL3_UP1 0x8329 +#define MDIO_WC_REG_DIGITAL3_LP_UP1 0x832c #define MDIO_WC_REG_DIGITAL4_MISC3 0x833c +#define MDIO_WC_REG_DIGITAL4_MISC5 0x833e #define MDIO_WC_REG_DIGITAL5_MISC6 0x8345 #define MDIO_WC_REG_DIGITAL5_MISC7 0x8349 +#define MDIO_WC_REG_DIGITAL5_LINK_STATUS 0x834d #define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e #define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350 #define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368 +#define MDIO_WC_REG_CL73_USERB0_CTRL 0x8370 +#define MDIO_WC_REG_CL73_USERB0_USTAT 0x8371 +#define MDIO_WC_REG_CL73_BAM_CTRL1 0x8372 +#define MDIO_WC_REG_CL73_BAM_CTRL2 0x8373 +#define MDIO_WC_REG_CL73_BAM_CTRL3 0x8374 +#define MDIO_WC_REG_CL73_BAM_CODE_FIELD 0x837b +#define MDIO_WC_REG_EEE_COMBO_CONTROL0 0x8390 #define MDIO_WC_REG_TX66_CONTROL 0x83b0 #define MDIO_WC_REG_RX66_CONTROL 0x83c0 #define MDIO_WC_REG_RX66_SCW0 0x83c2 @@ -6984,7 +7262,17 @@ The other bits are reserved and should be zero */ #define MDIO_WC_REG_RX66_SCW3_MASK 0x83c9 #define MDIO_WC_REG_FX100_CTRL1 0x8400 #define MDIO_WC_REG_FX100_CTRL3 0x8402 - +#define MDIO_WC_REG_CL82_USERB1_TX_CTRL5 0x8436 +#define MDIO_WC_REG_CL82_USERB1_TX_CTRL6 0x8437 +#define MDIO_WC_REG_CL82_USERB1_TX_CTRL7 0x8438 +#define MDIO_WC_REG_CL82_USERB1_TX_CTRL9 0x8439 +#define MDIO_WC_REG_CL82_USERB1_RX_CTRL10 0x843a +#define MDIO_WC_REG_CL82_USERB1_RX_CTRL11 0x843b +#define MDIO_WC_REG_ETA_CL73_OUI1 0x8453 +#define MDIO_WC_REG_ETA_CL73_OUI2 0x8454 +#define MDIO_WC_REG_ETA_CL73_OUI3 0x8455 +#define MDIO_WC_REG_ETA_CL73_LD_BAM_CODE 0x8456 +#define MDIO_WC_REG_ETA_CL73_LD_UD_CODE 0x8457 #define MDIO_WC_REG_MICROBLK_CMD 0xffc2 #define MDIO_WC_REG_MICROBLK_DL_STATUS 0xffc5 #define MDIO_WC_REG_MICROBLK_CMD3 0xffcc @@ -7006,10 +7294,12 @@ The other bits are reserved and should be zero */ #define MDIO_REG_GPHY_ID_54618SE 0x5cd5 #define MDIO_REG_GPHY_CL45_ADDR_REG 0xd #define MDIO_REG_GPHY_CL45_DATA_REG 0xe -#define MDIO_REG_GPHY_EEE_ADV 0x3c -#define MDIO_REG_GPHY_EEE_1G (0x1 << 2) -#define MDIO_REG_GPHY_EEE_100 (0x1 << 1) #define MDIO_REG_GPHY_EEE_RESOLVED 0x803e +#define MDIO_REG_GPHY_EXP_ACCESS_GATE 0x15 +#define MDIO_REG_GPHY_EXP_ACCESS 0x17 +#define MDIO_REG_GPHY_EXP_ACCESS_TOP 0xd00 +#define MDIO_REG_GPHY_EXP_TOP_2K_BUF 0x40 +#define MDIO_REG_GPHY_AUX_STATUS 0x19 #define MDIO_REG_INTR_STATUS 0x1a #define MDIO_REG_INTR_MASK 0x1b #define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1) @@ -7124,8 +7414,7 @@ The other bits are reserved and should be zero */ #define CDU_REGION_NUMBER_UCM_AG 4 -/** - * String-to-compress [31:8] = CID (all 24 bits) +/* String-to-compress [31:8] = CID (all 24 bits) * String-to-compress [7:4] = Region * String-to-compress [3:0] = Type */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index cb6339c3557..b1936044767 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -1,6 +1,6 @@ /* bnx2x_sp.c: Broadcom Everest network driver.
* - * Copyright 2011 Broadcom Corporation + * Copyright (c) 2011-2013 Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -12,7 +12,7 @@ * license other than the GPL, without Broadcom's express prior written * consent. * - * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com> * Written by: Vladislav Zolotarov * */ @@ -30,16 +30,14 @@ #define BNX2X_MAX_EMUL_MULTI 16 -#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN) - /**** Exe Queue interfaces ****/ /** * bnx2x_exe_queue_init - init the Exe Queue object * - * @o: poiter to the object + * @o: pointer to the object * @exe_len: length - * @owner: poiter to the owner + * @owner: pointer to the owner * @validate: validate function pointer * @optimize: optimize function pointer * @exec: execute function pointer @@ -72,8 +70,8 @@ static inline void bnx2x_exe_queue_init(struct bnx2x *bp, o->execute = exec; o->get = get; - DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk " - "length of %d\n", exe_len); + DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n", + exe_len); } static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp, @@ -126,7 +124,7 @@ static inline int bnx2x_exe_queue_add(struct bnx2x *bp, /* Check if this request is ok */ rc = o->validate(bp, o->owner, elem); if (rc) { - BNX2X_ERR("Preamble failed: %d\n", rc); + DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc); goto free_and_exit; } } @@ -144,7 +142,6 @@ free_and_exit: spin_unlock_bh(&o->lock); return rc; - } static inline void __bnx2x_exe_queue_reset_pending( @@ -162,18 +159,6 @@ static inline void __bnx2x_exe_queue_reset_pending( } } -static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp, - struct bnx2x_exe_queue_obj *o) -{ - - spin_lock_bh(&o->lock); - - __bnx2x_exe_queue_reset_pending(bp, o); - - spin_unlock_bh(&o->lock); - -} - /** * bnx2x_exe_queue_step - execute one execution chunk atomically * @@ -181,7 +166,7 @@ static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp, * @o: queue * @ramrod_flags: flags * - * (Atomicy is ensured using the exe_queue->lock). + * (Should be called while holding the exe_queue->lock). */ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, struct bnx2x_exe_queue_obj *o, @@ -192,10 +177,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, memset(&spacer, 0, sizeof(spacer)); - spin_lock_bh(&o->lock); - - /* - * Next step should not be performed until the current is finished, + /* Next step should not be performed until the current is finished, * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to * properly clear object internals without sending any command to the FW * which also implies there won't be any completion to clear the @@ -203,17 +185,14 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, */ if (!list_empty(&o->pending_comp)) { if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { - DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: " - "resetting pending_comp\n"); + DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n"); __bnx2x_exe_queue_reset_pending(bp, o); } else { - spin_unlock_bh(&o->lock); return 1; } } - /* - * Run through the pending commands list and create a next + /* Run through the pending commands list and create a next * execution chunk. 
*/ while (!list_empty(&o->exe_queue)) { @@ -223,41 +202,34 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, if (cur_len + elem->cmd_len <= o->exe_chunk_len) { cur_len += elem->cmd_len; - /* - * Prevent from both lists being empty when moving an + /* Prevent from both lists being empty when moving an * element. This will allow the call of * bnx2x_exe_queue_empty() without locking. */ list_add_tail(&spacer.link, &o->pending_comp); mb(); - list_del(&elem->link); - list_add_tail(&elem->link, &o->pending_comp); + list_move_tail(&elem->link, &o->pending_comp); list_del(&spacer.link); } else break; } /* Sanity check */ - if (!cur_len) { - spin_unlock_bh(&o->lock); + if (!cur_len) return 0; - } rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags); if (rc < 0) - /* - * In case of an error return the commands back to the queue - * and reset the pending_comp. + /* In case of an error return the commands back to the queue + * and reset the pending_comp. */ list_splice_init(&o->pending_comp, &o->exe_queue); else if (!rc) - /* - * If zero is returned, means there are no outstanding pending + /* If zero is returned, means there are no outstanding pending * completions and we may dismiss the pending list. */ __bnx2x_exe_queue_reset_pending(bp, o); - spin_unlock_bh(&o->lock); return rc; } @@ -286,16 +258,16 @@ static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o) static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(o->state, o->pstate); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); } static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); set_bit(o->state, o->pstate); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); } /** @@ -312,7 +284,6 @@ static inline int bnx2x_state_wait(struct bnx2x *bp, int state, /* can take a while if any port is running */ int cnt = 5000; - if (CHIP_REV_IS_EMUL(bp)) cnt *= 20; @@ -327,7 +298,7 @@ static inline int bnx2x_state_wait(struct bnx2x *bp, int state, return 0; } - usleep_range(1000, 1000); + usleep_range(1000, 2000); if (bp->panic) return -EIO; @@ -384,23 +355,6 @@ static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o) return vp->get(vp, 1); } - -static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) -{ - struct bnx2x_credit_pool_obj *mp = o->macs_pool; - struct bnx2x_credit_pool_obj *vp = o->vlans_pool; - - if (!mp->get(mp, 1)) - return false; - - if (!vp->get(vp, 1)) { - mp->put(mp, 1); - return false; - } - - return true; -} - static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset) { struct bnx2x_credit_pool_obj *mp = o->macs_pool; @@ -429,140 +383,296 @@ static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o) return vp->put(vp, 1); } -static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) +/** + * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details: Non-blocking implementation; should be called under execution + * queue lock. 
+ */ +static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) { - struct bnx2x_credit_pool_obj *mp = o->macs_pool; - struct bnx2x_credit_pool_obj *vp = o->vlans_pool; + if (o->head_reader) { + DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n"); + return -EBUSY; + } - if (!mp->put(mp, 1)) - return false; + DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n"); + return 0; +} - if (!vp->put(vp, 1)) { - mp->get(mp, 1); - return false; +/** + * __bnx2x_vlan_mac_h_exec_pending - execute step instead of a previous step + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Should be called under execution queue lock; notice it might release + * and reclaim it during its run. + */ +static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + int rc; + unsigned long ramrod_flags = o->saved_ramrod_flags; + + DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n", + ramrod_flags); + o->head_exe_request = false; + o->saved_ramrod_flags = 0; + rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags); + if (rc != 0) { + BNX2X_ERR("execution of pending commands failed with rc %d\n", + rc); +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); +#endif } +} - return true; +/** + * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run + * + * @bp: device handle + * @o: vlan_mac object + * @ramrod_flags: ramrod flags of missed execution + * + * @details Should be called under execution queue lock. + */ +static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + unsigned long ramrod_flags) +{ + o->head_exe_request = true; + o->saved_ramrod_flags = ramrod_flags; + DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n", + ramrod_flags); +} + +/** + * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Should be called under execution queue lock. Notice if a pending + * execution exists, it would perform it - possibly releasing and + * reclaiming the execution queue lock. + */ +static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + /* It's possible a new pending execution was added since this writer + * executed. If so, execute again. [Ad infinitum] + */ + while (o->head_exe_request) { + DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n"); + __bnx2x_vlan_mac_h_exec_pending(bp, o); + } +} + + +/** + * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Should be called under the execution queue lock. May sleep. May + * release and reclaim execution queue lock during its run. + */ +static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + /* If we got here, we're holding lock --> no WRITER exists */ + o->head_reader++; + DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n", + o->head_reader); + + return 0; +} + +/** + * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details May sleep. Claims and releases execution queue lock during its run. 
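
The head_reader/head_exe_request pair introduced above amounts to a small reader-preferring lock layered on the execution queue spinlock: a writer that finds readers defers its step, and the last reader to leave replays it. A compilable userspace sketch of that protocol, with a pthread mutex standing in for the spinlock (all names illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct vmac_lock {
	pthread_mutex_t lock;	/* stands in for exe_queue.lock */
	int head_reader;	/* active readers of the head list */
	bool head_exe_request;	/* a writer step was deferred */
};

static int writer_trylock(struct vmac_lock *o)	/* call with lock held */
{
	return o->head_reader ? -1 : 0;		/* -EBUSY analogue */
}

static void run_deferred(struct vmac_lock *o)	/* call with lock held */
{
	while (o->head_exe_request) {
		o->head_exe_request = false;
		printf("executing deferred step\n");
	}
}

static void read_lock(struct vmac_lock *o)
{
	pthread_mutex_lock(&o->lock);
	o->head_reader++;
	pthread_mutex_unlock(&o->lock);
}

static void read_unlock(struct vmac_lock *o)
{
	pthread_mutex_lock(&o->lock);
	if (--o->head_reader == 0)
		run_deferred(o);	/* last reader replays the writer */
	pthread_mutex_unlock(&o->lock);
}

static void writer_step(struct vmac_lock *o)
{
	pthread_mutex_lock(&o->lock);
	if (writer_trylock(o))
		o->head_exe_request = true;	/* readers present: pend */
	else
		printf("executing step immediately\n");
	pthread_mutex_unlock(&o->lock);
}

int main(void)
{
	struct vmac_lock o = { PTHREAD_MUTEX_INITIALIZER, 0, false };

	read_lock(&o);
	writer_step(&o);	/* deferred: a reader is active */
	read_unlock(&o);	/* runs the pended step */
	return 0;
}
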
+ */ +int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + int rc; + + spin_lock_bh(&o->exe_queue.lock); + rc = __bnx2x_vlan_mac_h_read_lock(bp, o); + spin_unlock_bh(&o->exe_queue.lock); + + return rc; +} + +/** + * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Should be called under execution queue lock. Notice if a pending + * execution exists, it would be performed if this was the last + * reader, possibly releasing and reclaiming the execution queue lock. + */ +static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + if (!o->head_reader) { + BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n"); +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); +#endif + } else { + o->head_reader--; + DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n", + o->head_reader); + } + + /* It's possible a new pending execution was added, and that this reader + * was last - if so we need to execute the command. + */ + if (!o->head_reader && o->head_exe_request) { + DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n"); + + /* Writer release will do the trick */ + __bnx2x_vlan_mac_h_write_unlock(bp, o); + } +} + +/** + * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Notice if a pending execution exists, it would be performed if this + * was the last reader. Claims and releases the execution queue lock + * during its run. + */ +void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + spin_lock_bh(&o->exe_queue.lock); + __bnx2x_vlan_mac_h_read_unlock(bp, o); + spin_unlock_bh(&o->exe_queue.lock); } static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, - int n, u8 *buf) + int n, u8 *base, u8 stride, u8 size) { struct bnx2x_vlan_mac_registry_elem *pos; - u8 *next = buf; + u8 *next = base; int counter = 0; + int read_lock; + + DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n"); + read_lock = bnx2x_vlan_mac_h_read_lock(bp, o); + if (read_lock != 0) + BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n"); /* traverse list */ list_for_each_entry(pos, &o->head, link) { if (counter < n) { - /* place leading zeroes in buffer */ - memset(next, 0, MAC_LEADING_ZERO_CNT); - - /* place mac after leading zeroes*/ - memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac, - ETH_ALEN); - - /* calculate address of next element and - * advance counter - */ + memcpy(next, &pos->u, size); counter++; - next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32)); - - DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n", - counter, next, pos->u.mac.mac); + DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n", + counter, next); + next += stride + size; } } + + if (read_lock == 0) { + DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n"); + bnx2x_vlan_mac_h_read_unlock(bp, o); + } + return counter * ETH_ALEN; } /* check_add() callbacks */ -static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o, +static int bnx2x_check_mac_add(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, union bnx2x_classification_ramrod_data *data) { struct bnx2x_vlan_mac_registry_elem *pos; + DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac); + if
(!is_valid_ether_addr(data->mac.mac)) return -EINVAL; /* Check if a requested MAC already exists */ list_for_each_entry(pos, &o->head, link) - if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) + if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) && + (data->mac.is_inner_mac == pos->u.mac.is_inner_mac)) return -EEXIST; return 0; } -static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o, +static int bnx2x_check_vlan_add(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, union bnx2x_classification_ramrod_data *data) { struct bnx2x_vlan_mac_registry_elem *pos; - list_for_each_entry(pos, &o->head, link) - if (data->vlan.vlan == pos->u.vlan.vlan) - return -EEXIST; - - return 0; -} - -static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o, - union bnx2x_classification_ramrod_data *data) -{ - struct bnx2x_vlan_mac_registry_elem *pos; + DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan); list_for_each_entry(pos, &o->head, link) - if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && - (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, - ETH_ALEN))) + if (data->vlan.vlan == pos->u.vlan.vlan) return -EEXIST; return 0; } - /* check_del() callbacks */ static struct bnx2x_vlan_mac_registry_elem * - bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o, + bnx2x_check_mac_del(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, union bnx2x_classification_ramrod_data *data) { struct bnx2x_vlan_mac_registry_elem *pos; + DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac); + list_for_each_entry(pos, &o->head, link) - if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) + if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) && + (data->mac.is_inner_mac == pos->u.mac.is_inner_mac)) return pos; return NULL; } static struct bnx2x_vlan_mac_registry_elem * - bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o, + bnx2x_check_vlan_del(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, union bnx2x_classification_ramrod_data *data) { struct bnx2x_vlan_mac_registry_elem *pos; - list_for_each_entry(pos, &o->head, link) - if (data->vlan.vlan == pos->u.vlan.vlan) - return pos; - - return NULL; -} - -static struct bnx2x_vlan_mac_registry_elem * - bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o, - union bnx2x_classification_ramrod_data *data) -{ - struct bnx2x_vlan_mac_registry_elem *pos; + DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan); list_for_each_entry(pos, &o->head, link) - if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && - (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, - ETH_ALEN))) + if (data->vlan.vlan == pos->u.vlan.vlan) return pos; return NULL; } /* check_move() callback */ -static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o, +static bool bnx2x_check_move(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *src_o, struct bnx2x_vlan_mac_obj *dst_o, union bnx2x_classification_ramrod_data *data) { @@ -572,10 +682,10 @@ static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o, /* Check if we can delete the requested configuration from the first * object. */ - pos = src_o->check_del(src_o, data); + pos = src_o->check_del(bp, src_o, data); /* check if configuration can be added */ - rc = dst_o->check_add(dst_o, data); + rc = dst_o->check_add(bp, dst_o, data); /* If this classification can not be added (is already set) * or can't be deleted - return an error. 
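
The check_add()/check_del() callbacks in this hunk reduce to a registry walk in which a MAC only collides with an entry that also matches the new is_inner_mac qualifier. A standalone sketch of that comparison (a plain C list in place of the driver's registry, illustrative names):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct reg_elem {
	unsigned char mac[ETH_ALEN];
	bool is_inner_mac;
	struct reg_elem *next;
};

/* -EEXIST-style ADD check: a MAC collides only with a registry entry
 * that also matches on the inner/outer qualifier. */
static bool mac_add_allowed(const struct reg_elem *head,
			    const unsigned char *mac, bool is_inner)
{
	const struct reg_elem *pos;

	for (pos = head; pos; pos = pos->next)
		if (!memcmp(pos->mac, mac, ETH_ALEN) &&
		    pos->is_inner_mac == is_inner)
			return false;
	return true;
}

int main(void)
{
	unsigned char mac[ETH_ALEN] = { 0, 0x10, 0x18, 0, 0, 1 };
	struct reg_elem head = { { 0, 0x10, 0x18, 0, 0, 1 }, false, NULL };

	/* same bytes, different qualifier -> allowed */
	printf("outer dup allowed: %d, inner allowed: %d\n",
	       mac_add_allowed(&head, mac, false),
	       mac_add_allowed(&head, mac, true));
	return 0;
}
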
@@ -587,6 +697,7 @@ static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o, } static bool bnx2x_check_move_always_err( + struct bnx2x *bp, struct bnx2x_vlan_mac_obj *src_o, struct bnx2x_vlan_mac_obj *dst_o, union bnx2x_classification_ramrod_data *data) @@ -594,7 +705,6 @@ static bool bnx2x_check_move_always_err( return false; } - static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o) { struct bnx2x_raw_obj *raw = &o->raw; @@ -611,21 +721,17 @@ static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o) return rx_tx_flag; } -/* LLH CAM line allocations */ -enum { - LLH_CAM_ISCSI_ETH_LINE = 0, - LLH_CAM_ETH_LINE, - LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2 -}; - -static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp, +static void bnx2x_set_mac_in_nig(struct bnx2x *bp, bool add, unsigned char *dev_addr, int index) { u32 wb_data[2]; u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM : NIG_REG_LLH0_FUNC_MEM; - if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE) + if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp)) + return; + + if (index > BNX2X_LLH_CAM_MAX_PF_LINE) return; DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n", @@ -681,7 +787,7 @@ static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp, * * @cid: connection id * @type: BNX2X_FILTER_XXX_PENDING - * @hdr: poiter to header to setup + * @hdr: pointer to header to setup * @rule_cnt: * * currently we always configure one rule and echo field to contain a CID and an @@ -690,11 +796,11 @@ static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp, static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type, struct eth_classify_header *hdr, int rule_cnt) { - hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT); + hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) | + (type << BNX2X_SWCID_SHIFT)); hdr->rule_cnt = (u8)rule_cnt; } - /* hw_config() callbacks */ static void bnx2x_set_one_mac_e2(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, @@ -710,8 +816,7 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp, unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags; u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac; - /* - * Set LLH CAM entry: currently only iSCSI and ETH macs are + /* Set LLH CAM entry: currently only iSCSI and ETH macs are * relevant. In addition, current implementation is tuned for a * single ETH MAC. * @@ -731,9 +836,10 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp, if (cmd != BNX2X_VLAN_MAC_MOVE) { if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags)) bnx2x_set_mac_in_nig(bp, add, mac, - LLH_CAM_ISCSI_ETH_LINE); + BNX2X_LLH_CAM_ISCSI_ETH_LINE); else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags)) - bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE); + bnx2x_set_mac_in_nig(bp, add, mac, + BNX2X_LLH_CAM_ETH_LINE); } /* Reset the ramrod data buffer for the first rule */ @@ -745,12 +851,14 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp, &rule_entry->mac.header); DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n", - add ? "add" : "delete", mac, raw->cl_id); + (add ? 
"add" : "delete"), mac, raw->cl_id); /* Set a MAC itself */ bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, &rule_entry->mac.mac_mid, &rule_entry->mac.mac_lsb, mac); + rule_entry->mac.inner_mac = + cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac); /* MOVE: Add a rule that will add this MAC to the target Queue */ if (cmd == BNX2X_VLAN_MAC_MOVE) { @@ -767,6 +875,9 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp, bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, &rule_entry->mac.mac_mid, &rule_entry->mac.mac_lsb, mac); + rule_entry->mac.inner_mac = + cpu_to_le16(elem->cmd_data.vlan_mac. + u.mac.is_inner_mac); } /* Set the ramrod data header */ @@ -795,8 +906,9 @@ static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp, hdr->length = 1; hdr->offset = (u8)cam_offset; - hdr->client_id = 0xff; - hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT)); + hdr->client_id = cpu_to_le16(0xff); + hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) | + (type << BNX2X_SWCID_SHIFT)); } static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp, @@ -838,7 +950,7 @@ static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp, cfg_entry); DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n", - add ? "setting" : "clearing", + (add ? "setting" : "clearing"), mac, raw->cl_id, cam_offset); } @@ -859,8 +971,7 @@ static void bnx2x_set_one_mac_e1x(struct bnx2x *bp, struct bnx2x_raw_obj *raw = &o->raw; struct mac_configuration_cmd *config = (struct mac_configuration_cmd *)(raw->rdata); - /* - * 57710 and 57711 do not support MOVE command, + /* 57710 and 57711 do not support MOVE command, * so it's either ADD or DEL */ bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? @@ -869,7 +980,7 @@ static void bnx2x_set_one_mac_e1x(struct bnx2x *bp, /* Reset the ramrod data buffer */ memset(config, 0, sizeof(*config)); - bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING, + bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state, cam_offset, add, elem->cmd_data.vlan_mac.u.mac.mac, 0, ETH_VLAN_FILTER_ANY_VLAN, config); @@ -885,7 +996,7 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp, (struct eth_classify_rules_ramrod_data *)(raw->rdata); int rule_cnt = rule_idx + 1; union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; - int cmd = elem->cmd_data.vlan_mac.cmd; + enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan; @@ -925,104 +1036,12 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp, rule_cnt); } -static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, - struct bnx2x_exeq_elem *elem, - int rule_idx, int cam_offset) -{ - struct bnx2x_raw_obj *raw = &o->raw; - struct eth_classify_rules_ramrod_data *data = - (struct eth_classify_rules_ramrod_data *)(raw->rdata); - int rule_cnt = rule_idx + 1; - union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; - int cmd = elem->cmd_data.vlan_mac.cmd; - bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? 
true : false; - u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan; - u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac; - - - /* Reset the ramrod data buffer for the first rule */ - if (rule_idx == 0) - memset(data, 0, sizeof(*data)); - - /* Set a rule header */ - bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR, - &rule_entry->pair.header); - - /* Set VLAN and MAC themselvs */ - rule_entry->pair.vlan = cpu_to_le16(vlan); - bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, - &rule_entry->pair.mac_mid, - &rule_entry->pair.mac_lsb, mac); - - /* MOVE: Add a rule that will add this MAC to the target Queue */ - if (cmd == BNX2X_VLAN_MAC_MOVE) { - rule_entry++; - rule_cnt++; - - /* Setup ramrod data */ - bnx2x_vlan_mac_set_cmd_hdr_e2(bp, - elem->cmd_data.vlan_mac.target_obj, - true, CLASSIFY_RULE_OPCODE_PAIR, - &rule_entry->pair.header); - - /* Set a VLAN itself */ - rule_entry->pair.vlan = cpu_to_le16(vlan); - bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, - &rule_entry->pair.mac_mid, - &rule_entry->pair.mac_lsb, mac); - } - - /* Set the ramrod data header */ - /* TODO: take this to the higher level in order to prevent multiple - writing */ - bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, - rule_cnt); -} - -/** - * bnx2x_set_one_vlan_mac_e1h - - * - * @bp: device handle - * @o: bnx2x_vlan_mac_obj - * @elem: bnx2x_exeq_elem - * @rule_idx: rule_idx - * @cam_offset: cam_offset - */ -static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, - struct bnx2x_exeq_elem *elem, - int rule_idx, int cam_offset) -{ - struct bnx2x_raw_obj *raw = &o->raw; - struct mac_configuration_cmd *config = - (struct mac_configuration_cmd *)(raw->rdata); - /* - * 57710 and 57711 do not support MOVE command, - * so it's either ADD or DEL - */ - bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? - true : false; - - /* Reset the ramrod data buffer */ - memset(config, 0, sizeof(*config)); - - bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING, - cam_offset, add, - elem->cmd_data.vlan_mac.u.vlan_mac.mac, - elem->cmd_data.vlan_mac.u.vlan_mac.vlan, - ETH_VLAN_FILTER_CLASSIFY, config); -} - -#define list_next_entry(pos, member) \ - list_entry((pos)->member.next, typeof(*(pos)), member) - /** * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element * * @bp: device handle * @p: command parameters - * @ppos: pointer to the cooky + * @ppos: pointer to the cookie * * reconfigure next MAC/VLAN/VLAN-MAC element from the * previously configured elements list. @@ -1030,7 +1049,7 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken * into an account * - * pointer to the cooky - that should be given back in the next call to make + * pointer to the cookie - that should be given back in the next call to make * function handle the next element. If *ppos is set to NULL it will restart the * iterator. If returned *ppos == NULL this means that the last element has been * handled. @@ -1078,8 +1097,7 @@ static int bnx2x_vlan_mac_restore(struct bnx2x *bp, return bnx2x_config_vlan_mac(bp, p); } -/* - * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a +/* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a * pointer to an element with a specific criteria and NULL if such an element * hasn't been found. 
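
The cookie (*ppos) convention documented for bnx2x_vlan_mac_restore() above is easy to see in isolation: each call hands back one element, and the caller's cookie remembers the position. A hedged toy version, with hypothetical names:

#include <stdio.h>

struct elem {
	struct elem *next;
	int val;
};

/* One element per call: *ppos is the cookie. Pass *ppos == NULL to
 * (re)start; a NULL result means the last element has been handled. */
static struct elem *restore_next(struct elem *head, struct elem **ppos)
{
	*ppos = *ppos ? (*ppos)->next : head;
	return *ppos;
}

int main(void)
{
	struct elem c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
	struct elem *pos = NULL;	/* the cookie */

	while (restore_next(&a, &pos))
		printf("restoring element %d\n", pos->val);
	return 0;
}
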
*/ @@ -1117,24 +1135,6 @@ static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan( return NULL; } -static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac( - struct bnx2x_exe_queue_obj *o, - struct bnx2x_exeq_elem *elem) -{ - struct bnx2x_exeq_elem *pos; - struct bnx2x_vlan_mac_ramrod_data *data = - &elem->cmd_data.vlan_mac.u.vlan_mac; - - /* Check pending for execution commands */ - list_for_each_entry(pos, &o->exe_queue, link) - if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data, - sizeof(*data)) && - (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) - return pos; - - return NULL; -} - /** * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed * @@ -1157,15 +1157,13 @@ static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp, int rc; /* Check the registry */ - rc = o->check_add(o, &elem->cmd_data.vlan_mac.u); + rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u); if (rc) { - DP(BNX2X_MSG_SP, "ADD command is not allowed considering " - "current registry state\n"); + DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n"); return rc; } - /* - * Check if there is a pending ADD command for this + /* Check if there is a pending ADD command for this * MAC/VLAN/VLAN-MAC. Return an error if there is. */ if (exeq->get(exeq, elem)) { @@ -1173,8 +1171,7 @@ static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp, return -EEXIST; } - /* - * TODO: Check the pending MOVE from other objects where this + /* TODO: Check the pending MOVE from other objects where this * object is a destination object. */ @@ -1211,15 +1208,13 @@ static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp, /* If this classification can not be deleted (doesn't exist) * - return a BNX2X_EXIST. */ - pos = o->check_del(o, &elem->cmd_data.vlan_mac.u); + pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u); if (!pos) { - DP(BNX2X_MSG_SP, "DEL command is not allowed considering " - "current registry state\n"); + DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n"); return -EEXIST; } - /* - * Check if there are pending DEL or MOVE commands for this + /* Check if there are pending DEL or MOVE commands for this * MAC/VLAN/VLAN-MAC. Return an error if so. */ memcpy(&query_elem, elem, sizeof(query_elem)); @@ -1270,18 +1265,16 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp, struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue; struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue; - /* - * Check if we can perform this operation based on the current registry + /* Check if we can perform this operation based on the current registry * state. */ - if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) { - DP(BNX2X_MSG_SP, "MOVE command is not allowed considering " - "current registry state\n"); + if (!src_o->check_move(bp, src_o, dest_o, + &elem->cmd_data.vlan_mac.u)) { + DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n"); return -EINVAL; } - /* - * Check if there is an already pending DEL or MOVE command for the + /* Check if there is an already pending DEL or MOVE command for the * source object or ADD command for a destination object. Return an * error if so. 
*/ @@ -1290,8 +1283,7 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp, /* Check DEL on source */ query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL; if (src_exeq->get(src_exeq, &query_elem)) { - BNX2X_ERR("There is a pending DEL command on the source " - "queue already\n"); + BNX2X_ERR("There is a pending DEL command on the source queue already\n"); return -EINVAL; } @@ -1304,8 +1296,7 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp, /* Check ADD on destination */ query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD; if (dest_exeq->get(dest_exeq, &query_elem)) { - BNX2X_ERR("There is a pending ADD command on the " - "destination queue already\n"); + BNX2X_ERR("There is a pending ADD command on the destination queue already\n"); return -EINVAL; } @@ -1372,7 +1363,7 @@ static int bnx2x_remove_vlan_mac(struct bnx2x *bp, } /** - * bnx2x_wait_vlan_mac - passivly wait for 5 seconds until all work completes. + * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes. * * @bp: device handle * @o: bnx2x_vlan_mac_obj @@ -1393,7 +1384,7 @@ static int bnx2x_wait_vlan_mac(struct bnx2x *bp, /* Wait until there are no pending commands */ if (!bnx2x_exe_queue_empty(exeq)) - usleep_range(1000, 1000); + usleep_range(1000, 2000); else return 0; } @@ -1401,6 +1392,32 @@ static int bnx2x_wait_vlan_mac(struct bnx2x *bp, return -EBUSY; } +static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + unsigned long *ramrod_flags) +{ + int rc = 0; + + spin_lock_bh(&o->exe_queue.lock); + + DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n"); + rc = __bnx2x_vlan_mac_h_write_trylock(bp, o); + + if (rc != 0) { + __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags); + + /* Calling function should not differentiate between this case + * and the case in which there is already a pending ramrod + */ + rc = 1; + } else { + rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); + } + spin_unlock_bh(&o->exe_queue.lock); + + return rc; +} + /** * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod * @@ -1418,19 +1435,27 @@ static int bnx2x_complete_vlan_mac(struct bnx2x *bp, struct bnx2x_raw_obj *r = &o->raw; int rc; + /* Clearing the pending list & raw state should be made + * atomically (as execution flow assumes they represent the same).
+ */ + spin_lock_bh(&o->exe_queue.lock); + /* Reset pending list */ - bnx2x_exe_queue_reset_pending(bp, &o->exe_queue); + __bnx2x_exe_queue_reset_pending(bp, &o->exe_queue); /* Clear pending */ r->clear_pending(r); + spin_unlock_bh(&o->exe_queue.lock); + /* If ramrod failed this is most likely a SW bug */ if (cqe->message.error) return -EINVAL; - /* Run the next bulk of pending commands if requeted */ + /* Run the next bulk of pending commands if requested */ if (test_bit(RAMROD_CONT, ramrod_flags)) { - rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); + rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags); + if (rc < 0) return rc; } @@ -1480,12 +1505,10 @@ static int bnx2x_optimize_vlan_mac(struct bnx2x *bp, &pos->cmd_data.vlan_mac.vlan_mac_flags)) { if ((query.cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) { - BNX2X_ERR("Failed to return the credit for the " - "optimized ADD command\n"); + BNX2X_ERR("Failed to return the credit for the optimized ADD command\n"); return -EINVAL; } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */ - BNX2X_ERR("Failed to recover the credit from " - "the optimized DEL command\n"); + BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n"); return -EINVAL; } } @@ -1520,7 +1543,7 @@ static inline int bnx2x_vlan_mac_get_registry_elem( bool restore, struct bnx2x_vlan_mac_registry_elem **re) { - int cmd = elem->cmd_data.vlan_mac.cmd; + enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; struct bnx2x_vlan_mac_registry_elem *reg_elem; /* Allocate a new registry element if needed. */ @@ -1532,9 +1555,8 @@ static inline int bnx2x_vlan_mac_get_registry_elem( /* Get a new CAM offset */ if (!o->get_cam_offset(o, &reg_elem->cam_offset)) { - /* - * This shell never happen, because we have checked the - * CAM availiability in the 'validate'. + /* This shall never happen, because we have checked the + * CAM availability in the 'validate'. */ WARN_ON(1); kfree(reg_elem); @@ -1551,7 +1573,7 @@ static inline int bnx2x_vlan_mac_get_registry_elem( reg_elem->vlan_mac_flags = elem->cmd_data.vlan_mac.vlan_mac_flags; } else /* DEL, RESTORE */ - reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u); + reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u); *re = reg_elem; return 0; @@ -1579,10 +1601,9 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp, bool restore = test_bit(RAMROD_RESTORE, ramrod_flags); bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags); struct bnx2x_vlan_mac_registry_elem *reg_elem; - int cmd; + enum bnx2x_vlan_mac_cmd cmd; - /* - * If DRIVER_ONLY execution is requested, cleanup a registry + /* If DRIVER_ONLY execution is requested, cleanup a registry * and exit. Otherwise send a ramrod to FW. */ if (!drv_only) { @@ -1591,11 +1612,10 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp, /* Set pending */ r->set_pending(r); - /* Fill tha ramrod data */ + /* Fill the ramrod data */ list_for_each_entry(elem, exe_chunk, link) { cmd = elem->cmd_data.vlan_mac.cmd; - /* - * We will add to the target object in MOVE command, so + /* We will add to the target object in MOVE command, so * change the object for a CAM search.
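
bnx2x_vlan_mac_get_registry_elem() above front-loads the failure points of an ADD or MOVE: the registry element and its CAM offset are claimed during validation, so the later execution step cannot fail on allocation. A loose userspace analogue of that ADD-allocates/DEL-looks-up split (a toy registry keyed by a small integer, all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

enum cmd { CMD_ADD, CMD_DEL };

struct reg_elem { int cam_offset; };

static int next_cam;
static struct reg_elem *registry[8];

/* ADD allocates a fresh element plus a CAM slot up front; DEL just
 * looks up the element a prior ADD registered. */
static struct reg_elem *get_registry_elem(enum cmd cmd, int key)
{
	if (cmd == CMD_ADD) {
		struct reg_elem *e = calloc(1, sizeof(*e));

		if (!e)
			return NULL;
		e->cam_offset = next_cam++;
		registry[key] = e;
		return e;
	}
	return registry[key];	/* DEL: must already exist */
}

int main(void)
{
	struct reg_elem *e = get_registry_elem(CMD_ADD, 3);

	printf("add: cam=%d, del finds same elem: %d\n",
	       e->cam_offset, get_registry_elem(CMD_DEL, 3) == e);
	free(e);
	return 0;
}
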
*/ if (cmd == BNX2X_VLAN_MAC_MOVE) @@ -1628,12 +1648,11 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp, idx++; } - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). */ rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid, @@ -1649,7 +1668,8 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp, cmd = elem->cmd_data.vlan_mac.cmd; if ((cmd == BNX2X_VLAN_MAC_DEL) || (cmd == BNX2X_VLAN_MAC_MOVE)) { - reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u); + reg_elem = o->check_del(bp, o, + &elem->cmd_data.vlan_mac.u); WARN_ON(!reg_elem); @@ -1680,7 +1700,7 @@ error_exit: if (!restore && ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) { - reg_elem = o->check_del(cam_obj, + reg_elem = o->check_del(bp, cam_obj, &elem->cmd_data.vlan_mac.u); if (reg_elem) { list_del(&reg_elem->link); @@ -1728,9 +1748,8 @@ static inline int bnx2x_vlan_mac_push_new_cmd( * @p: * */ -int bnx2x_config_vlan_mac( - struct bnx2x *bp, - struct bnx2x_vlan_mac_ramrod_params *p) +int bnx2x_config_vlan_mac(struct bnx2x *bp, + struct bnx2x_vlan_mac_ramrod_params *p) { int rc = 0; struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; @@ -1747,34 +1766,31 @@ int bnx2x_config_vlan_mac( return rc; } - /* - * If nothing will be executed further in this iteration we want to + /* If nothing will be executed further in this iteration we want to * return PENDING if there are pending commands */ if (!bnx2x_exe_queue_empty(&o->exe_queue)) rc = 1; if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { - DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: " - "clearing a pending bit.\n"); + DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n"); raw->clear_pending(raw); } /* Execute commands if required */ if (cont || test_bit(RAMROD_EXEC, ramrod_flags) || test_bit(RAMROD_COMP_WAIT, ramrod_flags)) { - rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); + rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj, + &p->ramrod_flags); if (rc < 0) return rc; } - /* - * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set + /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set * then user want to wait until the last command is done. */ if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { - /* - * Wait maximum for the current exe_queue length iterations plus + /* Wait maximum for the current exe_queue length iterations plus * one (for the current pending command).
*/ int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1; @@ -1788,8 +1804,9 @@ int bnx2x_config_vlan_mac( return rc; /* Make a next step */ - rc = bnx2x_exe_queue_step(bp, &o->exe_queue, - ramrod_flags); + rc = __bnx2x_vlan_mac_execute_step(bp, + p->vlan_mac_obj, + &p->ramrod_flags); if (rc < 0) return rc; } @@ -1800,8 +1817,6 @@ int bnx2x_config_vlan_mac( return rc; } - - /** * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec * @@ -1811,7 +1826,7 @@ int bnx2x_config_vlan_mac( * @ramrod_flags: execution flags to be used for this deletion * * if the last operation has completed successfully and there are no - * moreelements left, positive value if the last operation has completed + * more elements left, positive value if the last operation has completed * successfully and there are more previously configured elements, negative * value is current operation has failed. */ @@ -1821,24 +1836,29 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, unsigned long *ramrod_flags) { struct bnx2x_vlan_mac_registry_elem *pos = NULL; - int rc = 0; struct bnx2x_vlan_mac_ramrod_params p; struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; + unsigned long flags; + int read_lock; + int rc = 0; /* Clear pending commands first */ spin_lock_bh(&exeq->lock); list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) { - if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags == - *vlan_mac_flags) { + flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags; + if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) == + BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) { rc = exeq->remove(bp, exeq->owner, exeq_pos); if (rc) { BNX2X_ERR("Failed to remove command\n"); + spin_unlock_bh(&exeq->lock); return rc; } list_del(&exeq_pos->link); + bnx2x_exe_queue_free_elem(bp, exeq_pos); } } @@ -1850,26 +1870,36 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, p.ramrod_flags = *ramrod_flags; p.user_req.cmd = BNX2X_VLAN_MAC_DEL; - /* - * Add all but the last VLAN-MAC to the execution queue without actually + /* Add all but the last VLAN-MAC to the execution queue without actually * execution anything. 
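
The RAMROD_COMP_WAIT loop above bounds its spinning by the queue length plus one, i.e. one pass per queued chunk plus the command already in flight. Stripped of the driver specifics, the shape is just this (toy state, illustrative names):

#include <stdio.h>

static int chunks_left = 3;	/* chunks still queued */

/* Returns 1 while more chunks remain after this step, 0 when drained. */
static int exe_queue_step(void)
{
	if (chunks_left)
		chunks_left--;
	return chunks_left ? 1 : 0;
}

int main(void)
{
	/* One iteration per queued chunk plus one for the command
	 * already in flight, so a lost completion cannot make this
	 * loop spin forever. */
	int max_iterations = chunks_left + 1;
	int rc = -1;		/* -EBUSY analogue */

	while (max_iterations--) {
		if (!exe_queue_step()) {
			rc = 0;	/* drained */
			break;
		}
	}
	printf("drain rc=%d\n", rc);
	return 0;
}
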
*/ __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags); __clear_bit(RAMROD_EXEC, &p.ramrod_flags); __clear_bit(RAMROD_CONT, &p.ramrod_flags); + DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n"); + read_lock = bnx2x_vlan_mac_h_read_lock(bp, o); + if (read_lock != 0) + return read_lock; + list_for_each_entry(pos, &o->head, link) { - if (pos->vlan_mac_flags == *vlan_mac_flags) { + flags = pos->vlan_mac_flags; + if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) == + BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) { p.user_req.vlan_mac_flags = pos->vlan_mac_flags; memcpy(&p.user_req.u, &pos->u, sizeof(pos->u)); rc = bnx2x_config_vlan_mac(bp, &p); if (rc < 0) { BNX2X_ERR("Failed to add a new DEL command\n"); + bnx2x_vlan_mac_h_read_unlock(bp, o); return rc; } } } + DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n"); + bnx2x_vlan_mac_h_read_unlock(bp, o); + p.ramrod_flags = *ramrod_flags; __set_bit(RAMROD_CONT, &p.ramrod_flags); @@ -1901,6 +1931,9 @@ static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o, struct bnx2x_credit_pool_obj *vlans_pool) { INIT_LIST_HEAD(&o->head); + o->head_reader = 0; + o->head_exe_request = false; + o->saved_ramrod_flags = 0; o->macs_pool = macs_pool; o->vlans_pool = vlans_pool; @@ -1914,7 +1947,6 @@ static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o, state, pstate, type); } - void bnx2x_init_mac_obj(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *mac_obj, u8 cl_id, u32 cid, u8 func_id, void *rdata, @@ -1997,6 +2029,7 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp, vlan_obj->check_move = bnx2x_check_move; vlan_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; + vlan_obj->get_n_elements = bnx2x_get_n_elements; /* Exe Queue */ bnx2x_exe_queue_init(bp, @@ -2009,71 +2042,6 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp, } } -void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *vlan_mac_obj, - u8 cl_id, u32 cid, u8 func_id, void *rdata, - dma_addr_t rdata_mapping, int state, - unsigned long *pstate, bnx2x_obj_type type, - struct bnx2x_credit_pool_obj *macs_pool, - struct bnx2x_credit_pool_obj *vlans_pool) -{ - union bnx2x_qable_obj *qable_obj = - (union bnx2x_qable_obj *)vlan_mac_obj; - - bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata, - rdata_mapping, state, pstate, type, - macs_pool, vlans_pool); - - /* CAM pool handling */ - vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac; - vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac; - /* - * CAM offset is relevant for 57710 and 57711 chips only which have a - * single CAM for both MACs and VLAN-MAC pairs. So the offset - * will be taken from MACs' pool object only. 
- */ - vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac; - vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac; - - if (CHIP_IS_E1(bp)) { - BNX2X_ERR("Do not support chips others than E2\n"); - BUG(); - } else if (CHIP_IS_E1H(bp)) { - vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h; - vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del; - vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add; - vlan_mac_obj->check_move = bnx2x_check_move_always_err; - vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; - - /* Exe Queue */ - bnx2x_exe_queue_init(bp, - &vlan_mac_obj->exe_queue, 1, qable_obj, - bnx2x_validate_vlan_mac, - bnx2x_remove_vlan_mac, - bnx2x_optimize_vlan_mac, - bnx2x_execute_vlan_mac, - bnx2x_exeq_get_vlan_mac); - } else { - vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2; - vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del; - vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add; - vlan_mac_obj->check_move = bnx2x_check_move; - vlan_mac_obj->ramrod_cmd = - RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; - - /* Exe Queue */ - bnx2x_exe_queue_init(bp, - &vlan_mac_obj->exe_queue, - CLASSIFY_RULES_COUNT, - qable_obj, bnx2x_validate_vlan_mac, - bnx2x_remove_vlan_mac, - bnx2x_optimize_vlan_mac, - bnx2x_execute_vlan_mac, - bnx2x_exeq_get_vlan_mac); - } - -} - /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ static inline void __storm_memset_mac_filters(struct bnx2x *bp, struct tstorm_eth_mac_filter_config *mac_filters, @@ -2090,18 +2058,18 @@ static inline void __storm_memset_mac_filters(struct bnx2x *bp, static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp, struct bnx2x_rx_mode_ramrod_params *p) { - /* update the bp MAC filter structure */ + /* update the bp MAC filter structure */ u32 mask = (1 << p->cl_id); struct tstorm_eth_mac_filter_config *mac_filters = (struct tstorm_eth_mac_filter_config *)p->rdata; - /* initial seeting is drop-all */ + /* initial setting is drop-all */ u8 drop_all_ucast = 1, drop_all_mcast = 1; u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; u8 unmatched_unicast = 0; - /* In e1x there we only take into account rx acceot flag since tx switching + /* In e1x there we only take into account rx accept flag since tx switching * isn't enabled. 
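
Both the e1x helper beginning here and the e2 variant below build their filter word the same way: start from drop-all and relax bits per accept flag. A self-contained sketch of that pattern with made-up flag values (the real bit layout lives in the firmware HSI headers):

#include <stdint.h>
#include <stdio.h>

#define ACCEPT_UNICAST		(1ul << 0)
#define ACCEPT_MULTICAST	(1ul << 1)
#define ACCEPT_ALL_UNICAST	(1ul << 2)
#define ACCEPT_BROADCAST	(1ul << 3)

#define UCAST_DROP_ALL		(1u << 0)
#define MCAST_DROP_ALL		(1u << 1)
#define UCAST_ACCEPT_ALL	(1u << 2)
#define BCAST_ACCEPT_ALL	(1u << 3)

/* Initial setting is drop-all; each accept flag then strips a drop
 * bit or adds an accept-all bit. */
static uint16_t rx_mode_state(unsigned long accept)
{
	uint16_t state = UCAST_DROP_ALL | MCAST_DROP_ALL;

	if (accept & ACCEPT_UNICAST)
		state &= ~UCAST_DROP_ALL;
	if (accept & ACCEPT_MULTICAST)
		state &= ~MCAST_DROP_ALL;
	if (accept & ACCEPT_ALL_UNICAST) {
		state &= ~UCAST_DROP_ALL;
		state |= UCAST_ACCEPT_ALL;
	}
	if (accept & ACCEPT_BROADCAST)
		state |= BCAST_ACCEPT_ALL;
	return state;
}

int main(void)
{
	printf("state = 0x%x\n",
	       rx_mode_state(ACCEPT_ALL_UNICAST | ACCEPT_BROADCAST));
	return 0;
}
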
*/ if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags)) /* accept matched ucast */ @@ -2154,18 +2122,16 @@ static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp, DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n" "accp_mcast 0x%x\naccp_bcast 0x%x\n", - mac_filters->ucast_drop_all, - mac_filters->mcast_drop_all, - mac_filters->ucast_accept_all, - mac_filters->mcast_accept_all, - mac_filters->bcast_accept_all); + mac_filters->ucast_drop_all, mac_filters->mcast_drop_all, + mac_filters->ucast_accept_all, mac_filters->mcast_accept_all, + mac_filters->bcast_accept_all); /* write the MAC filter structure*/ __storm_memset_mac_filters(bp, mac_filters, p->func_id); /* The operation is completed */ clear_bit(p->state, p->pstate); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); return 0; } @@ -2175,12 +2141,12 @@ static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid, struct eth_classify_header *hdr, u8 rule_cnt) { - hdr->echo = cid; + hdr->echo = cpu_to_le32(cid); hdr->rule_cnt = rule_cnt; } static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp, - unsigned long accept_flags, + unsigned long *accept_flags, struct eth_filter_rules_cmd *cmd, bool clear_accept_all) { @@ -2190,33 +2156,33 @@ static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp, state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL | ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; - if (accept_flags) { - if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags)) - state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; + if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags)) + state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; - if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags)) - state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; + if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags)) + state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; - if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) { - state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; - state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; - } + if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) { + state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; + state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; + } - if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) { - state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; - state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; - } - if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags)) - state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; + if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) { + state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; + state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; + } - if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) { - state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; - state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; - } - if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags)) - state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN; + if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags)) + state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; + + if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) { + state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; + state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; } + if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags)) + state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN; + /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */ if (clear_accept_all) { state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; @@ -2226,7 +2192,6 @@ static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp, } cmd->state = cpu_to_le16(state); - } static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, @@ -2249,8 +2214,9 @@ static int 
bnx2x_set_rx_mode_e2(struct bnx2x *bp, data->rules[rule_idx].cmd_general_data = ETH_FILTER_RULES_CMD_TX_CMD; - bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags, - &(data->rules[rule_idx++]), false); + bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags, + &(data->rules[rule_idx++]), + false); } /* Rx */ @@ -2261,13 +2227,12 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, data->rules[rule_idx].cmd_general_data = ETH_FILTER_RULES_CMD_RX_CMD; - bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags, - &(data->rules[rule_idx++]), false); + bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags, + &(data->rules[rule_idx++]), + false); } - - /* - * If FCoE Queue configuration has been requested configure the Rx and + /* If FCoE Queue configuration has been requested configure the Rx and * internal switching modes for this queue in separate rules. * * FCoE queue shell never be set to ACCEPT_ALL packets of any sort: @@ -2282,9 +2247,10 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, data->rules[rule_idx].cmd_general_data = ETH_FILTER_RULES_CMD_TX_CMD; - bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags, - &(data->rules[rule_idx++]), + bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags, + &(data->rules[rule_idx]), true); + rule_idx++; } /* Rx */ @@ -2295,29 +2261,27 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, data->rules[rule_idx].cmd_general_data = ETH_FILTER_RULES_CMD_RX_CMD; - bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags, - &(data->rules[rule_idx++]), + bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags, + &(data->rules[rule_idx]), true); + rule_idx++; } } - /* - * Set the ramrod header (most importantly - number of rules to + /* Set the ramrod header (most importantly - number of rules to * configure). */ bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx); - DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, " - "tx_accept_flags 0x%lx\n", + DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n", data->header.rule_cnt, p->rx_accept_flags, p->tx_accept_flags); - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). */ /* Send a ramrod */ @@ -2419,7 +2383,7 @@ static int bnx2x_mcast_wait(struct bnx2x *bp, static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, - int cmd) + enum bnx2x_mcast_cmd cmd) { int total_sz; struct bnx2x_pending_mcast_cmd *new_cmd; @@ -2441,8 +2405,8 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, if (!new_cmd) return -ENOMEM; - DP(BNX2X_MSG_SP, "About to enqueue a new %d command. " - "macs_list_len=%d\n", cmd, macs_list_len); + DP(BNX2X_MSG_SP, "About to enqueue a new %d command. 
macs_list_len=%d\n", + cmd, macs_list_len); INIT_LIST_HEAD(&new_cmd->data.macs_head); @@ -2454,7 +2418,7 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, cur_mac = (struct bnx2x_mcast_mac_elem *) ((u8 *)new_cmd + sizeof(*new_cmd)); - /* Push the MACs of the current command into the pendig command + /* Push the MACs of the current command into the pending command * MACs list: FIFO */ list_for_each_entry(pos, &p->mcast_list, link) { @@ -2474,6 +2438,7 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, break; default: + kfree(new_cmd); BNX2X_ERR("Unknown command: %d\n", cmd); return -EINVAL; } @@ -2550,7 +2515,7 @@ static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o) static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp, struct bnx2x_mcast_obj *o, int idx, union bnx2x_mcast_config_data *cfg_data, - int cmd) + enum bnx2x_mcast_cmd cmd) { struct bnx2x_raw_obj *r = &o->raw; struct eth_multicast_rules_ramrod_data *data = @@ -2614,7 +2579,7 @@ static inline int bnx2x_mcast_handle_restore_cmd_e2( int *rdata_idx) { int cur_bin, cnt = *rdata_idx; - union bnx2x_mcast_config_data cfg_data = {0}; + union bnx2x_mcast_config_data cfg_data = {NULL}; /* go through the registry and configure the bins from it */ for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0; @@ -2646,7 +2611,7 @@ static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp, { struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n; int cnt = *line_idx; - union bnx2x_mcast_config_data cfg_data = {0}; + union bnx2x_mcast_config_data cfg_data = {NULL}; list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head, link) { @@ -2657,7 +2622,7 @@ static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp, cnt++; DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n", - pmac_pos->mac); + pmac_pos->mac); list_del(&pmac_pos->link); @@ -2769,7 +2734,7 @@ static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp, int *line_idx) { struct bnx2x_mcast_list_elem *mlist_pos; - union bnx2x_mcast_config_data cfg_data = {0}; + union bnx2x_mcast_config_data cfg_data = {NULL}; int cnt = *line_idx; list_for_each_entry(mlist_pos, &p->mcast_list, link) { @@ -2779,7 +2744,7 @@ static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp, cnt++; DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n", - mlist_pos->mac); + mlist_pos->mac); } *line_idx = cnt; @@ -2816,7 +2781,8 @@ static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp, * Returns number of lines filled in the ramrod data in total. 
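
bnx2x_mcast_enqueue_cmd() above sizes a single allocation for the command header plus its MAC list and carves cur_mac out of the tail of that block. The same single-allocation layout, expressed with a C99 flexible array member (illustrative types, not the driver's):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ETH_ALEN 6

struct mac_elem { unsigned char mac[ETH_ALEN]; };

struct pending_cmd {
	int type;
	int macs_num;
	/* MAC storage lives in the same block, right after the
	 * header -- one allocation, one free */
	struct mac_elem macs[];
};

static struct pending_cmd *enqueue_cmd(int type,
				       unsigned char (*macs)[ETH_ALEN],
				       int n)
{
	struct pending_cmd *cmd =
		calloc(1, sizeof(*cmd) + n * sizeof(struct mac_elem));
	int i;

	if (!cmd)
		return NULL;
	cmd->type = type;
	cmd->macs_num = n;
	for (i = 0; i < n; i++)
		memcpy(cmd->macs[i].mac, macs[i], ETH_ALEN);
	return cmd;
}

int main(void)
{
	unsigned char list[2][ETH_ALEN] = {
		{ 1, 0, 0, 0, 0, 1 }, { 1, 0, 0, 0, 0, 2 } };
	struct pending_cmd *cmd = enqueue_cmd(0, list, 2);

	printf("queued %d macs\n", cmd ? cmd->macs_num : -1);
	free(cmd);
	return 0;
}
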
*/ static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp, - struct bnx2x_mcast_ramrod_params *p, int cmd, + struct bnx2x_mcast_ramrod_params *p, + enum bnx2x_mcast_cmd cmd, int start_cnt) { struct bnx2x_mcast_obj *o = p->mcast_obj; @@ -2850,7 +2816,7 @@ static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp, static int bnx2x_mcast_validate_e2(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p, - int cmd) + enum bnx2x_mcast_cmd cmd) { struct bnx2x_mcast_obj *o = p->mcast_obj; int reg_sz = o->get_registry_size(o); @@ -2885,7 +2851,6 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp, default: BNX2X_ERR("Unknown command: %d\n", cmd); return -EINVAL; - } /* Increase the total number of MACs pending to be configured */ @@ -2919,8 +2884,9 @@ static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp, struct eth_multicast_rules_ramrod_data *data = (struct eth_multicast_rules_ramrod_data *)(r->rdata); - data->header.echo = ((r->cid & BNX2X_SWCID_MASK) | - (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT)); + data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) | + (BNX2X_FILTER_MCAST_PENDING << + BNX2X_SWCID_SHIFT)); data->header.rule_cnt = len; } @@ -2954,7 +2920,7 @@ static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp, static int bnx2x_mcast_setup_e2(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p, - int cmd) + enum bnx2x_mcast_cmd cmd) { struct bnx2x_raw_obj *raw = &p->mcast_obj->raw; struct bnx2x_mcast_obj *o = p->mcast_obj; @@ -3009,20 +2975,18 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp, if (!o->total_pending_num) bnx2x_mcast_refresh_registry_e2(bp, o); - /* - * If CLEAR_ONLY was requested - don't send a ramrod and clear + /* If CLEAR_ONLY was requested - don't send a ramrod and clear * RAMROD_PENDING status immediately. */ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { raw->clear_pending(raw); return 0; } else { - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). */ /* Send a ramrod */ @@ -3040,7 +3004,7 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp, static int bnx2x_mcast_validate_e1h(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p, - int cmd) + enum bnx2x_mcast_cmd cmd) { /* Mark, that there is a work to do */ if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE)) @@ -3074,7 +3038,7 @@ static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp, BNX2X_57711_SET_MC_FILTER(mc_filter, bit); DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n", - mlist_pos->mac, bit); + mlist_pos->mac, bit); /* bookkeeping... */ BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, @@ -3096,13 +3060,13 @@ static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp, } } -/* On 57711 we write the multicast MACs' aproximate match +/* On 57711 we write the multicast MACs' approximate match * table by directly into the TSTORM's internal RAM. So we don't * really need to handle any tricks to make it work. 
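
The 57711 approximate-match path in this hunk hashes each multicast MAC to one of 256 bins and records the bin both in the chip's filter and in the driver's u64 registry vector. A sketch of the bin bookkeeping; the placeholder hash below merely stands in for the CRC-based bin selection the hardware actually uses:

#include <stdint.h>
#include <stdio.h>

#define ELEM_SZ 64
#define VEC_SET(vec, idx) \
	((vec)[(idx) / ELEM_SZ] |= (1ULL << ((idx) % ELEM_SZ)))
#define VEC_TEST(vec, idx) \
	(((vec)[(idx) / ELEM_SZ] >> ((idx) % ELEM_SZ)) & 1)

/* Map a MAC to one of 256 bins; placeholder hash, not the HW CRC. */
static unsigned int mac_to_bin(const unsigned char *mac)
{
	unsigned int hash = 0;
	int i;

	for (i = 0; i < 6; i++)
		hash = hash * 31 + mac[i];
	return hash & 0xff;
}

int main(void)
{
	uint64_t vec[4] = { 0 };	/* 256 bins = 4 x u64 */
	unsigned char mac[6] = { 1, 0, 0x5e, 0, 0, 1 };
	unsigned int bin = mac_to_bin(mac);

	VEC_SET(vec, bin);		/* bookkeeping... */
	printf("bin %u set: %d\n", bin, (int)VEC_TEST(vec, bin));
	return 0;
}
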
*/ static int bnx2x_mcast_setup_e1h(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p, - int cmd) + enum bnx2x_mcast_cmd cmd) { int i; struct bnx2x_mcast_obj *o = p->mcast_obj; @@ -3156,7 +3120,7 @@ static int bnx2x_mcast_setup_e1h(struct bnx2x *bp, static int bnx2x_mcast_validate_e1(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p, - int cmd) + enum bnx2x_mcast_cmd cmd) { struct bnx2x_mcast_obj *o = p->mcast_obj; int reg_sz = o->get_registry_size(o); @@ -3181,8 +3145,8 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp, * matter. */ if (p->mcast_list_len > o->max_cmd_len) { - BNX2X_ERR("Can't configure more than %d multicast MACs" - "on 57710\n", o->max_cmd_len); + BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n", + o->max_cmd_len); return -EINVAL; } /* Every configured MAC should be cleared if DEL command is @@ -3198,7 +3162,6 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp, default: BNX2X_ERR("Unknown command: %d\n", cmd); return -EINVAL; - } /* We want to ensure that commands are executed one by one for 57710. @@ -3220,7 +3183,7 @@ static void bnx2x_mcast_revert_e1(struct bnx2x *bp, /* If current command hasn't been handled yet and we are * here means that it's meant to be dropped and we have to - * update the number of outstandling MACs accordingly. + * update the number of outstanding MACs accordingly. */ if (p->mcast_list_len) o->total_pending_num -= o->max_cmd_len; @@ -3229,7 +3192,7 @@ static void bnx2x_mcast_revert_e1(struct bnx2x *bp, static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp, struct bnx2x_mcast_obj *o, int idx, union bnx2x_mcast_config_data *cfg_data, - int cmd) + enum bnx2x_mcast_cmd cmd) { struct bnx2x_raw_obj *r = &o->raw; struct mac_configuration_cmd *data = @@ -3273,9 +3236,10 @@ static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp, BNX2X_MAX_MULTICAST*(1 + r->func_id)); data->hdr.offset = offset; - data->hdr.client_id = 0xff; - data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) | - (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT)); + data->hdr.client_id = cpu_to_le16(0xff); + data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) | + (BNX2X_FILTER_MCAST_PENDING << + BNX2X_SWCID_SHIFT)); data->hdr.length = len; } @@ -3298,7 +3262,7 @@ static inline int bnx2x_mcast_handle_restore_cmd_e1( { struct bnx2x_mcast_mac_elem *elem; int i = 0; - union bnx2x_mcast_config_data cfg_data = {0}; + union bnx2x_mcast_config_data cfg_data = {NULL}; /* go through the registry and configure the MACs from it. 
*/ list_for_each_entry(elem, &o->registry.exact_match.macs, link) { @@ -3308,7 +3272,7 @@ static inline int bnx2x_mcast_handle_restore_cmd_e1( i++; DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n", - cfg_data.mac); + cfg_data.mac); } *rdata_idx = i; @@ -3316,17 +3280,15 @@ static inline int bnx2x_mcast_handle_restore_cmd_e1( return -1; } - static inline int bnx2x_mcast_handle_pending_cmds_e1( struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p) { struct bnx2x_pending_mcast_cmd *cmd_pos; struct bnx2x_mcast_mac_elem *pmac_pos; struct bnx2x_mcast_obj *o = p->mcast_obj; - union bnx2x_mcast_config_data cfg_data = {0}; + union bnx2x_mcast_config_data cfg_data = {NULL}; int cnt = 0; - /* If nothing to be done - return */ if (list_empty(&o->pending_cmds_head)) return 0; @@ -3344,7 +3306,7 @@ static inline int bnx2x_mcast_handle_pending_cmds_e1( cnt++; DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n", - pmac_pos->mac); + pmac_pos->mac); } break; @@ -3430,7 +3392,7 @@ static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp, &data->config_table[i].lsb_mac_addr, elem->mac); DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n", - elem->mac); + elem->mac); list_add_tail(&elem->link, &o->registry.exact_match.macs); } @@ -3447,7 +3409,7 @@ static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp, static int bnx2x_mcast_setup_e1(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p, - int cmd) + enum bnx2x_mcast_cmd cmd) { struct bnx2x_mcast_obj *o = p->mcast_obj; struct bnx2x_raw_obj *raw = &o->raw; @@ -3497,20 +3459,18 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp, if (rc) return rc; - /* - * If CLEAR_ONLY was requested - don't send a ramrod and clear + /* If CLEAR_ONLY was requested - don't send a ramrod and clear * RAMROD_PENDING status immediately. */ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { raw->clear_pending(raw); return 0; } else { - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). 
*/ /* Send a ramrod */ @@ -3524,7 +3484,6 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp, /* Ramrod completion is pending */ return 1; } - } static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o) @@ -3551,7 +3510,7 @@ static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o, int bnx2x_config_mcast(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p, - int cmd) + enum bnx2x_mcast_cmd cmd) { struct bnx2x_mcast_obj *o = p->mcast_obj; struct bnx2x_raw_obj *r = &o->raw; @@ -3571,9 +3530,8 @@ int bnx2x_config_mcast(struct bnx2x *bp, if ((!p->mcast_list_len) && (!o->check_sched(o))) return 0; - DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d " - "o->max_cmd_len=%d\n", o->total_pending_num, - p->mcast_list_len, o->max_cmd_len); + DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n", + o->total_pending_num, p->mcast_list_len, o->max_cmd_len); /* Enqueue the current command to the pending list if we can't complete * it in the current iteration @@ -3618,16 +3576,16 @@ error_exit1: static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(o->sched_state, o->raw.pstate); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); } static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); set_bit(o->sched_state, o->raw.pstate); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); } static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o) @@ -3823,7 +3781,6 @@ static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o, return true; } - static bool bnx2x_credit_pool_get_entry( struct bnx2x_credit_pool_obj *o, int *offset) @@ -3840,7 +3797,7 @@ static bool bnx2x_credit_pool_get_entry( continue; /* If we've got here we are going to find a free entry */ - for (idx = vec * BNX2X_POOL_VEC_SIZE, i = 0; + for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0; i < BIT_VEC64_ELEM_SZ; idx++, i++) if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) { @@ -3974,8 +3931,7 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp, } else { - /* - * CAM credit is equaly divided between all active functions + /* CAM credit is equally divided between all active functions * on the PATH. */ if ((func_num > 0)) { if (CHIP_REV_IS_SLOW(bp)) cam_sz = BNX2X_CAM_SIZE_EMUL; else cam_sz = BNX2X_CAM_SIZE_EMUL; - /* - * No need for CAM entries handling for 57712 and + /* No need for CAM entries handling for 57712 and * newer. */ bnx2x_init_credit_pool(p, -1, cam_sz); @@ -3993,7 +3948,6 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp, /* this should never happen! Block MAC operations. */ bnx2x_init_credit_pool(p, 0, 0); } - } } @@ -4003,14 +3957,12 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, u8 func_num) { if (CHIP_IS_E1x(bp)) { - /* - * There is no VLAN credit in HW on 57710 and 57711 only + /* There is no VLAN credit in HW on 57710 and 57711 only * MAC / MAC-VLAN can be set */ bnx2x_init_credit_pool(p, 0, -1); } else { - /* - * CAM credit is equaly divided between all active functions + /* CAM credit is equally divided between all active functions * on the PATH. */ if (func_num > 0) { @@ -4026,7 +3978,7 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, /** * bnx2x_debug_print_ind_table - prints the indirection table configuration. * - * @bp: driver hanlde + * @bp: driver handle * @p: pointer to rss configuration * * Prints it when NETIF_MSG_IFUP debug level is configured.
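
The BNX2X_POOL_VEC_SIZE -> BIT_VEC64_ELEM_SZ change in bnx2x_credit_pool_get_entry() above matters because the running index must advance in units of the 64-bit element size. The fixed scan, reduced to a userspace sketch (a set bit marks a free entry; names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define ELEM_SZ 64

/* Skip all-zero words, then probe bit by bit; claim and return the
 * first free entry, or -1 if the pool is exhausted. */
static int pool_get_entry(uint64_t *mirror, int words)
{
	int vec, i;

	for (vec = 0; vec < words; vec++) {
		if (!mirror[vec])
			continue;	/* no free entries in this word */
		for (i = 0; i < ELEM_SZ; i++) {
			int idx = vec * ELEM_SZ + i;

			if (mirror[vec] & (1ULL << i)) {
				mirror[vec] &= ~(1ULL << i);
				return idx;
			}
		}
	}
	return -1;
}

int main(void)
{
	uint64_t mirror[2] = { 0, 1ULL << 5 };	/* only entry 69 free */

	printf("got entry %d\n", pool_get_entry(mirror, 2));
	return 0;
}
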
@@ -4075,20 +4027,14 @@ static int bnx2x_setup_rss(struct bnx2x *bp, DP(BNX2X_MSG_SP, "Configuring RSS\n"); /* Set an echo field */ - data->echo = (r->cid & BNX2X_SWCID_MASK) | - (r->state << BNX2X_SWCID_SHIFT); + data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) | + (r->state << BNX2X_SWCID_SHIFT)); /* RSS mode */ if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags)) rss_mode = ETH_RSS_MODE_DISABLED; else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags)) rss_mode = ETH_RSS_MODE_REGULAR; - else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags)) - rss_mode = ETH_RSS_MODE_VLAN_PRI; - else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags)) - rss_mode = ETH_RSS_MODE_E1HOV_PRI; - else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags)) - rss_mode = ETH_RSS_MODE_IP_DSCP; data->rss_mode = rss_mode; @@ -4103,6 +4049,10 @@ static int bnx2x_setup_rss(struct bnx2x *bp, data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; + if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags)) + data->capabilities |= + ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY; + if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags)) data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; @@ -4111,6 +4061,10 @@ static int bnx2x_setup_rss(struct bnx2x *bp, data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; + if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags)) + data->capabilities |= + ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY; + /* Hashing mask */ data->rss_result_mask = p->rss_result_mask; @@ -4137,12 +4091,11 @@ static int bnx2x_setup_rss(struct bnx2x *bp, data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; } - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). */ /* Send a ramrod */ @@ -4171,8 +4124,11 @@ int bnx2x_config_rss(struct bnx2x *bp, struct bnx2x_raw_obj *r = &o->raw; /* Do nothing if only driver cleanup was requested */ - if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) + if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { + DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n", + p->ramrod_flags); return 0; + } r->set_pending(r); @@ -4188,7 +4144,6 @@ int bnx2x_config_rss(struct bnx2x *bp, return rc; } - void bnx2x_init_rss_config_obj(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, u8 cl_id, u32 cid, u8 func_id, u8 engine_id, @@ -4225,11 +4180,16 @@ int bnx2x_queue_state_change(struct bnx2x *bp, unsigned long *pending = &o->pending; /* Check that the requested transition is legal */ - if (o->check_transition(bp, o, params)) + rc = o->check_transition(bp, o, params); + if (rc) { + BNX2X_ERR("check transition returned an error. 
rc %d\n", rc); return -EINVAL; + } /* Set "pending" bit */ + DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending); pending_bit = o->set_pending(o, params); + DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending); /* Don't send a command if only driver cleanup was requested */ if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) @@ -4240,7 +4200,7 @@ int bnx2x_queue_state_change(struct bnx2x *bp, if (rc) { o->next_state = BNX2X_Q_STATE_MAX; clear_bit(pending_bit, pending); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); return rc; } @@ -4256,7 +4216,6 @@ int bnx2x_queue_state_change(struct bnx2x *bp, return !!test_bit(pending_bit, pending); } - static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj, struct bnx2x_queue_state_params *params) { @@ -4298,27 +4257,26 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp, unsigned long cur_pending = o->pending; if (!test_and_clear_bit(cmd, &cur_pending)) { - BNX2X_ERR("Bad MC reply %d for queue %d in state %d " - "pending 0x%lx, next_state %d\n", cmd, - o->cids[BNX2X_PRIMARY_CID_INDEX], + BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n", + cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->state, cur_pending, o->next_state); return -EINVAL; } if (o->next_tx_only >= o->max_cos) - /* >= becuase tx only must always be smaller than cos since the - * primary connection suports COS 0 + /* >= because tx only must always be smaller than cos since the + * primary connection supports COS 0 */ BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d", o->next_tx_only, o->max_cos); - DP(BNX2X_MSG_SP, "Completing command %d for queue %d, " - "setting state to %d\n", cmd, - o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state); + DP(BNX2X_MSG_SP, + "Completing command %d for queue %d, setting state to %d\n", + cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state); if (o->next_tx_only) /* print num tx-only if any exist */ DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n", - o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only); + o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only); o->state = o->next_state; o->num_tx_only = o->next_tx_only; @@ -4330,7 +4288,7 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp, wmb(); clear_bit(cmd, &o->pending); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); return 0; } @@ -4372,7 +4330,6 @@ static void bnx2x_q_fill_init_general_data(struct bnx2x *bp, gen_data->mtu = cpu_to_le16(params->mtu); gen_data->func_id = o->func_id; - gen_data->cos = params->cos; gen_data->traffic_type = @@ -4398,6 +4355,15 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o, test_bit(BNX2X_Q_FLG_TX_SWITCH, flags); tx_data->anti_spoofing_flg = test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags); + tx_data->force_default_pri_flg = + test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags); + + tx_data->tunnel_lso_inc_ip_id = + test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags); + tx_data->tunnel_non_lso_pcsum_location = + test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? 
PCSUM_ON_PKT : + PCSUM_ON_BD; + tx_data->tx_status_block_id = params->fw_sb_id; tx_data->tx_sb_index_number = params->sb_cq_index; tx_data->tss_leading_client_id = params->tss_leading_cl_id; @@ -4430,9 +4396,10 @@ static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o, struct client_init_rx_data *rx_data, unsigned long *flags) { - /* Rx data */ rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) * CLIENT_INIT_RX_DATA_TPA_EN_IPV4; + rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) * + CLIENT_INIT_RX_DATA_TPA_MODE; rx_data->vmqueue_mode_en_flg = 0; rx_data->cache_line_alignment_log_size = @@ -4476,7 +4443,7 @@ static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o, rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags); if (test_bit(BNX2X_Q_FLG_MCAST, flags)) { - rx_data->approx_mcast_engine_id = o->func_id; + rx_data->approx_mcast_engine_id = params->mcast_engine_id; rx_data->is_approx_mcast = 1; } @@ -4489,7 +4456,6 @@ static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o, cpu_to_le16(params->silent_removal_value); rx_data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask); - } /* initialize the general, tx and rx parts of a queue object */ @@ -4532,8 +4498,10 @@ static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp, &data->tx, &cmd_params->params.tx_only.flags); - DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n",cmd_params->q_obj->cids[0], - data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi); + DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x", + cmd_params->q_obj->cids[0], + data->tx.tx_bd_page_base.lo, + data->tx.tx_bd_page_base.hi); } /** @@ -4609,14 +4577,12 @@ static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp, /* Fill the ramrod data */ bnx2x_q_fill_setup_data_cmn(bp, params, rdata); - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). */ - return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], U64_HI(data_mapping), U64_LO(data_mapping), ETH_CONNECTION_TYPE); @@ -4638,14 +4604,12 @@ static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp, bnx2x_q_fill_setup_data_cmn(bp, params, rdata); bnx2x_q_fill_setup_data_e2(bp, params, rdata); - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). 
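bnx2x_q_fill_init_rx_data() above leans on the multiply-by-flag idiom: test_bit() returns exactly 0 or 1, so multiplying by a one-bit HSI mask selects that mask or zero without a branch, and GRO-mode TPA is simply a second bit OR-ed into the same byte. Spelled out, with the same names as the hunk:

	u8 tpa_en = 0;

	tpa_en |= test_bit(BNX2X_Q_FLG_TPA, flags) *
		  CLIENT_INIT_RX_DATA_TPA_EN_IPV4;	/* classic TPA on IPv4 */
	tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
		  CLIENT_INIT_RX_DATA_TPA_MODE;		/* aggregate in GRO mode */
	rx_data->tpa_en = tpa_en;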
*/ - return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], U64_HI(data_mapping), U64_LO(data_mapping), ETH_CONNECTION_TYPE); @@ -4663,7 +4627,6 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp, &params->params.tx_only; u8 cid_index = tx_only_params->cid_index; - if (cid_index >= o->max_cos) { BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", o->cl_id, cid_index); @@ -4680,20 +4643,16 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp, /* Fill the ramrod data */ bnx2x_q_fill_setup_tx_only(bp, params, rdata); - DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d," - "sp-client id %d, cos %d\n", - o->cids[cid_index], - rdata->general.client_id, + DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n", + o->cids[cid_index], rdata->general.client_id, rdata->general.sp_client_id, rdata->general.cos); - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). */ - return bnx2x_sp_post(bp, ramrod, o->cids[cid_index], U64_HI(data_mapping), U64_LO(data_mapping), ETH_CONNECTION_TYPE); @@ -4720,7 +4679,7 @@ static void bnx2x_q_fill_update_data(struct bnx2x *bp, test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG, &params->update_flags); - /* Outer VLAN sripping */ + /* Outer VLAN stripping */ data->outer_vlan_removal_enable_flg = test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags); data->outer_vlan_removal_change_flg = @@ -4756,6 +4715,13 @@ static void bnx2x_q_fill_update_data(struct bnx2x *bp, test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags); data->silent_vlan_value = cpu_to_le16(params->silent_removal_value); data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask); + + /* tx switching */ + data->tx_switching_flg = + test_bit(BNX2X_Q_UPDATE_TX_SWITCHING, &params->update_flags); + data->tx_switching_change_flg = + test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, + &params->update_flags); } static inline int bnx2x_q_send_update(struct bnx2x *bp, @@ -4775,21 +4741,18 @@ return -EINVAL; } - /* Clear the ramrod data */ memset(rdata, 0, sizeof(*rdata)); /* Fill the ramrod data */ bnx2x_q_fill_update_data(bp, o, update_params, rdata); - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). 
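The update ramrod filled above carries a value/change-flag pair per attribute: firmware applies a value only when the matching *_CHNG bit is set, so one ramrod can touch a single attribute and leave the rest alone. A caller-side sketch (hypothetical helper, not driver code):

static int queue_set_tx_switching(struct bnx2x *bp,
				  struct bnx2x_queue_sp_obj *q_obj,
				  bool enable)
{
	struct bnx2x_queue_state_params q_params = { .q_obj = q_obj };
	unsigned long *uf = &q_params.params.update.update_flags;

	q_params.cmd = BNX2X_Q_CMD_UPDATE;
	set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
	__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, uf);	/* apply it */
	if (enable)
		__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING, uf);	/* the value */
	return bnx2x_queue_state_change(bp, &q_params);
}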
*/ - return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, o->cids[cid_index], U64_HI(data_mapping), U64_LO(data_mapping), ETH_CONNECTION_TYPE); @@ -4836,11 +4799,62 @@ static inline int bnx2x_q_send_activate(struct bnx2x *bp, return bnx2x_q_send_update(bp, params); } +static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *obj, + struct bnx2x_queue_update_tpa_params *params, + struct tpa_update_ramrod_data *data) +{ + data->client_id = obj->cl_id; + data->complete_on_both_clients = params->complete_on_both_clients; + data->dont_verify_rings_pause_thr_flg = + params->dont_verify_thr; + data->max_agg_size = cpu_to_le16(params->max_agg_sz); + data->max_sges_for_packet = params->max_sges_pkt; + data->max_tpa_queues = params->max_tpa_queues; + data->sge_buff_size = cpu_to_le16(params->sge_buff_sz); + data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map)); + data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map)); + data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high); + data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low); + data->tpa_mode = params->tpa_mode; + data->update_ipv4 = params->update_ipv4; + data->update_ipv6 = params->update_ipv6; +} + static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp, struct bnx2x_queue_state_params *params) { - /* TODO: Not implemented yet. */ - return -1; + struct bnx2x_queue_sp_obj *o = params->q_obj; + struct tpa_update_ramrod_data *rdata = + (struct tpa_update_ramrod_data *)o->rdata; + dma_addr_t data_mapping = o->rdata_mapping; + struct bnx2x_queue_update_tpa_params *update_tpa_params = + &params->params.update_tpa; + u16 type; + + /* Clear the ramrod data */ + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data */ + bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata); + + /* Add the function id inside the type, so that sp post function + * doesn't automatically add the PF func-id, this is required + * for operations done by PFs on behalf of their VFs + */ + type = ETH_CONNECTION_TYPE | + ((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT); + + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). + */ + return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE, + o->cids[BNX2X_PRIMARY_CID_INDEX], + U64_HI(data_mapping), + U64_LO(data_mapping), type); } static inline int bnx2x_q_send_halt(struct bnx2x *bp, @@ -4997,8 +5011,7 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp, &params->params.update; u8 next_tx_only = o->num_tx_only; - /* - * Forget all pending for completion commands if a driver only state + /* Forget all pending for completion commands if a driver only state * transition has been requested. */ if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) { o->next_state = BNX2X_Q_STATE_MAX; } @@ -5006,12 +5019,14 @@ - /* - * Don't allow a next state transition if we are in the middle of + /* Don't allow a next state transition if we are in the middle of * the previous one. 
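The TPA-update sender above is a queue ramrod that a PF may issue on a VF's connection, which, per the driver's own comment, is why it assembles the SPE type word itself rather than letting bnx2x_sp_post() stamp in the PF's function id. The encoding, spelled out as a fragment:

	/* o->func_id is the connection owner's id (a VF's, when the PF acts
	 * on its behalf); a type that already carries a function id is
	 * passed through unchanged.
	 */
	u16 type = ETH_CONNECTION_TYPE |
		   (o->func_id << SPE_HDR_FUNCTION_ID_SHIFT);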
*/ - if (o->pending) + if (o->pending) { + BNX2X_ERR("Blocking transition since pending was %lx\n", + o->pending); return -EBUSY; + } switch (state) { case BNX2X_Q_STATE_RESET: @@ -5184,11 +5199,25 @@ void bnx2x_init_queue_obj(struct bnx2x *bp, obj->set_pending = bnx2x_queue_set_pending; } -void bnx2x_queue_set_cos_cid(struct bnx2x *bp, - struct bnx2x_queue_sp_obj *obj, - u32 cid, u8 index) +/* return a queue object's logical state*/ +int bnx2x_get_q_logical_state(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *obj) { - obj->cids[index] = cid; + switch (obj->state) { + case BNX2X_Q_STATE_ACTIVE: + case BNX2X_Q_STATE_MULTI_COS: + return BNX2X_Q_LOGICAL_STATE_ACTIVE; + case BNX2X_Q_STATE_RESET: + case BNX2X_Q_STATE_INITIALIZED: + case BNX2X_Q_STATE_MCOS_TERMINATED: + case BNX2X_Q_STATE_INACTIVE: + case BNX2X_Q_STATE_STOPPED: + case BNX2X_Q_STATE_TERMINATED: + case BNX2X_Q_STATE_FLRED: + return BNX2X_Q_LOGICAL_STATE_STOPPED; + default: + return -EINVAL; + } } /********************** Function state object *********************************/ @@ -5199,8 +5228,7 @@ enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp, if (o->pending) return BNX2X_F_STATE_MAX; - /* - * unsure the order of reading of o->pending and o->state + /* ensure the order of reading of o->pending and o->state * o->pending should be read first */ rmb(); @@ -5232,9 +5260,9 @@ static inline int bnx2x_func_state_change_comp(struct bnx2x *bp, unsigned long cur_pending = o->pending; if (!test_and_clear_bit(cmd, &cur_pending)) { - BNX2X_ERR("Bad MC reply %d for func %d in state %d " - "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp), - o->state, cur_pending, o->next_state); + BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n", + cmd, BP_FUNC(bp), o->state, + cur_pending, o->next_state); return -EINVAL; } @@ -5251,7 +5279,7 @@ static inline int bnx2x_func_state_change_comp(struct bnx2x *bp, wmb(); clear_bit(cmd, &o->pending); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); return 0; } @@ -5298,8 +5326,7 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp, enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX; enum bnx2x_func_cmd cmd = params->cmd; - /* - * Forget all pending for completion commands if a driver only state + /* Forget all pending for completion commands if a driver only state * transition has been requested. */ if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) { o->next_state = BNX2X_F_STATE_MAX; } @@ -5307,8 +5334,7 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp, - /* - * Don't allow a next state transition if we are in the middle of + /* Don't allow a next state transition if we are in the middle of * the previous one. */ if (o->pending) @@ -5331,12 +5357,35 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp, case BNX2X_F_STATE_STARTED: if (cmd == BNX2X_F_CMD_STOP) next_state = BNX2X_F_STATE_INITIALIZED; + /* afex ramrods can be sent only in started mode, and only + * if not pending for function_stop ramrod completion + * for these events - next state remained STARTED. + */ + else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) && + (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) + next_state = BNX2X_F_STATE_STARTED; + + else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) && + (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) + next_state = BNX2X_F_STATE_STARTED; + + /* Switch_update ramrod can be sent in either started or + * tx_stopped state, and it doesn't change the state. 
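bnx2x_func_chk_transition() thus gains three "maintenance" commands that legally leave the state unchanged — the AFEX pair in STARTED only, switch-update in STARTED or TX_STOPPED — and all three are refused while a STOP is pending so they cannot race the function-stop ramrod. The rule, isolated into a sketch (hypothetical helper, not driver code):

static bool maintenance_cmd_ok(struct bnx2x_func_sp_obj *o,
			       enum bnx2x_func_cmd cmd)
{
	if (test_bit(BNX2X_F_CMD_STOP, &o->pending))
		return false;		/* function is on its way down */

	switch (cmd) {
	case BNX2X_F_CMD_AFEX_UPDATE:
	case BNX2X_F_CMD_AFEX_VIFLISTS:
	case BNX2X_F_CMD_SWITCH_UPDATE:
		return true;		/* self-transition: state unchanged */
	default:
		return false;
	}
}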
+ */ + else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) && + (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) + next_state = BNX2X_F_STATE_STARTED; + else if (cmd == BNX2X_F_CMD_TX_STOP) next_state = BNX2X_F_STATE_TX_STOPPED; break; case BNX2X_F_STATE_TX_STOPPED: - if (cmd == BNX2X_F_CMD_TX_START) + if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) && + (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) + next_state = BNX2X_F_STATE_TX_STOPPED; + + else if (cmd == BNX2X_F_CMD_TX_START) next_state = BNX2X_F_STATE_STARTED; break; @@ -5458,7 +5507,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp, goto init_err; } - /* Handle the beginning of COMMON_XXX pases separatelly... */ + /* Handle the beginning of COMMON_XXX pases separately... */ switch (load_code) { case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: rc = bnx2x_func_init_cmn_chip(bp, drv); @@ -5492,7 +5541,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp, init_err: drv->gunzip_end(bp); - /* In case of success, complete the comand immediatelly: no ramrods + /* In case of success, complete the command immediately: no ramrods * have been sent. */ if (!rc) @@ -5517,7 +5566,7 @@ static inline void bnx2x_func_reset_func(struct bnx2x *bp, } /** - * bnx2x_func_reset_port - reser HW at port stage + * bnx2x_func_reset_port - reset HW at port stage * * @bp: device handle * @drv: @@ -5539,7 +5588,7 @@ static inline void bnx2x_func_reset_port(struct bnx2x *bp, } /** - * bnx2x_func_reset_cmn - reser HW at common stage + * bnx2x_func_reset_cmn - reset HW at common stage * * @bp: device handle * @drv: @@ -5555,7 +5604,6 @@ static inline void bnx2x_func_reset_cmn(struct bnx2x *bp, drv->reset_hw_cmn(bp); } - static inline int bnx2x_func_hw_reset(struct bnx2x *bp, struct bnx2x_func_state_params *params) { @@ -5582,7 +5630,7 @@ static inline int bnx2x_func_hw_reset(struct bnx2x *bp, break; } - /* Complete the comand immediatelly: no ramrods have been sent. */ + /* Complete the command immediately: no ramrods have been sent. */ o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET); return 0; @@ -5600,22 +5648,128 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp, memset(rdata, 0, sizeof(*rdata)); /* Fill the ramrod data with provided parameters */ - rdata->function_mode = cpu_to_le16(start_params->mf_mode); - rdata->sd_vlan_tag = start_params->sd_vlan_tag; - rdata->path_id = BP_PATH(bp); - rdata->network_cos_mode = start_params->network_cos_mode; + rdata->function_mode = (u8)start_params->mf_mode; + rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); + rdata->path_id = BP_PATH(bp); + rdata->network_cos_mode = start_params->network_cos_mode; + rdata->gre_tunnel_mode = start_params->gre_tunnel_mode; + rdata->gre_tunnel_rss = start_params->gre_tunnel_rss; + + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). 
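One real fix hides in bnx2x_func_send_start() above: function_mode is a single byte in this HSI revision, so the old cpu_to_le16() assignment was a byte-order hazard on big-endian hosts, while sd_vlan_tag genuinely is 16 bits wide and now gets the conversion it was missing. The rule of thumb when filling DMA-visible ramrod data, in three lines from the hunk:

	rdata->function_mode = (u8)start_params->mf_mode;	/* u8: never swap */
	rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); /* __le16 */
	rdata->path_id = BP_PATH(bp);				/* u8 */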
+ */ - /* - * No need for an explicit memory barrier here as long we would + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, + U64_HI(data_mapping), + U64_LO(data_mapping), NONE_CONNECTION_TYPE); +} + +static inline int bnx2x_func_send_switch_update(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + struct bnx2x_func_sp_obj *o = params->f_obj; + struct function_update_data *rdata = + (struct function_update_data *)o->rdata; + dma_addr_t data_mapping = o->rdata_mapping; + struct bnx2x_func_switch_update_params *switch_update_params = + &params->params.switch_update; + + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data with provided parameters */ + rdata->tx_switch_suspend_change_flg = 1; + rdata->tx_switch_suspend = switch_update_params->suspend; + rdata->echo = SWITCH_UPDATE; + + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). + */ + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0, + U64_HI(data_mapping), + U64_LO(data_mapping), NONE_CONNECTION_TYPE); +} + +static inline int bnx2x_func_send_afex_update(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + struct bnx2x_func_sp_obj *o = params->f_obj; + struct function_update_data *rdata = + (struct function_update_data *)o->afex_rdata; + dma_addr_t data_mapping = o->afex_rdata_mapping; + struct bnx2x_func_afex_update_params *afex_update_params = + &params->params.afex_update; + + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data with provided parameters */ + rdata->vif_id_change_flg = 1; + rdata->vif_id = cpu_to_le16(afex_update_params->vif_id); + rdata->afex_default_vlan_change_flg = 1; + rdata->afex_default_vlan = + cpu_to_le16(afex_update_params->afex_default_vlan); + rdata->allowed_priorities_change_flg = 1; + rdata->allowed_priorities = afex_update_params->allowed_priorities; + rdata->echo = AFEX_UPDATE; + + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). 
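Both senders above post the same RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, so the echo field (SWITCH_UPDATE vs. AFEX_UPDATE) is what tells the completions apart when they come back through the event queue. The shape of the receiving side (illustrative only; the driver's actual EQ handler lives elsewhere):

	switch (echo) {	/* echo as carried back in the completion element */
	case SWITCH_UPDATE:
		o->complete_cmd(bp, o, BNX2X_F_CMD_SWITCH_UPDATE);
		break;
	case AFEX_UPDATE:
		o->complete_cmd(bp, o, BNX2X_F_CMD_AFEX_UPDATE);
		break;
	}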
+ */ + DP(BNX2X_MSG_SP, + "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n", + rdata->vif_id, + rdata->afex_default_vlan, rdata->allowed_priorities); + + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0, + U64_HI(data_mapping), + U64_LO(data_mapping), NONE_CONNECTION_TYPE); +} + +static +inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + struct bnx2x_func_sp_obj *o = params->f_obj; + struct afex_vif_list_ramrod_data *rdata = + (struct afex_vif_list_ramrod_data *)o->afex_rdata; + struct bnx2x_func_afex_viflists_params *afex_vif_params = + &params->params.afex_viflists; + u64 *p_rdata = (u64 *)rdata; + + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data with provided parameters */ + rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index); + rdata->func_bit_map = afex_vif_params->func_bit_map; + rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command; + rdata->func_to_clear = afex_vif_params->func_to_clear; + + /* send in echo type of sub command */ + rdata->echo = afex_vif_params->afex_vif_list_command; + + /* No need for an explicit memory barrier here as long we would * need to ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory * read and we will have to put a full memory barrier there * (inside bnx2x_sp_post()). */ - return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, - U64_HI(data_mapping), - U64_LO(data_mapping), NONE_CONNECTION_TYPE); + DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n", + rdata->afex_vif_list_command, rdata->vif_list_index, + rdata->func_bit_map, rdata->func_to_clear); + + /* this ramrod sends data directly and not through DMA mapping */ + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0, + U64_HI(*p_rdata), U64_LO(*p_rdata), + NONE_CONNECTION_TYPE); } static inline int bnx2x_func_send_stop(struct bnx2x *bp, @@ -5652,6 +5806,12 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp, rdata->traffic_type_to_priority_cos[i] = tx_start_params->traffic_type_to_priority_cos[i]; + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). 
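bnx2x_func_send_afex_viflists() is the odd one out: its ramrod data is exactly eight bytes in this HSI, so the u64 *p_rdata cast lets the payload travel inside the SPQ element itself, in the two words that normally carry a DMA address — no coherent buffer, no mapping. The trick, with an added compile-time guard (the BUILD_BUG_ON is illustrative, not in the driver):

	u64 raw = *(u64 *)rdata;	/* the whole ramrod data, by value */

	BUILD_BUG_ON(sizeof(struct afex_vif_list_ramrod_data) != sizeof(u64));
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
			     U64_HI(raw), U64_LO(raw), NONE_CONNECTION_TYPE);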
+ */ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0, U64_HI(data_mapping), U64_LO(data_mapping), NONE_CONNECTION_TYPE); @@ -5669,10 +5829,16 @@ static int bnx2x_func_send_cmd(struct bnx2x *bp, return bnx2x_func_send_stop(bp, params); case BNX2X_F_CMD_HW_RESET: return bnx2x_func_hw_reset(bp, params); + case BNX2X_F_CMD_AFEX_UPDATE: + return bnx2x_func_send_afex_update(bp, params); + case BNX2X_F_CMD_AFEX_VIFLISTS: + return bnx2x_func_send_afex_viflists(bp, params); case BNX2X_F_CMD_TX_STOP: return bnx2x_func_send_tx_stop(bp, params); case BNX2X_F_CMD_TX_START: return bnx2x_func_send_tx_start(bp, params); + case BNX2X_F_CMD_SWITCH_UPDATE: + return bnx2x_func_send_switch_update(bp, params); default: BNX2X_ERR("Unknown command: %d\n", params->cmd); return -EINVAL; @@ -5682,6 +5848,7 @@ static int bnx2x_func_send_cmd(struct bnx2x *bp, void bnx2x_init_func_obj(struct bnx2x *bp, struct bnx2x_func_sp_obj *obj, void *rdata, dma_addr_t rdata_mapping, + void *afex_rdata, dma_addr_t afex_rdata_mapping, struct bnx2x_func_sp_drv_ops *drv_iface) { memset(obj, 0, sizeof(*obj)); @@ -5690,7 +5857,8 @@ void bnx2x_init_func_obj(struct bnx2x *bp, obj->rdata = rdata; obj->rdata_mapping = rdata_mapping; - + obj->afex_rdata = afex_rdata; + obj->afex_rdata_mapping = afex_rdata_mapping; obj->send_cmd = bnx2x_func_send_cmd; obj->check_transition = bnx2x_func_chk_transition; obj->complete_cmd = bnx2x_func_comp_cmd; @@ -5716,16 +5884,30 @@ int bnx2x_func_state_change(struct bnx2x *bp, struct bnx2x_func_state_params *params) { struct bnx2x_func_sp_obj *o = params->f_obj; - int rc; + int rc, cnt = 300; enum bnx2x_func_cmd cmd = params->cmd; unsigned long *pending = &o->pending; mutex_lock(&o->one_pending_mutex); /* Check that the requested transition is legal */ - if (o->check_transition(bp, o, params)) { + rc = o->check_transition(bp, o, params); + if ((rc == -EBUSY) && + (test_bit(RAMROD_RETRY, &params->ramrod_flags))) { + while ((rc == -EBUSY) && (--cnt > 0)) { + mutex_unlock(&o->one_pending_mutex); + msleep(10); + mutex_lock(&o->one_pending_mutex); + rc = o->check_transition(bp, o, params); + } + if (rc == -EBUSY) { + mutex_unlock(&o->one_pending_mutex); + BNX2X_ERR("timeout waiting for previous ramrod completion\n"); + return rc; + } + } else if (rc) { mutex_unlock(&o->one_pending_mutex); - return -EINVAL; + return rc; } /* Set "pending" bit */ @@ -5744,7 +5926,7 @@ int bnx2x_func_state_change(struct bnx2x *bp, if (rc) { o->next_state = BNX2X_F_STATE_MAX; clear_bit(cmd, pending); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 66da39f0c84..718ecd29466 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -1,6 +1,6 @@ /* bnx2x_sp.h: Broadcom Everest network driver. * - * Copyright 2011 Broadcom Corporation + * Copyright (c) 2011-2013 Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -12,7 +12,7 @@ * license other than the GPL, without Broadcom's express prior written * consent. 
* - * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com> * Written by: Vladislav Zolotarov * */ @@ -34,12 +34,17 @@ enum { RAMROD_RESTORE, /* Execute the next command now */ RAMROD_EXEC, - /* - * Don't add a new command and continue execution of posponed + /* Don't add a new command and continue execution of postponed * commands. If not set a new command will be added to the * pending commands list. */ RAMROD_CONT, + /* If there is another pending ramrod, wait until it finishes and + * re-try to submit this one. This flag can be set only in sleepable + * context, and should not be set from the context that completes the + * ramrods as deadlock will occur. + */ + RAMROD_RETRY, }; typedef enum { @@ -48,7 +53,7 @@ typedef enum { BNX2X_OBJ_TYPE_RX_TX, } bnx2x_obj_type; -/* Filtering states */ +/* Public slow path states */ enum { BNX2X_FILTER_MAC_PENDING, BNX2X_FILTER_VLAN_PENDING, @@ -62,6 +67,8 @@ enum { BNX2X_FILTER_MCAST_PENDING, BNX2X_FILTER_MCAST_SCHED, BNX2X_FILTER_RSS_CONF_PENDING, + BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, + BNX2X_AFEX_PENDING_VIFSET_MCP_ACK }; struct bnx2x_raw_obj { @@ -92,6 +99,7 @@ struct bnx2x_raw_obj { /************************* VLAN-MAC commands related parameters ***************/ struct bnx2x_mac_ramrod_data { u8 mac[ETH_ALEN]; + u8 is_inner_mac; }; struct bnx2x_vlan_ramrod_data { @@ -100,6 +108,7 @@ struct bnx2x_vlan_ramrod_data { struct bnx2x_vlan_mac_ramrod_data { u8 mac[ETH_ALEN]; + u8 is_inner_mac; u16 vlan; }; @@ -119,8 +128,7 @@ enum bnx2x_vlan_mac_cmd { struct bnx2x_vlan_mac_data { /* Requested command: BNX2X_VLAN_MAC_XX */ enum bnx2x_vlan_mac_cmd cmd; - /* - * used to contain the data related vlan_mac_flags bits from + /* used to contain the data related vlan_mac_flags bits from * ramrod parameters. */ unsigned long vlan_mac_flags; @@ -165,9 +173,8 @@ typedef int (*exe_q_remove)(struct bnx2x *bp, union bnx2x_qable_obj *o, struct bnx2x_exeq_elem *elem); -/** - * @return positive is entry was optimized, 0 - if not, negative - * in case of an error. +/* Return positive if entry was optimized, 0 - if not, negative + * in case of an error. */ typedef int (*exe_q_optimize)(struct bnx2x *bp, union bnx2x_qable_obj *o, @@ -181,14 +188,10 @@ typedef struct bnx2x_exeq_elem * struct bnx2x_exeq_elem *elem); struct bnx2x_exe_queue_obj { - /* - * Commands pending for an execution. - */ + /* Commands pending for an execution. */ struct list_head exe_queue; - /* - * Commands pending for an completion. - */ + /* Commands pending for an completion. */ struct list_head pending_comp; spinlock_t lock; @@ -236,14 +239,13 @@ struct bnx2x_exe_queue_obj { }; /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ /* - * Element in the VLAN_MAC registry list having all currenty configured + * Element in the VLAN_MAC registry list having all currently configured * rules. */ struct bnx2x_vlan_mac_registry_elem { struct list_head link; - /* - * Used to store the cam offset used for the mac/vlan/vlan-mac. + /* Used to store the cam offset used for the mac/vlan/vlan-mac. * Relevant for 57710 and 57711 only. VLANs and MACs share the * same CAM for these chips. 
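RAMROD_RETRY, declared above, is consumed by the bnx2x_func_state_change() hunk further up; the loop drops the mutex across each sleep so the completion context can take it and clear the pending command, which is exactly why the flag is forbidden from completion (non-sleepable) context. Roughly:

	int rc, cnt = 300;	/* up to ~3 seconds in 10 ms steps */

	mutex_lock(&o->one_pending_mutex);
	rc = o->check_transition(bp, o, params);
	while (rc == -EBUSY && test_bit(RAMROD_RETRY, &params->ramrod_flags) &&
	       --cnt > 0) {
		mutex_unlock(&o->one_pending_mutex);	/* let completions run */
		msleep(10);
		mutex_lock(&o->one_pending_mutex);
		rc = o->check_transition(bp, o, params);
	}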
*/ @@ -264,6 +266,13 @@ enum { BNX2X_DONT_CONSUME_CAM_CREDIT, BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, }; +/* When looking for matching filters, some flags are not interesting */ +#define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \ + 1 << BNX2X_ETH_MAC | \ + 1 << BNX2X_ISCSI_ETH_MAC | \ + 1 << BNX2X_NETQ_ETH_MAC) +#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \ + ((flags) & BNX2X_VLAN_MAC_CMP_MASK) struct bnx2x_vlan_mac_ramrod_params { /* Object to run the command from */ @@ -283,6 +292,12 @@ struct bnx2x_vlan_mac_obj { * entries. */ struct list_head head; + /* Implement a simple reader/writer lock on the head list. + * all these fields should only be accessed under the exe_queue lock + */ + u8 head_reader; /* Num. of readers accessing head list */ + bool head_exe_request; /* Pending execution request. */ + unsigned long saved_ramrod_flags; /* Ramrods of pending execution */ /* TODO: Add it's initialization in the init functions */ struct bnx2x_exe_queue_obj exe_queue; @@ -301,13 +316,14 @@ struct bnx2x_vlan_mac_obj { * @param n number of elements to get * @param buf buffer preallocated by caller into which elements * will be copied. Note elements are 4-byte aligned - * so buffer size must be able to accomodate the + * so buffer size must be able to accommodate the * aligned elements. * * @return number of copied bytes */ - int (*get_n_elements)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, - int n, u8 *buf); + int (*get_n_elements)(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, int n, u8 *base, + u8 stride, u8 size); /** * Checks if ADD-ramrod with the given params may be performed. @@ -315,7 +331,8 @@ struct bnx2x_vlan_mac_obj { * @return zero if the element may be added */ - int (*check_add)(struct bnx2x_vlan_mac_obj *o, + int (*check_add)(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, union bnx2x_classification_ramrod_data *data); /** @@ -324,7 +341,8 @@ struct bnx2x_vlan_mac_obj { * @return true if the element may be deleted */ struct bnx2x_vlan_mac_registry_elem * - (*check_del)(struct bnx2x_vlan_mac_obj *o, + (*check_del)(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, union bnx2x_classification_ramrod_data *data); /** @@ -332,7 +350,8 @@ struct bnx2x_vlan_mac_obj { * * @return true if the element may be deleted */ - bool (*check_move)(struct bnx2x_vlan_mac_obj *src_o, + bool (*check_move)(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *src_o, struct bnx2x_vlan_mac_obj *dst_o, union bnx2x_classification_ramrod_data *data); @@ -382,7 +401,7 @@ struct bnx2x_vlan_mac_obj { * @param bp * @param p Command parameters (RAMROD_COMP_WAIT bit in * ramrod_flags is only taken into an account) - * @param ppos a pointer to the cooky that should be given back in the + * @param ppos a pointer to the cookie that should be given back in the * next call to make function handle the next element. If * *ppos is set to NULL it will restart the iterator. * If returned *ppos == NULL this means that the last @@ -395,7 +414,7 @@ struct bnx2x_vlan_mac_obj { struct bnx2x_vlan_mac_registry_elem **ppos); /** - * Should be called on a completion arival. + * Should be called on a completion arrival. 
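head_reader and head_exe_request, declared above, implement a small reader/writer protocol over the rules list: both are only ever touched under the exe_queue lock, readers bump a counter, and a ramrod that arrives while readers hold the list is parked (its flags saved) until the last reader drops out. A sketch of the intent only; the real bnx2x_vlan_mac_h_* helpers also cover the write side:

static void head_read_lock(struct bnx2x_vlan_mac_obj *o)
{
	o->head_reader++;	/* under the exe_queue spinlock */
}

static void head_read_unlock(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o)
{
	if (--o->head_reader == 0 && o->head_exe_request) {
		o->head_exe_request = false;
		/* re-run the parked execution with o->saved_ramrod_flags */
	}
}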
* * @param bp * @param o @@ -423,9 +442,15 @@ struct bnx2x_vlan_mac_obj { int (*wait)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o); }; +enum { + BNX2X_LLH_CAM_ISCSI_ETH_LINE = 0, + BNX2X_LLH_CAM_ETH_LINE, + BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2 +}; + /** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ -/* RX_MODE ramrod spesial flags: set in rx_mode_flags field in +/* RX_MODE ramrod special flags: set in rx_mode_flags field in * a bnx2x_rx_mode_ramrod_params. */ enum { @@ -453,8 +478,7 @@ struct bnx2x_rx_mode_ramrod_params { unsigned long ramrod_flags; unsigned long rx_mode_flags; - /* - * rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to + /* rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to * a tstorm_eth_mac_filter_config (e1x). */ void *rdata; @@ -505,7 +529,7 @@ struct bnx2x_mcast_ramrod_params { int mcast_list_len; }; -enum { +enum bnx2x_mcast_cmd { BNX2X_MCAST_CMD_ADD, BNX2X_MCAST_CMD_CONT, BNX2X_MCAST_CMD_DEL, @@ -554,7 +578,8 @@ struct bnx2x_mcast_obj { * @param cmd command to execute (BNX2X_MCAST_CMD_X, see above) */ int (*config_mcast)(struct bnx2x *bp, - struct bnx2x_mcast_ramrod_params *p, int cmd); + struct bnx2x_mcast_ramrod_params *p, + enum bnx2x_mcast_cmd cmd); /** * Fills the ramrod data during the RESTORE flow. @@ -571,11 +596,13 @@ struct bnx2x_mcast_obj { int start_bin, int *rdata_idx); int (*enqueue_cmd)(struct bnx2x *bp, struct bnx2x_mcast_obj *o, - struct bnx2x_mcast_ramrod_params *p, int cmd); + struct bnx2x_mcast_ramrod_params *p, + enum bnx2x_mcast_cmd cmd); void (*set_one_rule)(struct bnx2x *bp, struct bnx2x_mcast_obj *o, int idx, - union bnx2x_mcast_config_data *cfg_data, int cmd); + union bnx2x_mcast_config_data *cfg_data, + enum bnx2x_mcast_cmd cmd); /** Checks if there are more mcast MACs to be set or a previous * command is still pending. @@ -598,7 +625,8 @@ struct bnx2x_mcast_obj { * feasible. */ int (*validate)(struct bnx2x *bp, - struct bnx2x_mcast_ramrod_params *p, int cmd); + struct bnx2x_mcast_ramrod_params *p, + enum bnx2x_mcast_cmd cmd); /** * Restore the values of internal counters in case of a failure. @@ -620,12 +648,11 @@ struct bnx2x_credit_pool_obj { /* Maximum allowed credit. put() will check against it. */ int pool_sz; - /* - * Allocate a pool table statically. + /* Allocate a pool table statically. * - * Currently the mamimum allowed size is MAX_MAC_CREDIT_E2(272) + * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272) * - * The set bit in the table will mean that the entry is available. + * The set bit in the table will mean that the entry is available. 
*/ #define BNX2X_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64) u64 pool_mirror[BNX2X_POOL_VEC_SIZE]; @@ -675,16 +702,15 @@ enum { /* RSS_MODE bits are mutually exclusive */ BNX2X_RSS_MODE_DISABLED, BNX2X_RSS_MODE_REGULAR, - BNX2X_RSS_MODE_VLAN_PRI, - BNX2X_RSS_MODE_E1HOV_PRI, - BNX2X_RSS_MODE_IP_DSCP, BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */ BNX2X_RSS_IPV4, BNX2X_RSS_IPV4_TCP, + BNX2X_RSS_IPV4_UDP, BNX2X_RSS_IPV6, BNX2X_RSS_IPV6_TCP, + BNX2X_RSS_IPV6_UDP, }; struct bnx2x_config_rss_params { @@ -718,6 +744,10 @@ struct bnx2x_rss_config_obj { /* Last configured indirection table */ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; + /* flags for enabling 4-tupple hash on UDP */ + u8 udp_rss_v4; + u8 udp_rss_v6; + int (*config_rss)(struct bnx2x *bp, struct bnx2x_config_rss_params *p); }; @@ -737,7 +767,9 @@ enum { BNX2X_Q_UPDATE_DEF_VLAN_EN, BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, - BNX2X_Q_UPDATE_SILENT_VLAN_REM + BNX2X_Q_UPDATE_SILENT_VLAN_REM, + BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, + BNX2X_Q_UPDATE_TX_SWITCHING }; /* Allowed Queue states */ @@ -754,6 +786,12 @@ enum bnx2x_q_state { BNX2X_Q_STATE_MAX, }; +/* Allowed Queue states */ +enum bnx2x_q_logical_state { + BNX2X_Q_LOGICAL_STATE_ACTIVE, + BNX2X_Q_LOGICAL_STATE_STOPPED, +}; + /* Allowed commands */ enum bnx2x_queue_cmd { BNX2X_Q_CMD_INIT, @@ -774,6 +812,7 @@ enum bnx2x_queue_cmd { enum { BNX2X_Q_FLG_TPA, BNX2X_Q_FLG_TPA_IPV6, + BNX2X_Q_FLG_TPA_GRO, BNX2X_Q_FLG_STATS, BNX2X_Q_FLG_ZERO_STATS, BNX2X_Q_FLG_ACTIVE, @@ -790,10 +829,13 @@ enum { BNX2X_Q_FLG_TX_SWITCH, BNX2X_Q_FLG_TX_SEC, BNX2X_Q_FLG_ANTI_SPOOF, - BNX2X_Q_FLG_SILENT_VLAN_REM + BNX2X_Q_FLG_SILENT_VLAN_REM, + BNX2X_Q_FLG_FORCE_DEFAULT_PRI, + BNX2X_Q_FLG_PCSUM_ON_PKT, + BNX2X_Q_FLG_TUN_INC_INNER_IP_ID }; -/* Queue type options: queue type may be a compination of below. */ +/* Queue type options: queue type may be a combination of below. */ enum bnx2x_q_type { /** TODO: Consider moving both these flags into the init() * ramrod params. 
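With the two new rss_flags bits above, a caller can request 4-tuple hashing for UDP flows next to the usual TCP 4-tuple. A minimal sketch (key and indirection-table setup omitted; the params layout is assumed to match this header):

	struct bnx2x_config_rss_params params = { .rss_obj = rss_obj };
	int rc;

	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);	/* new capability */
	__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
	rc = bnx2x_config_rss(bp, &params);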
@@ -803,11 +845,12 @@ enum bnx2x_q_type { }; #define BNX2X_PRIMARY_CID_INDEX 0 -#define BNX2X_MULTI_TX_COS_E1X 1 +#define BNX2X_MULTI_TX_COS_E1X 3 /* QM only */ #define BNX2X_MULTI_TX_COS_E2_E3A0 2 #define BNX2X_MULTI_TX_COS_E3B0 3 -#define BNX2X_MULTI_TX_COS BNX2X_MULTI_TX_COS_E3B0 +#define BNX2X_MULTI_TX_COS 3 /* Maximum possible */ +#define MAC_PAD (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN) struct bnx2x_queue_init_params { struct { @@ -850,6 +893,24 @@ struct bnx2x_queue_update_params { u8 cid_index; }; +struct bnx2x_queue_update_tpa_params { + dma_addr_t sge_map; + u8 update_ipv4; + u8 update_ipv6; + u8 max_tpa_queues; + u8 max_sges_pkt; + u8 complete_on_both_clients; + u8 dont_verify_thr; + u8 tpa_mode; + u8 _pad; + + u16 sge_buff_sz; + u16 max_agg_sz; + + u16 sge_pause_thr_low; + u16 sge_pause_thr_high; +}; + struct rxq_pause_params { u16 bd_th_lo; u16 bd_th_hi; @@ -889,6 +950,9 @@ struct bnx2x_rxq_setup_params { u8 max_tpa_queues; u8 rss_engine_id; + /* valid iff BNX2X_Q_FLG_MCAST */ + u8 mcast_engine_id; + u8 cache_line_log; u8 sb_cq_index; @@ -941,6 +1005,7 @@ struct bnx2x_queue_state_params { /* Params according to the current command */ union { struct bnx2x_queue_update_params update; + struct bnx2x_queue_update_tpa_params update_tpa; struct bnx2x_queue_setup_params setup; struct bnx2x_queue_init_params init; struct bnx2x_queue_setup_tx_only_params tx_only; @@ -949,15 +1014,19 @@ struct bnx2x_queue_state_params { } params; }; +struct bnx2x_viflist_params { + u8 echo_res; + u8 func_bit_map_res; +}; + struct bnx2x_queue_sp_obj { u32 cids[BNX2X_MULTI_TX_COS]; u8 cl_id; u8 func_id; - /* - * number of traffic classes supported by queue. - * The primary connection of the queue suppotrs the first traffic - * class. Any further traffic class is suppoted by a tx-only + /* number of traffic classes supported by queue. + * The primary connection of the queue supports the first traffic + * class. Any further traffic class is supported by a tx-only * connection. * * Therefore max_cos is also a number of valid entries in the cids @@ -973,7 +1042,7 @@ struct bnx2x_queue_sp_obj { /* BNX2X_Q_CMD_XX bits. This object implements "one * pending" paradigm but for debug and tracing purposes it's - * more convinient to have different bits for different + * more convenient to have different bits for different * commands. 
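The new MAC_PAD macro is plain alignment arithmetic: ALIGN(ETH_ALEN, sizeof(u32)) rounds 6 up to 8, so MAC_PAD evaluates to 2 — the number of leading pad bytes when a MAC address is packed into two 32-bit firmware words. An illustrative use (the memcpy target is hypothetical):

	u8 fw_mac[ALIGN(ETH_ALEN, sizeof(u32))] = { 0 };	/* 8 bytes */

	BUILD_BUG_ON(MAC_PAD != 2);		/* 8 - 6, with ETH_ALEN == 6 */
	memcpy(fw_mac + MAC_PAD, mac, ETH_ALEN);	/* right-align the MAC */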
*/ unsigned long pending; @@ -1031,8 +1100,11 @@ enum bnx2x_func_cmd { BNX2X_F_CMD_START, BNX2X_F_CMD_STOP, BNX2X_F_CMD_HW_RESET, + BNX2X_F_CMD_AFEX_UPDATE, + BNX2X_F_CMD_AFEX_VIFLISTS, BNX2X_F_CMD_TX_STOP, BNX2X_F_CMD_TX_START, + BNX2X_F_CMD_SWITCH_UPDATE, BNX2X_F_CMD_MAX, }; @@ -1073,8 +1145,33 @@ struct bnx2x_func_start_params { /* Function cos mode */ u8 network_cos_mode; + + /* NVGRE classification enablement */ + u8 nvgre_clss_en; + + /* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */ + u8 gre_tunnel_mode; + + /* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */ + u8 gre_tunnel_rss; +}; + +struct bnx2x_func_switch_update_params { + u8 suspend; }; +struct bnx2x_func_afex_update_params { + u16 vif_id; + u16 afex_default_vlan; + u8 allowed_priorities; +}; + +struct bnx2x_func_afex_viflists_params { + u16 vif_list_index; + u8 func_bit_map; + u8 afex_vif_list_command; + u8 func_to_clear; +}; struct bnx2x_func_tx_start_params { struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES]; u8 dcb_enabled; @@ -1096,6 +1193,9 @@ struct bnx2x_func_state_params { struct bnx2x_func_hw_init_params hw_init; struct bnx2x_func_hw_reset_params hw_reset; struct bnx2x_func_start_params start; + struct bnx2x_func_switch_update_params switch_update; + struct bnx2x_func_afex_update_params afex_update; + struct bnx2x_func_afex_viflists_params afex_viflists; struct bnx2x_func_tx_start_params tx_start; } params; }; @@ -1131,7 +1231,7 @@ struct bnx2x_func_sp_obj { /* BNX2X_FUNC_CMD_XX bits. This object implements "one * pending" paradigm but for debug and tracing purposes it's - * more convinient to have different bits for different + * more convenient to have different bits for different * commands. */ unsigned long pending; @@ -1140,6 +1240,13 @@ struct bnx2x_func_sp_obj { void *rdata; dma_addr_t rdata_mapping; + /* Buffer to use as a afex ramrod data and its mapping. + * This can't be same rdata as above because afex ramrod requests + * can arrive to the object in parallel to other ramrod requests. 
+ */ + void *afex_rdata; + dma_addr_t afex_rdata_mapping; + /* this mutex validates that when pending flag is taken, the next * ramrod to be sent will be the one set the pending bit */ @@ -1183,6 +1290,7 @@ union bnx2x_qable_obj { void bnx2x_init_func_obj(struct bnx2x *bp, struct bnx2x_func_sp_obj *obj, void *rdata, dma_addr_t rdata_mapping, + void *afex_rdata, dma_addr_t afex_rdata_mapping, struct bnx2x_func_sp_drv_ops *drv_iface); int bnx2x_func_state_change(struct bnx2x *bp, @@ -1199,6 +1307,9 @@ void bnx2x_init_queue_obj(struct bnx2x *bp, int bnx2x_queue_state_change(struct bnx2x *bp, struct bnx2x_queue_state_params *params); +int bnx2x_get_q_logical_state(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *obj); + /********************* VLAN-MAC ****************/ void bnx2x_init_mac_obj(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *mac_obj, @@ -1214,16 +1325,14 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp, unsigned long *pstate, bnx2x_obj_type type, struct bnx2x_credit_pool_obj *vlans_pool); -void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *vlan_mac_obj, - u8 cl_id, u32 cid, u8 func_id, void *rdata, - dma_addr_t rdata_mapping, int state, - unsigned long *pstate, bnx2x_obj_type type, - struct bnx2x_credit_pool_obj *macs_pool, - struct bnx2x_credit_pool_obj *vlans_pool); - +int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o); +void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o); +int bnx2x_vlan_mac_h_write_lock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o); int bnx2x_config_vlan_mac(struct bnx2x *bp, - struct bnx2x_vlan_mac_ramrod_params *p); + struct bnx2x_vlan_mac_ramrod_params *p); int bnx2x_vlan_mac_move(struct bnx2x *bp, struct bnx2x_vlan_mac_ramrod_params *p, @@ -1235,12 +1344,11 @@ void bnx2x_init_rx_mode_obj(struct bnx2x *bp, struct bnx2x_rx_mode_obj *o); /** - * Send and RX_MODE ramrod according to the provided parameters. + * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters. * - * @param bp - * @param p Command parameters + * @p: Command parameters * - * @return 0 - if operation was successfull and there is no pending completions, + * Return: 0 - if operation was successful and there is no pending completions, * positive number - if there are pending completions, * negative - if there were errors */ @@ -1257,7 +1365,11 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp, bnx2x_obj_type type); /** - * Configure multicast MACs list. May configure a new list + * bnx2x_config_mcast - Configure multicast MACs list. + * + * @cmd: command to execute: BNX2X_MCAST_CMD_X + * + * May configure a new list * provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up * (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) a current * configuration, continue to execute the pending commands @@ -1268,16 +1380,13 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp, * the current command will be enqueued to the tail of the * pending commands list. * - * @param bp - * @param p - * @param command to execute: BNX2X_MCAST_CMD_X - * - * @return 0 is operation was sucessfull and there are no pending completions, + * Return: 0 is operation was successful and there are no pending completions, * negative if there were errors, positive if there are pending * completions. 
*/ int bnx2x_config_mcast(struct bnx2x *bp, - struct bnx2x_mcast_ramrod_params *p, int cmd); + struct bnx2x_mcast_ramrod_params *p, + enum bnx2x_mcast_cmd cmd); /****************** CREDIT POOL ****************/ void bnx2x_init_mac_credit_pool(struct bnx2x *bp, @@ -1287,7 +1396,6 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, struct bnx2x_credit_pool_obj *p, u8 func_id, u8 func_num); - /****************** RSS CONFIGURATION ****************/ void bnx2x_init_rss_config_obj(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, @@ -1297,21 +1405,17 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp, bnx2x_obj_type type); /** - * Updates RSS configuration according to provided parameters. - * - * @param bp - * @param p + * bnx2x_config_rss - Updates RSS configuration according to provided parameters * - * @return 0 in case of success + * Return: 0 in case of success */ int bnx2x_config_rss(struct bnx2x *bp, struct bnx2x_config_rss_params *p); /** - * Return the current ind_table configuration. + * bnx2x_get_rss_ind_table - Return the current ind_table configuration. * - * @param bp - * @param ind_table buffer to fill with the current indirection + * @ind_table: buffer to fill with the current indirection * table content. Should be at least * T_ETH_INDIRECTION_TABLE_SIZE bytes long. */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c new file mode 100644 index 00000000000..eda8583f6fc --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -0,0 +1,3001 @@ +/* bnx2x_sriov.c: Broadcom Everest network driver. + * + * Copyright 2009-2013 Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. 
+ * + * Maintained by: Ariel Elior <ariel.elior@qlogic.com> + * Written by: Shmulik Ravid + * Ariel Elior <ariel.elior@qlogic.com> + * + */ +#include "bnx2x.h" +#include "bnx2x_init.h" +#include "bnx2x_cmn.h" +#include "bnx2x_sp.h" +#include <linux/crc32.h> +#include <linux/if_vlan.h> + +/* General service functions */ +static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, + u16 pf_id) +{ + REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), + pf_id); + REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid), + pf_id); + REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid), + pf_id); + REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid), + pf_id); +} + +static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, + u8 enable) +{ + REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), + enable); + REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid), + enable); + REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid), + enable); + REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid), + enable); +} + +int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) +{ + int idx; + + for_each_vf(bp, idx) + if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid) + break; + return idx; +} + +static +struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) +{ + u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid); + return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL; +} + +static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf, + u8 igu_sb_id, u8 segment, u16 index, u8 op, + u8 update) +{ + /* acking a VF sb through the PF - use the GRC */ + u32 ctl; + u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; + u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; + u32 func_encode = vf->abs_vfid; + u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id; + struct igu_regular cmd_data = {0}; + + cmd_data.sb_id_and_flags = + ((index << IGU_REGULAR_SB_INDEX_SHIFT) | + (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | + (update << IGU_REGULAR_BUPDATE_SHIFT) | + (op << IGU_REGULAR_ENABLE_INT_SHIFT)); + + ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | + func_encode << IGU_CTRL_REG_FID_SHIFT | + IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; + + DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", + cmd_data.sb_id_and_flags, igu_addr_data); + REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags); + mmiowb(); + barrier(); + + DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", + ctl, igu_addr_ctl); + REG_WR(bp, igu_addr_ctl, ctl); + mmiowb(); + barrier(); +} + +static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp, + struct bnx2x_virtf *vf, + bool print_err) +{ + if (!bnx2x_leading_vfq(vf, sp_initialized)) { + if (print_err) + BNX2X_ERR("Slowpath objects not yet initialized!\n"); + else + DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n"); + return false; + } + return true; +} + +/* VFOP operations states */ +void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_queue_init_params *init_params, + struct bnx2x_queue_setup_params *setup_params, + u16 q_idx, u16 sb_idx) +{ + DP(BNX2X_MSG_IOV, + "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d", + vf->abs_vfid, + q_idx, + sb_idx, + init_params->tx.sb_cq_index, + init_params->tx.hc_rate, + setup_params->flags, + setup_params->txq_params.traffic_type); +} + +void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct 
bnx2x_queue_init_params *init_params, + struct bnx2x_queue_setup_params *setup_params, + u16 q_idx, u16 sb_idx) +{ + struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params; + + DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n" + "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n", + vf->abs_vfid, + q_idx, + sb_idx, + init_params->rx.sb_cq_index, + init_params->rx.hc_rate, + setup_params->gen_params.mtu, + rxq_params->buf_sz, + rxq_params->sge_buf_sz, + rxq_params->max_sges_pkt, + rxq_params->tpa_agg_sz, + setup_params->flags, + rxq_params->drop_flags, + rxq_params->cache_line_log); +} + +void bnx2x_vfop_qctor_prep(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vf_queue *q, + struct bnx2x_vf_queue_construct_params *p, + unsigned long q_type) +{ + struct bnx2x_queue_init_params *init_p = &p->qstate.params.init; + struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup; + + /* INIT */ + + /* Enable host coalescing in the transition to INIT state */ + if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags)) + __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags); + + if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags)) + __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags); + + /* FW SB ID */ + init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx); + init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx); + + /* context */ + init_p->cxts[0] = q->cxt; + + /* SETUP */ + + /* Setup-op general parameters */ + setup_p->gen_params.spcl_id = vf->sp_cl_id; + setup_p->gen_params.stat_id = vfq_stat_id(vf, q); + + /* Setup-op pause params: + * Nothing to do, the pause thresholds are set by default to 0 which + * effectively turns off the feature for this queue. We don't want + * one queue (VF) to interfering with another queue (another VF) + */ + if (vf->cfg_flags & VF_CFG_FW_FC) + BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n", + vf->abs_vfid); + /* Setup-op flags: + * collect statistics, zero statistics, local-switching, security, + * OV for Flex10, RSS and MCAST for leading + */ + if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags)) + __set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags); + + /* for VFs, enable tx switching, bd coherency, and mac address + * anti-spoofing + */ + __set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags); + __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags); + __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags); + + /* Setup-op rx parameters */ + if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) { + struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params; + + rxq_p->cl_qzone_id = vfq_qzone_id(vf, q); + rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx); + rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid); + + if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags)) + rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES; + } + + /* Setup-op tx parameters */ + if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) { + setup_p->txq_params.tss_leading_cl_id = vf->leading_rss; + setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx); + } +} + +static int bnx2x_vf_queue_create(struct bnx2x *bp, + struct bnx2x_virtf *vf, int qid, + struct bnx2x_vf_queue_construct_params *qctor) +{ + struct bnx2x_queue_state_params *q_params; + int rc = 0; + + DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); + + /* Prepare ramrod information */ + q_params = &qctor->qstate; + q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj); + set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags); + + if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == + 
BNX2X_Q_LOGICAL_STATE_ACTIVE) { + DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n"); + goto out; + } + + /* Run Queue 'construction' ramrods */ + q_params->cmd = BNX2X_Q_CMD_INIT; + rc = bnx2x_queue_state_change(bp, q_params); + if (rc) + goto out; + + memcpy(&q_params->params.setup, &qctor->prep_qsetup, + sizeof(struct bnx2x_queue_setup_params)); + q_params->cmd = BNX2X_Q_CMD_SETUP; + rc = bnx2x_queue_state_change(bp, q_params); + if (rc) + goto out; + + /* enable interrupts */ + bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)), + USTORM_ID, 0, IGU_INT_ENABLE, 0); +out: + return rc; +} + +static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf, + int qid) +{ + enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT, + BNX2X_Q_CMD_TERMINATE, + BNX2X_Q_CMD_CFC_DEL}; + struct bnx2x_queue_state_params q_params; + int rc, i; + + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); + + /* Prepare ramrod information */ + memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params)); + q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj); + set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + + if (bnx2x_get_q_logical_state(bp, q_params.q_obj) == + BNX2X_Q_LOGICAL_STATE_STOPPED) { + DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n"); + goto out; + } + + /* Run Queue 'destruction' ramrods */ + for (i = 0; i < ARRAY_SIZE(cmds); i++) { + q_params.cmd = cmds[i]; + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]); + return rc; + } + } +out: + /* Clean Context */ + if (bnx2x_vfq(vf, qid, cxt)) { + bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0; + bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0; + } + + return 0; +} + +static void +bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid) +{ + struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); + if (vf) { + /* the first igu entry belonging to VFs of this PF */ + if (!BP_VFDB(bp)->first_vf_igu_entry) + BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id; + + /* the first igu entry belonging to this VF */ + if (!vf_sb_count(vf)) + vf->igu_base_id = igu_sb_id; + + ++vf_sb_count(vf); + ++vf->sb_count; + } + BP_VFDB(bp)->vf_sbs_pool++; +} + +static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *obj, + atomic_t *counter) +{ + struct list_head *pos; + int read_lock; + int cnt = 0; + + read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj); + if (read_lock) + DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n"); + + list_for_each(pos, &obj->head) + cnt++; + + if (!read_lock) + bnx2x_vlan_mac_h_read_unlock(bp, obj); + + atomic_set(counter, cnt); +} + +static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf, + int qid, bool drv_only, bool mac) +{ + struct bnx2x_vlan_mac_ramrod_params ramrod; + int rc; + + DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid, + mac ? 
"MACs" : "VLANs"); + + /* Prepare ramrod params */ + memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); + if (mac) { + set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); + ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); + } else { + set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &ramrod.user_req.vlan_mac_flags); + ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); + } + ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL; + + set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); + if (drv_only) + set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); + else + set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); + + /* Start deleting */ + rc = ramrod.vlan_mac_obj->delete_all(bp, + ramrod.vlan_mac_obj, + &ramrod.user_req.vlan_mac_flags, + &ramrod.ramrod_flags); + if (rc) { + BNX2X_ERR("Failed to delete all %s\n", + mac ? "MACs" : "VLANs"); + return rc; + } + + /* Clear the vlan counters */ + if (!mac) + atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0); + + return 0; +} + +static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, + struct bnx2x_virtf *vf, int qid, + struct bnx2x_vf_mac_vlan_filter *filter, + bool drv_only) +{ + struct bnx2x_vlan_mac_ramrod_params ramrod; + int rc; + + DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n", + vf->abs_vfid, filter->add ? "Adding" : "Deleting", + filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN"); + + /* Prepare ramrod params */ + memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); + if (filter->type == BNX2X_VF_FILTER_VLAN) { + set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &ramrod.user_req.vlan_mac_flags); + ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); + ramrod.user_req.u.vlan.vlan = filter->vid; + } else { + set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); + ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); + memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN); + } + ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD : + BNX2X_VLAN_MAC_DEL; + + /* Verify there are available vlan credits */ + if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN && + (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >= + vf_vlan_rules_cnt(vf))) { + BNX2X_ERR("No credits for vlan [%d >= %d]\n", + atomic_read(&bnx2x_vfq(vf, qid, vlan_count)), + vf_vlan_rules_cnt(vf)); + return -ENOMEM; + } + + set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); + if (drv_only) + set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); + else + set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); + + /* Add/Remove the filter */ + rc = bnx2x_config_vlan_mac(bp, &ramrod); + if (rc && rc != -EEXIST) { + BNX2X_ERR("Failed to %s %s\n", + filter->add ? "add" : "delete", + filter->type == BNX2X_VF_FILTER_MAC ? 
"MAC" : + "VLAN"); + return rc; + } + + /* Update the vlan counters */ + if (filter->type == BNX2X_VF_FILTER_VLAN) + bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj, + &bnx2x_vfq(vf, qid, vlan_count)); + + return 0; +} + +int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mac_vlan_filters *filters, + int qid, bool drv_only) +{ + int rc = 0, i; + + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); + + if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) + return -EINVAL; + + /* Prepare ramrod params */ + for (i = 0; i < filters->count; i++) { + rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, + &filters->filters[i], drv_only); + if (rc) + break; + } + + /* Rollback if needed */ + if (i != filters->count) { + BNX2X_ERR("Managed only %d/%d filters - rolling back\n", + i, filters->count + 1); + while (--i >= 0) { + filters->filters[i].add = !filters->filters[i].add; + bnx2x_vf_mac_vlan_config(bp, vf, qid, + &filters->filters[i], + drv_only); + } + } + + /* It's our responsibility to free the filters */ + kfree(filters); + + return rc; +} + +int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, + struct bnx2x_vf_queue_construct_params *qctor) +{ + int rc; + + DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); + + rc = bnx2x_vf_queue_create(bp, vf, qid, qctor); + if (rc) + goto op_err; + + /* Configure vlan0 for leading queue */ + if (!qid) { + struct bnx2x_vf_mac_vlan_filter filter; + + memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter)); + filter.type = BNX2X_VF_FILTER_VLAN; + filter.add = true; + filter.vid = 0; + rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false); + if (rc) + goto op_err; + } + + /* Schedule the configuration of any pending vlan filters */ + vf->cfg_flags |= VF_CFG_VLAN; + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN, + BNX2X_MSG_IOV); + return 0; +op_err: + BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc); + return rc; +} + +static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf, + int qid) +{ + int rc; + + DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); + + /* If needed, clean the filtering data base */ + if ((qid == LEADING_IDX) && + bnx2x_validate_vf_sp_objs(bp, vf, false)) { + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false); + if (rc) + goto op_err; + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true); + if (rc) + goto op_err; + } + + /* Terminate queue */ + if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) { + struct bnx2x_queue_state_params qstate; + + memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); + qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); + qstate.q_obj->state = BNX2X_Q_STATE_STOPPED; + qstate.cmd = BNX2X_Q_CMD_TERMINATE; + set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); + rc = bnx2x_queue_state_change(bp, &qstate); + if (rc) + goto op_err; + } + + return 0; +op_err: + BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc); + return rc; +} + +int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, + bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only) +{ + struct bnx2x_mcast_list_elem *mc = NULL; + struct bnx2x_mcast_ramrod_params mcast; + int rc, i; + + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); + + /* Prepare Multicast command */ + memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params)); + mcast.mcast_obj = &vf->mcast_obj; + if (drv_only) + set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags); + else + set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags); + if (mc_num) { + mc = kzalloc(mc_num * sizeof(struct 
bnx2x_mcast_list_elem),
+			     GFP_KERNEL);
+		if (!mc) {
+			BNX2X_ERR("Cannot configure multicasts due to lack of memory\n");
+			return -ENOMEM;
+		}
+	}
+
+	/* clear existing mcasts */
+	mcast.mcast_list_len = vf->mcast_list_len;
+	vf->mcast_list_len = mc_num;
+	rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
+	if (rc) {
+		BNX2X_ERR("Failed to remove multicasts\n");
+		kfree(mc);	/* kfree() is a no-op on NULL */
+		return rc;
+	}
+
+	/* update mcast list on the ramrod params */
+	if (mc_num) {
+		INIT_LIST_HEAD(&mcast.mcast_list);
+		for (i = 0; i < mc_num; i++) {
+			mc[i].mac = mcasts[i];
+			list_add_tail(&mc[i].link,
+				      &mcast.mcast_list);
+		}
+
+		/* add new mcasts */
+		mcast.mcast_list_len = mc_num;
+		rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
+		if (rc)
+			BNX2X_ERR("Failed to add multicasts\n");
+		kfree(mc);
+	}
+
+	return rc;
+}
+
+static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
+				  struct bnx2x_rx_mode_ramrod_params *ramrod,
+				  struct bnx2x_virtf *vf,
+				  unsigned long accept_flags)
+{
+	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
+
+	memset(ramrod, 0, sizeof(*ramrod));
+	ramrod->cid = vfq->cid;
+	ramrod->cl_id = vfq_cl_id(vf, vfq);
+	ramrod->rx_mode_obj = &bp->rx_mode_obj;
+	ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
+	ramrod->rx_accept_flags = accept_flags;
+	ramrod->tx_accept_flags = accept_flags;
+	ramrod->pstate = &vf->filter_state;
+	ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
+
+	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
+	set_bit(RAMROD_RX, &ramrod->ramrod_flags);
+	set_bit(RAMROD_TX, &ramrod->ramrod_flags);
+
+	ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
+	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+}
+
+int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
+		    int qid, unsigned long accept_flags)
+{
+	struct bnx2x_rx_mode_ramrod_params ramrod;
+
+	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+	bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
+	set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
+	vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
+	return bnx2x_config_rx_mode(bp, &ramrod);
+}
+
+int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
+{
+	int rc;
+
+	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
+
+	/* Remove all classification configuration for leading queue */
+	if (qid == LEADING_IDX) {
+		rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
+		if (rc)
+			goto op_err;
+
+		/* Remove filtering if feasible */
+		if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
+			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+						     false, false);
+			if (rc)
+				goto op_err;
+			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+						     false, true);
+			if (rc)
+				goto op_err;
+			rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
+			if (rc)
+				goto op_err;
+		}
+	}
+
+	/* Destroy queue */
+	rc = bnx2x_vf_queue_destroy(bp, vf, qid);
+	if (rc)
+		goto op_err;
+	return rc;
+op_err:
+	BNX2X_ERR("vf[%d:%d] error: rc %d\n",
+		  vf->abs_vfid, qid, rc);
+	return rc;
+}
+
+/* VF enable primitives
+ * when pretend is required the caller is responsible
+ * for calling pretend prior to calling these routines
+ */
+
+/* internal vf enable - until vf is enabled internally all transactions
+ * are blocked. This routine should always be called last with pretend.
+ */
+static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
+{
+	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ?
1 : 0); +} + +/* clears vf error in all semi blocks */ +static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid) +{ + REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid); + REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid); + REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid); + REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid); +} + +static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid) +{ + u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5; + u32 was_err_reg = 0; + + switch (was_err_group) { + case 0: + was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR; + break; + case 1: + was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR; + break; + case 2: + was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR; + break; + case 3: + was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR; + break; + } + REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f)); +} + +static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + int i; + u32 val; + + /* Set VF masks and configuration - pretend */ + bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); + + REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); + REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); + REG_WR(bp, IGU_REG_SB_MASK_LSB, 0); + REG_WR(bp, IGU_REG_SB_MASK_MSB, 0); + REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0); + REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0); + + val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); + val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN); + if (vf->cfg_flags & VF_CFG_INT_SIMD) + val |= IGU_VF_CONF_SINGLE_ISR_EN; + val &= ~IGU_VF_CONF_PARENT_MASK; + val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT; + REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); + + DP(BNX2X_MSG_IOV, + "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n", + vf->abs_vfid, val); + + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); + + /* iterate over all queues, clear sb consumer */ + for (i = 0; i < vf_sb_count(vf); i++) { + u8 igu_sb_id = vf_igu_sb(vf, i); + + /* zero prod memory */ + REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0); + + /* clear sb state machine */ + bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id, + false /* VF */); + + /* disable + update */ + bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0, + IGU_INT_DISABLE, 1); + } +} + +void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid) +{ + /* set the VF-PF association in the FW */ + storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp)); + storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1); + + /* clear vf errors*/ + bnx2x_vf_semi_clear_err(bp, abs_vfid); + bnx2x_vf_pglue_clear_err(bp, abs_vfid); + + /* internal vf-enable - pretend */ + bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid)); + DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid); + bnx2x_vf_enable_internal(bp, true); + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); +} + +static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + /* Reset vf in IGU interrupts are still disabled */ + bnx2x_vf_igu_reset(bp, vf); + + /* pretend to enable the vf with the PBF */ + bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); + REG_WR(bp, PBF_REG_DISABLE_VF, 0); + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); +} + +static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid) +{ + struct pci_dev *dev; + struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); + + if (!vf) + return false; + + dev = pci_get_bus_and_slot(vf->bus, vf->devfn); + if (dev) + return bnx2x_is_pcie_pending(dev); + return false; +} + +int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) +{ + /* Verify no 
pending pci transactions */ + if (bnx2x_vf_is_pcie_pending(bp, abs_vfid)) + BNX2X_ERR("PCIE Transactions still pending\n"); + + return 0; +} + +static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp, + struct bnx2x_virtf *vf, + int new) +{ + int num = vf_vlan_rules_cnt(vf); + int diff = new - num; + bool rc = true; + + DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n", + vf->abs_vfid, new, num); + + if (diff > 0) + rc = bp->vlans_pool.get(&bp->vlans_pool, diff); + else if (diff < 0) + rc = bp->vlans_pool.put(&bp->vlans_pool, -diff); + + if (rc) + vf_vlan_rules_cnt(vf) = new; + else + DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n", + vf->abs_vfid); +} + +/* must be called after the number of PF queues and the number of VFs are + * both known + */ +static void +bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + struct vf_pf_resc_request *resc = &vf->alloc_resc; + u16 vlan_count = 0; + + /* will be set only during VF-ACQUIRE */ + resc->num_rxqs = 0; + resc->num_txqs = 0; + + /* no credit calculations for macs (just yet) */ + resc->num_mac_filters = 1; + + /* divvy up vlan rules */ + bnx2x_iov_re_set_vlan_filters(bp, vf, 0); + vlan_count = bp->vlans_pool.check(&bp->vlans_pool); + vlan_count = 1 << ilog2(vlan_count); + bnx2x_iov_re_set_vlan_filters(bp, vf, + vlan_count / BNX2X_NR_VIRTFN(bp)); + + /* no real limitation */ + resc->num_mc_filters = 0; + + /* num_sbs already set */ + resc->num_sbs = vf->sb_count; +} + +/* FLR routines: */ +static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + /* reset the state variables */ + bnx2x_iov_static_resc(bp, vf); + vf->state = VF_FREE; +} + +static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp); + + /* DQ usage counter */ + bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); + bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT, + "DQ VF usage counter timed out", + poll_cnt); + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); + + /* FW cleanup command - poll for the results */ + if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid), + poll_cnt)) + BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid); + + /* verify TX hw is flushed */ + bnx2x_tx_hw_flushed(bp, poll_cnt); +} + +static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + int rc, i; + + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); + + /* the cleanup operations are valid if and only if the VF + * was first acquired. + */ + for (i = 0; i < vf_rxq_count(vf); i++) { + rc = bnx2x_vf_queue_flr(bp, vf, i); + if (rc) + goto out; + } + + /* remove multicasts */ + bnx2x_vf_mcast(bp, vf, NULL, 0, true); + + /* dispatch final cleanup and wait for HW queues to flush */ + bnx2x_vf_flr_clnup_hw(bp, vf); + + /* release VF resources */ + bnx2x_vf_free_resc(bp, vf); + + /* re-open the mailbox */ + bnx2x_vf_enable_mbx(bp, vf->abs_vfid); + return; +out: + BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n", + vf->abs_vfid, i, rc); +} + +static void bnx2x_vf_flr_clnup(struct bnx2x *bp) +{ + struct bnx2x_virtf *vf; + int i; + + for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) { + /* VF should be RESET & in FLR cleanup states */ + if (bnx2x_vf(bp, i, state) != VF_RESET || + !bnx2x_vf(bp, i, flr_clnup_stage)) + continue; + + DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. 
Num of vfs: %d\n",
+		   i, BNX2X_NR_VIRTFN(bp));
+
+		vf = BP_VF(bp, i);
+
+		/* lock the vf pf channel */
+		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
+
+		/* invoke the VF FLR SM */
+		bnx2x_vf_flr(bp, vf);
+
+		/* mark the VF to be ACKED and continue */
+		vf->flr_clnup_stage = false;
+		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
+	}
+
+	/* Acknowledge the handled VFs.
+	 * We acknowledge all the VFs for which an FLR was requested, even
+	 * those we never opened, since the MCP will interrupt us immediately
+	 * again if we only ack some of the bits, resulting in an endless
+	 * loop. This can happen for example in KVM, where an 'all ones' FLR
+	 * request is sometimes given by the hypervisor
+	 */
+	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
+	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
+	for (i = 0; i < FLRD_VFS_DWORDS; i++)
+		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
+			  bp->vfdb->flrd_vfs[i]);
+
+	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);
+
+	/* clear the acked bits - better yet if the MCP implemented
+	 * write to clear semantics
+	 */
+	for (i = 0; i < FLRD_VFS_DWORDS; i++)
+		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
+}
+
+void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
+{
+	int i;
+
+	/* Read FLR'd VFs */
+	for (i = 0; i < FLRD_VFS_DWORDS; i++)
+		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);
+
+	DP(BNX2X_MSG_MCP,
+	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
+	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
+
+	for_each_vf(bp, i) {
+		struct bnx2x_virtf *vf = BP_VF(bp, i);
+		u32 reset = 0;
+
+		if (vf->abs_vfid < 32)
+			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
+		else
+			reset = bp->vfdb->flrd_vfs[1] &
+				(1 << (vf->abs_vfid - 32));
+
+		if (reset) {
+			/* set as reset and ready for cleanup */
+			vf->state = VF_RESET;
+			vf->flr_clnup_stage = true;
+
+			DP(BNX2X_MSG_IOV,
+			   "Initiating Final cleanup for VF %d\n",
+			   vf->abs_vfid);
+		}
+	}
+
+	/* do the FLR cleanup for all marked VFs */
+	bnx2x_vf_flr_clnup(bp);
+}
+
+/* IOV global initialization routines */
+void bnx2x_iov_init_dq(struct bnx2x *bp)
+{
+	if (!IS_SRIOV(bp))
+		return;
+
+	/* Set the DQ such that the CID reflects the abs_vfid */
+	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
+	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));
+
+	/* Set the VFs' starting CID. If it is > 0, the preceding CIDs belong
+	 * to the PF L2 queues
+	 */
+	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);
+
+	/* The VF window size is the log2 of the max number of CIDs per VF */
+	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);
+
+	/* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
+	 * the PF doorbell size, although the two are independent.
+	 */
+	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);
+
+	/* No security checks for now -
+	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
+	 * CID range 0 - 0x1ffff
+	 */
+	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
+	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
+	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
+	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
+
+	/* set the VF doorbell threshold. This threshold represents the number
+	 * of doorbells allowed in the main DORQ fifo for a specific VF.
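+	 * As a sketch of how the CID window configured above is consumed,
+	 * the PF itself decodes a VF CID later in this file
+	 * (bnx2x_iov_eq_sp_event()) as:
+	 *   qidx     = cid & ((1 << BNX2X_VF_CID_WND) - 1);
+	 *   abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS - 1);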
+ */ + REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64); +} + +void bnx2x_iov_init_dmae(struct bnx2x *bp) +{ + if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) + REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); +} + +static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) +{ + struct pci_dev *dev = bp->pdev; + struct bnx2x_sriov *iov = &bp->vfdb->sriov; + + return dev->bus->number + ((dev->devfn + iov->offset + + iov->stride * vfid) >> 8); +} + +static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid) +{ + struct pci_dev *dev = bp->pdev; + struct bnx2x_sriov *iov = &bp->vfdb->sriov; + + return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff; +} + +static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + int i, n; + struct pci_dev *dev = bp->pdev; + struct bnx2x_sriov *iov = &bp->vfdb->sriov; + + for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) { + u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i); + u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i); + + size /= iov->total; + vf->bars[n].bar = start + size * vf->abs_vfid; + vf->bars[n].size = size; + } +} + +static int bnx2x_ari_enabled(struct pci_dev *dev) +{ + return dev->bus->self && dev->bus->self->ari_enabled; +} + +static void +bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) +{ + int sb_id; + u32 val; + u8 fid, current_pf = 0; + + /* IGU in normal mode - read CAM */ + for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) { + val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4); + if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) + continue; + fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID); + if (fid & IGU_FID_ENCODE_IS_PF) + current_pf = fid & IGU_FID_PF_NUM_MASK; + else if (current_pf == BP_FUNC(bp)) + bnx2x_vf_set_igu_info(bp, sb_id, + (fid & IGU_FID_VF_NUM_MASK)); + DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n", + ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"), + ((fid & IGU_FID_ENCODE_IS_PF) ? 
(fid & IGU_FID_PF_NUM_MASK) : + (fid & IGU_FID_VF_NUM_MASK)), sb_id, + GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)); + } + DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool); +} + +static void __bnx2x_iov_free_vfdb(struct bnx2x *bp) +{ + if (bp->vfdb) { + kfree(bp->vfdb->vfqs); + kfree(bp->vfdb->vfs); + kfree(bp->vfdb); + } + bp->vfdb = NULL; +} + +static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov) +{ + int pos; + struct pci_dev *dev = bp->pdev; + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) { + BNX2X_ERR("failed to find SRIOV capability in device\n"); + return -ENODEV; + } + + iov->pos = pos; + DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos); + pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl); + pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total); + pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial); + pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset); + pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride); + pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz); + pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap); + pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); + + return 0; +} + +static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov) +{ + u32 val; + + /* read the SRIOV capability structure + * The fields can be read via configuration read or + * directly from the device (starting at offset PCICFG_OFFSET) + */ + if (bnx2x_sriov_pci_cfg_info(bp, iov)) + return -ENODEV; + + /* get the number of SRIOV bars */ + iov->nres = 0; + + /* read the first_vfid */ + val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF); + iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK) + * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp)); + + DP(BNX2X_MSG_IOV, + "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n", + BP_FUNC(bp), + iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total, + iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz); + + return 0; +} + +/* must be called after PF bars are mapped */ +int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, + int num_vfs_param) +{ + int err, i; + struct bnx2x_sriov *iov; + struct pci_dev *dev = bp->pdev; + + bp->vfdb = NULL; + + /* verify is pf */ + if (IS_VF(bp)) + return 0; + + /* verify sriov capability is present in configuration space */ + if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV)) + return 0; + + /* verify chip revision */ + if (CHIP_IS_E1x(bp)) + return 0; + + /* check if SRIOV support is turned off */ + if (!num_vfs_param) + return 0; + + /* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */ + if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) { + BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). 
Abort SRIOV\n", + BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID); + return 0; + } + + /* SRIOV can be enabled only with MSIX */ + if (int_mode_param == BNX2X_INT_MODE_MSI || + int_mode_param == BNX2X_INT_MODE_INTX) { + BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n"); + return 0; + } + + err = -EIO; + /* verify ari is enabled */ + if (!bnx2x_ari_enabled(bp->pdev)) { + BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n"); + return 0; + } + + /* verify igu is in normal mode */ + if (CHIP_INT_MODE_IS_BC(bp)) { + BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n"); + return 0; + } + + /* allocate the vfs database */ + bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL); + if (!bp->vfdb) { + BNX2X_ERR("failed to allocate vf database\n"); + err = -ENOMEM; + goto failed; + } + + /* get the sriov info - Linux already collected all the pertinent + * information, however the sriov structure is for the private use + * of the pci module. Also we want this information regardless + * of the hyper-visor. + */ + iov = &(bp->vfdb->sriov); + err = bnx2x_sriov_info(bp, iov); + if (err) + goto failed; + + /* SR-IOV capability was enabled but there are no VFs*/ + if (iov->total == 0) + goto failed; + + iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param); + + DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n", + num_vfs_param, iov->nr_virtfn); + + /* allocate the vf array */ + bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) * + BNX2X_NR_VIRTFN(bp), GFP_KERNEL); + if (!bp->vfdb->vfs) { + BNX2X_ERR("failed to allocate vf array\n"); + err = -ENOMEM; + goto failed; + } + + /* Initial VF init - index and abs_vfid - nr_virtfn must be set */ + for_each_vf(bp, i) { + bnx2x_vf(bp, i, index) = i; + bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i; + bnx2x_vf(bp, i, state) = VF_FREE; + mutex_init(&bnx2x_vf(bp, i, op_mutex)); + bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; + } + + /* re-read the IGU CAM for VFs - index and abs_vfid must be set */ + bnx2x_get_vf_igu_cam_info(bp); + + /* allocate the queue arrays for all VFs */ + bp->vfdb->vfqs = kzalloc( + BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue), + GFP_KERNEL); + + DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs); + + if (!bp->vfdb->vfqs) { + BNX2X_ERR("failed to allocate vf queue array\n"); + err = -ENOMEM; + goto failed; + } + + /* Prepare the VFs event synchronization mechanism */ + mutex_init(&bp->vfdb->event_mutex); + + return 0; +failed: + DP(BNX2X_MSG_IOV, "Failed err=%d\n", err); + __bnx2x_iov_free_vfdb(bp); + return err; +} + +void bnx2x_iov_remove_one(struct bnx2x *bp) +{ + int vf_idx; + + /* if SRIOV is not enabled there's nothing to do */ + if (!IS_SRIOV(bp)) + return; + + DP(BNX2X_MSG_IOV, "about to call disable sriov\n"); + pci_disable_sriov(bp->pdev); + DP(BNX2X_MSG_IOV, "sriov disabled\n"); + + /* disable access to all VFs */ + for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) { + bnx2x_pretend_func(bp, + HW_VF_HANDLE(bp, + bp->vfdb->sriov.first_vf_in_pf + + vf_idx)); + DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n", + bp->vfdb->sriov.first_vf_in_pf + vf_idx); + bnx2x_vf_enable_internal(bp, 0); + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); + } + + /* free vf database */ + __bnx2x_iov_free_vfdb(bp); +} + +void bnx2x_iov_free_mem(struct bnx2x *bp) +{ + int i; + + if (!IS_SRIOV(bp)) + return; + + /* free vfs hw contexts */ + for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { + struct hw_dma *cxt = &bp->vfdb->context[i]; + 
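+		/* each context page below was sized (possibly to zero) in
+		 * bnx2x_iov_alloc_mem(); BNX2X_PCI_FREE is assumed to be a
+		 * no-op for a page whose addr was left NULL, so a partially
+		 * failed allocation can be freed safely here.
+		 */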
BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size); + } + + BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr, + BP_VFDB(bp)->sp_dma.mapping, + BP_VFDB(bp)->sp_dma.size); + + BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr, + BP_VF_MBX_DMA(bp)->mapping, + BP_VF_MBX_DMA(bp)->size); + + BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr, + BP_VF_BULLETIN_DMA(bp)->mapping, + BP_VF_BULLETIN_DMA(bp)->size); +} + +int bnx2x_iov_alloc_mem(struct bnx2x *bp) +{ + size_t tot_size; + int i, rc = 0; + + if (!IS_SRIOV(bp)) + return rc; + + /* allocate vfs hw contexts */ + tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) * + BNX2X_CIDS_PER_VF * sizeof(union cdu_context); + + for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { + struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i); + cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ); + + if (cxt->size) { + cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size); + if (!cxt->addr) + goto alloc_mem_err; + } else { + cxt->addr = NULL; + cxt->mapping = 0; + } + tot_size -= cxt->size; + } + + /* allocate vfs ramrods dma memory - client_init and set_mac */ + tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp); + BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping, + tot_size); + if (!BP_VFDB(bp)->sp_dma.addr) + goto alloc_mem_err; + BP_VFDB(bp)->sp_dma.size = tot_size; + + /* allocate mailboxes */ + tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE; + BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping, + tot_size); + if (!BP_VF_MBX_DMA(bp)->addr) + goto alloc_mem_err; + + BP_VF_MBX_DMA(bp)->size = tot_size; + + /* allocate local bulletin boards */ + tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE; + BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping, + tot_size); + if (!BP_VF_BULLETIN_DMA(bp)->addr) + goto alloc_mem_err; + + BP_VF_BULLETIN_DMA(bp)->size = tot_size; + + return 0; + +alloc_mem_err: + return -ENOMEM; +} + +static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_queue *q) +{ + u8 cl_id = vfq_cl_id(vf, q); + u8 func_id = FW_VF_HANDLE(vf->abs_vfid); + unsigned long q_type = 0; + + set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); + set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); + + /* Queue State object */ + bnx2x_init_queue_obj(bp, &q->sp_obj, + cl_id, &q->cid, 1, func_id, + bnx2x_vf_sp(bp, vf, q_data), + bnx2x_vf_sp_map(bp, vf, q_data), + q_type); + + /* sp indication is set only when vlan/mac/etc. are initialized */ + q->sp_initialized = false; + + DP(BNX2X_MSG_IOV, + "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n", + vf->abs_vfid, q->sp_obj.func_id, q->cid); +} + +/* called by bnx2x_nic_load */ +int bnx2x_iov_nic_init(struct bnx2x *bp) +{ + int vfid; + + if (!IS_SRIOV(bp)) { + DP(BNX2X_MSG_IOV, "vfdb was not allocated\n"); + return 0; + } + + DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn); + + /* let FLR complete ... 
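+	 * (the fixed 100ms sleep below is a heuristic grace period for any
+	 * outstanding VF FLRs to drain before the VF database is
+	 * initialized; bnx2x_vf_init() applies the same wait before its
+	 * own FLR cleanup epilogue)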
*/ + msleep(100); + + /* initialize vf database */ + for_each_vf(bp, vfid) { + struct bnx2x_virtf *vf = BP_VF(bp, vfid); + + int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) * + BNX2X_CIDS_PER_VF; + + union cdu_context *base_cxt = (union cdu_context *) + BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + + (base_vf_cid & (ILT_PAGE_CIDS-1)); + + DP(BNX2X_MSG_IOV, + "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n", + vf->abs_vfid, vf_sb_count(vf), base_vf_cid, + BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt); + + /* init statically provisioned resources */ + bnx2x_iov_static_resc(bp, vf); + + /* queues are initialized during VF-ACQUIRE */ + vf->filter_state = 0; + vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); + + /* init mcast object - This object will be re-initialized + * during VF-ACQUIRE with the proper cl_id and cid. + * It needs to be initialized here so that it can be safely + * handled by a subsequent FLR flow. + */ + vf->mcast_list_len = 0; + bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF, + 0xFF, 0xFF, 0xFF, + bnx2x_vf_sp(bp, vf, mcast_rdata), + bnx2x_vf_sp_map(bp, vf, mcast_rdata), + BNX2X_FILTER_MCAST_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX); + + /* set the mailbox message addresses */ + BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *) + (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid * + MBX_MSG_ALIGNED_SIZE); + + BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping + + vfid * MBX_MSG_ALIGNED_SIZE; + + /* Enable vf mailbox */ + bnx2x_vf_enable_mbx(bp, vf->abs_vfid); + } + + /* Final VF init */ + for_each_vf(bp, vfid) { + struct bnx2x_virtf *vf = BP_VF(bp, vfid); + + /* fill in the BDF and bars */ + vf->bus = bnx2x_vf_bus(bp, vfid); + vf->devfn = bnx2x_vf_devfn(bp, vfid); + bnx2x_vf_set_bars(bp, vf); + + DP(BNX2X_MSG_IOV, + "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n", + vf->abs_vfid, vf->bus, vf->devfn, + (unsigned)vf->bars[0].bar, vf->bars[0].size, + (unsigned)vf->bars[1].bar, vf->bars[1].size, + (unsigned)vf->bars[2].bar, vf->bars[2].size); + } + + return 0; +} + +/* called by bnx2x_chip_cleanup */ +int bnx2x_iov_chip_cleanup(struct bnx2x *bp) +{ + int i; + + if (!IS_SRIOV(bp)) + return 0; + + /* release all the VFs */ + for_each_vf(bp, i) + bnx2x_vf_release(bp, BP_VF(bp, i)); + + return 0; +} + +/* called by bnx2x_init_hw_func, returns the next ilt line */ +int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) +{ + int i; + struct bnx2x_ilt *ilt = BP_ILT(bp); + + if (!IS_SRIOV(bp)) + return line; + + /* set vfs ilt lines */ + for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { + struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i); + + ilt->lines[line+i].page = hw_cxt->addr; + ilt->lines[line+i].page_mapping = hw_cxt->mapping; + ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */ + } + return line + i; +} + +static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid) +{ + return ((cid >= BNX2X_FIRST_VF_CID) && + ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS)); +} + +static +void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, + struct bnx2x_vf_queue *vfq, + union event_ring_elem *elem) +{ + unsigned long ramrod_flags = 0; + int rc = 0; + + /* Always push next commands out, don't wait here */ + set_bit(RAMROD_CONT, &ramrod_flags); + + switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { + case BNX2X_FILTER_MAC_PENDING: + rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem, + &ramrod_flags); + break; + case BNX2X_FILTER_VLAN_PENDING: + rc = vfq->vlan_obj.complete(bp, 
&vfq->vlan_obj, elem, + &ramrod_flags); + break; + default: + BNX2X_ERR("Unsupported classification command: %d\n", + elem->message.data.eth_event.echo); + return; + } + if (rc < 0) + BNX2X_ERR("Failed to schedule new commands: %d\n", rc); + else if (rc > 0) + DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n"); +} + +static +void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp, + struct bnx2x_virtf *vf) +{ + struct bnx2x_mcast_ramrod_params rparam = {NULL}; + int rc; + + rparam.mcast_obj = &vf->mcast_obj; + vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw); + + /* If there are pending mcast commands - send them */ + if (vf->mcast_obj.check_pending(&vf->mcast_obj)) { + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); + if (rc < 0) + BNX2X_ERR("Failed to send pending mcast commands: %d\n", + rc); + } +} + +static +void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, + struct bnx2x_virtf *vf) +{ + smp_mb__before_atomic(); + clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); + smp_mb__after_atomic(); +} + +static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp, + struct bnx2x_virtf *vf) +{ + vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw); +} + +int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) +{ + struct bnx2x_virtf *vf; + int qidx = 0, abs_vfid; + u8 opcode; + u16 cid = 0xffff; + + if (!IS_SRIOV(bp)) + return 1; + + /* first get the cid - the only events we handle here are cfc-delete + * and set-mac completion + */ + opcode = elem->message.opcode; + + switch (opcode) { + case EVENT_RING_OPCODE_CFC_DEL: + cid = SW_CID((__force __le32) + elem->message.data.cfc_del_event.cid); + DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid); + break; + case EVENT_RING_OPCODE_CLASSIFICATION_RULES: + case EVENT_RING_OPCODE_MULTICAST_RULES: + case EVENT_RING_OPCODE_FILTERS_RULES: + case EVENT_RING_OPCODE_RSS_UPDATE_RULES: + cid = (elem->message.data.eth_event.echo & + BNX2X_SWCID_MASK); + DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); + break; + case EVENT_RING_OPCODE_VF_FLR: + abs_vfid = elem->message.data.vf_flr_event.vf_id; + DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n", + abs_vfid); + goto get_vf; + case EVENT_RING_OPCODE_MALICIOUS_VF: + abs_vfid = elem->message.data.malicious_vf_event.vf_id; + BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n", + abs_vfid, + elem->message.data.malicious_vf_event.err_id); + goto get_vf; + default: + return 1; + } + + /* check if the cid is the VF range */ + if (!bnx2x_iov_is_vf_cid(bp, cid)) { + DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid); + return 1; + } + + /* extract vf and rxq index from vf_cid - relies on the following: + * 1. vfid on cid reflects the true abs_vfid + * 2. 
The max number of VFs (per path) is 64 + */ + qidx = cid & ((1 << BNX2X_VF_CID_WND)-1); + abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); +get_vf: + vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); + + if (!vf) { + BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n", + cid, abs_vfid); + return 0; + } + + switch (opcode) { + case EVENT_RING_OPCODE_CFC_DEL: + DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n", + vf->abs_vfid, qidx); + vfq_get(vf, qidx)->sp_obj.complete_cmd(bp, + &vfq_get(vf, + qidx)->sp_obj, + BNX2X_Q_CMD_CFC_DEL); + break; + case EVENT_RING_OPCODE_CLASSIFICATION_RULES: + DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n", + vf->abs_vfid, qidx); + bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem); + break; + case EVENT_RING_OPCODE_MULTICAST_RULES: + DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n", + vf->abs_vfid, qidx); + bnx2x_vf_handle_mcast_eqe(bp, vf); + break; + case EVENT_RING_OPCODE_FILTERS_RULES: + DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n", + vf->abs_vfid, qidx); + bnx2x_vf_handle_filters_eqe(bp, vf); + break; + case EVENT_RING_OPCODE_RSS_UPDATE_RULES: + DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n", + vf->abs_vfid, qidx); + bnx2x_vf_handle_rss_update_eqe(bp, vf); + case EVENT_RING_OPCODE_VF_FLR: + case EVENT_RING_OPCODE_MALICIOUS_VF: + /* Do nothing for now */ + return 0; + } + + return 0; +} + +static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid) +{ + /* extract the vf from vf_cid - relies on the following: + * 1. vfid on cid reflects the true abs_vfid + * 2. The max number of VFs (per path) is 64 + */ + int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); + return bnx2x_vf_by_abs_fid(bp, abs_vfid); +} + +void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, + struct bnx2x_queue_sp_obj **q_obj) +{ + struct bnx2x_virtf *vf; + + if (!IS_SRIOV(bp)) + return; + + vf = bnx2x_vf_by_cid(bp, vf_cid); + + if (vf) { + /* extract queue index from vf_cid - relies on the following: + * 1. vfid on cid reflects the true abs_vfid + * 2. The max number of VFs (per path) is 64 + */ + int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1); + *q_obj = &bnx2x_vfq(vf, q_index, sp_obj); + } else { + BNX2X_ERR("No vf matching cid %d\n", vf_cid); + } +} + +void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) +{ + int i; + int first_queue_query_index, num_queues_req; + dma_addr_t cur_data_offset; + struct stats_query_entry *cur_query_entry; + u8 stats_count = 0; + bool is_fcoe = false; + + if (!IS_SRIOV(bp)) + return; + + if (!NO_FCOE(bp)) + is_fcoe = true; + + /* fcoe adds one global request and one queue request */ + num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe; + first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - + (is_fcoe ? 0 : 1); + + DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), + "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. 
Will add queries on top of that\n",
+	       BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
+	       first_queue_query_index + num_queues_req);
+
+	cur_data_offset = bp->fw_stats_data_mapping +
+		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
+		num_queues_req * sizeof(struct per_queue_stats);
+
+	cur_query_entry = &bp->fw_stats_req->
+		query[first_queue_query_index + num_queues_req];
+
+	for_each_vf(bp, i) {
+		int j;
+		struct bnx2x_virtf *vf = BP_VF(bp, i);
+
+		if (vf->state != VF_ENABLED) {
+			DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+			       "vf %d not enabled so no stats for it\n",
+			       vf->abs_vfid);
+			continue;
+		}
+
+		DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
+		for_each_vfq(vf, j) {
+			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
+
+			dma_addr_t q_stats_addr =
+				vf->fw_stat_map + j * vf->stats_stride;
+
+			/* collect stats for active queues only */
+			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
+			    BNX2X_Q_LOGICAL_STATE_STOPPED)
+				continue;
+
+			/* create stats query entry for this queue */
+			cur_query_entry->kind = STATS_TYPE_QUEUE;
+			cur_query_entry->index = vfq_stat_id(vf, rxq);
+			cur_query_entry->funcID =
+				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
+			cur_query_entry->address.hi =
+				cpu_to_le32(U64_HI(q_stats_addr));
+			cur_query_entry->address.lo =
+				cpu_to_le32(U64_LO(q_stats_addr));
+			DP(BNX2X_MSG_IOV,
+			   "added address %x %x for vf %d queue %d client %d\n",
+			   cur_query_entry->address.hi,
+			   cur_query_entry->address.lo, cur_query_entry->funcID,
+			   j, cur_query_entry->index);
+			cur_query_entry++;
+			cur_data_offset += sizeof(struct per_queue_stats);
+			stats_count++;
+
+			/* all stats are coalesced to the leading queue */
+			if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
+				break;
+		}
+	}
+	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
+}
+
+static inline
+struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
+{
+	int i;
+	struct bnx2x_virtf *vf = NULL;
+
+	for_each_vf(bp, i) {
+		vf = BP_VF(bp, i);
+		if (stat_id >= vf->igu_base_id &&
+		    stat_id < vf->igu_base_id + vf_sb_count(vf))
+			break;
+	}
+	return vf;
+}
+
+/* VF API helpers */
+static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
+				u8 enable)
+{
+	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
+	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
+
+	REG_WR(bp, reg, val);
+}
+
+static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	int i;
+
+	for_each_vfq(vf, i)
+		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
+				    vfq_qzone_id(vf, vfq_get(vf, i)), false);
+}
+
+static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	u32 val;
+
+	/* clear the VF configuration - pretend */
+	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
+	val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
+		 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
+	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
+	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+}
+
+u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
+		     BNX2X_VF_MAX_QUEUES);
+}
+
+static
+int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
+			    struct vf_pf_resc_request *req_resc)
+{
+	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
+	u8 txq_cnt = vf_txq_count(vf) ?
: bnx2x_vf_max_queue_cnt(bp, vf);
+
+	/* Save a vlan filter for the Hypervisor */
+	return ((req_resc->num_rxqs <= rxq_cnt) &&
+		(req_resc->num_txqs <= txq_cnt) &&
+		(req_resc->num_sbs <= vf_sb_count(vf)) &&
+		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
+		(req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));
+}
+
+/* CORE VF API */
+int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
+		     struct vf_pf_resc_request *resc)
+{
+	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
+		BNX2X_CIDS_PER_VF;
+
+	union cdu_context *base_cxt = (union cdu_context *)
+		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
+		(base_vf_cid & (ILT_PAGE_CIDS-1));
+	int i;
+
+	/* If the state is 'acquired', the VF was not released or FLR'd; in
+	 * this case the returned resources match the already acquired
+	 * resources. Verify that the requested numbers do not exceed the
+	 * numbers previously acquired.
+	 */
+	if (vf->state == VF_ACQUIRED) {
+		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
+		   vf->abs_vfid);
+
+		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
+			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
+				  vf->abs_vfid);
+			return -EINVAL;
+		}
+		return 0;
+	}
+
+	/* Otherwise vf state must be 'free' or 'reset' */
+	if (vf->state != VF_FREE && vf->state != VF_RESET) {
+		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
+			  vf->abs_vfid, vf->state);
+		return -EINVAL;
+	}
+
+	/* static allocation:
+	 * the global maximum numbers are fixed per VF. Fail the request if
+	 * the requested numbers exceed these globals
+	 */
+	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
+		DP(BNX2X_MSG_IOV,
+		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
+		/* set the max resource in the vf */
+		return -ENOMEM;
+	}
+
+	/* Set resources counters - 0 request means max available */
+	vf_sb_count(vf) = resc->num_sbs;
+	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
+	vf_txq_count(vf) = resc->num_txqs ?
: bnx2x_vf_max_queue_cnt(bp, vf); + if (resc->num_mac_filters) + vf_mac_rules_cnt(vf) = resc->num_mac_filters; + /* Add an additional vlan filter credit for the hypervisor */ + bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1); + + DP(BNX2X_MSG_IOV, + "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n", + vf_sb_count(vf), vf_rxq_count(vf), + vf_txq_count(vf), vf_mac_rules_cnt(vf), + vf_vlan_rules_visible_cnt(vf)); + + /* Initialize the queues */ + if (!vf->vfqs) { + DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n"); + return -EINVAL; + } + + for_each_vfq(vf, i) { + struct bnx2x_vf_queue *q = vfq_get(vf, i); + + if (!q) { + BNX2X_ERR("q number %d was not allocated\n", i); + return -EINVAL; + } + + q->index = i; + q->cxt = &((base_cxt + i)->eth); + q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i; + + DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n", + vf->abs_vfid, i, q->index, q->cid, q->cxt); + + /* init SP objects */ + bnx2x_vfq_init(bp, vf, q); + } + vf->state = VF_ACQUIRED; + return 0; +} + +int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) +{ + struct bnx2x_func_init_params func_init = {0}; + u16 flags = 0; + int i; + + /* the sb resources are initialized at this point, do the + * FW/HW initializations + */ + for_each_vf_sb(vf, i) + bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true, + vf_igu_sb(vf, i), vf_igu_sb(vf, i)); + + /* Sanity checks */ + if (vf->state != VF_ACQUIRED) { + DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n", + vf->abs_vfid, vf->state); + return -EINVAL; + } + + /* let FLR complete ... */ + msleep(100); + + /* FLR cleanup epilogue */ + if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid)) + return -EBUSY; + + /* reset IGU VF statistics: MSIX */ + REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0); + + /* vf init */ + if (vf->cfg_flags & VF_CFG_STATS) + flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ); + + if (vf->cfg_flags & VF_CFG_TPA) + flags |= FUNC_FLG_TPA; + + if (is_vf_multi(vf)) + flags |= FUNC_FLG_RSS; + + /* function setup */ + func_init.func_flgs = flags; + func_init.pf_id = BP_FUNC(bp); + func_init.func_id = FW_VF_HANDLE(vf->abs_vfid); + func_init.fw_stat_map = vf->fw_stat_map; + func_init.spq_map = vf->spq_map; + func_init.spq_prod = 0; + bnx2x_func_init(bp, &func_init); + + /* Enable the vf */ + bnx2x_vf_enable_access(bp, vf->abs_vfid); + bnx2x_vf_enable_traffic(bp, vf); + + /* queue protection table */ + for_each_vfq(vf, i) + bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, + vfq_qzone_id(vf, vfq_get(vf, i)), true); + + vf->state = VF_ENABLED; + + /* update vf bulletin board */ + bnx2x_post_vf_bulletin(bp, vf->index); + + return 0; +} + +struct set_vf_state_cookie { + struct bnx2x_virtf *vf; + u8 state; +}; + +static void bnx2x_set_vf_state(void *cookie) +{ + struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie; + + p->vf->state = p->state; +} + +int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + int rc = 0, i; + + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); + + /* Close all queues */ + for (i = 0; i < vf_rxq_count(vf); i++) { + rc = bnx2x_vf_queue_teardown(bp, vf, i); + if (rc) + goto op_err; + } + + /* disable the interrupts */ + DP(BNX2X_MSG_IOV, "disabling igu\n"); + bnx2x_vf_igu_disable(bp, vf); + + /* disable the VF */ + DP(BNX2X_MSG_IOV, "clearing qtbl\n"); + bnx2x_vf_clr_qtbl(bp, vf); + + /* need to make sure there are no outstanding stats ramrods which may + * cause the device to access 
the VF's stats buffer which it will free + * as soon as we return from the close flow. + */ + { + struct set_vf_state_cookie cookie; + + cookie.vf = vf; + cookie.state = VF_ACQUIRED; + bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); + } + + DP(BNX2X_MSG_IOV, "set state to acquired\n"); + + return 0; +op_err: + BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc); + return rc; +} + +/* VF release can be called either: 1. The VF was acquired but + * not enabled 2. the vf was enabled or in the process of being + * enabled + */ +int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + int rc; + + DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid, + vf->state == VF_FREE ? "Free" : + vf->state == VF_ACQUIRED ? "Acquired" : + vf->state == VF_ENABLED ? "Enabled" : + vf->state == VF_RESET ? "Reset" : + "Unknown"); + + switch (vf->state) { + case VF_ENABLED: + rc = bnx2x_vf_close(bp, vf); + if (rc) + goto op_err; + /* Fallthrough to release resources */ + case VF_ACQUIRED: + DP(BNX2X_MSG_IOV, "about to free resources\n"); + bnx2x_vf_free_resc(bp, vf); + break; + + case VF_FREE: + case VF_RESET: + default: + break; + } + return 0; +op_err: + BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc); + return rc; +} + +int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_config_rss_params *rss) +{ + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); + set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags); + return bnx2x_config_rss(bp, rss); +} + +int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct vfpf_tpa_tlv *tlv, + struct bnx2x_queue_update_tpa_params *params) +{ + aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr; + struct bnx2x_queue_state_params qstate; + int qid, rc = 0; + + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); + + /* Set ramrod params */ + memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); + memcpy(&qstate.params.update_tpa, params, + sizeof(struct bnx2x_queue_update_tpa_params)); + qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA; + set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); + + for (qid = 0; qid < vf_rxq_count(vf); qid++) { + qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); + qstate.params.update_tpa.sge_map = sge_addr[qid]; + DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n", + vf->abs_vfid, qid, U64_HI(sge_addr[qid]), + U64_LO(sge_addr[qid])); + rc = bnx2x_queue_state_change(bp, &qstate); + if (rc) { + BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n", + U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]), + vf->abs_vfid, qid); + return rc; + } + } + + return rc; +} + +/* VF release ~ VF close + VF release-resources + * Release is the ultimate SW shutdown and is called whenever an + * irrecoverable error is encountered. 
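+ * The vf-pf channel lock taken below (CHANNEL_TLV_PF_RELEASE_VF)
+ * serializes the release against any in-flight mailbox operation from
+ * the same VF.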
+ */
+int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	int rc;
+
+	DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
+	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
+
+	rc = bnx2x_vf_free(bp, vf);
+	if (rc)
+		WARN(rc,
+		     "VF[%d] Failed to release resources - rc=%d\n",
+		     vf->abs_vfid, rc);
+	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
+	return rc;
+}
+
+static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
+				     struct bnx2x_virtf *vf, u32 *sbdf)
+{
+	*sbdf = vf->devfn | (vf->bus << 8);
+}
+
+void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+			      enum channel_tlvs tlv)
+{
+	/* we don't lock the channel for unsupported tlvs */
+	if (!bnx2x_tlv_supported(tlv)) {
+		BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
+		return;
+	}
+
+	/* lock the channel */
+	mutex_lock(&vf->op_mutex);
+
+	/* record the locking op */
+	vf->op_current = tlv;
+
+	/* log the lock */
+	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
+	   vf->abs_vfid, tlv);
+}
+
+void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				enum channel_tlvs expected_tlv)
+{
+	enum channel_tlvs current_tlv;
+
+	if (!vf) {
+		BNX2X_ERR("VF was %p\n", vf);
+		return;
+	}
+
+	current_tlv = vf->op_current;
+
+	/* we don't unlock the channel for unsupported tlvs */
+	if (!bnx2x_tlv_supported(expected_tlv))
+		return;
+
+	WARN(expected_tlv != vf->op_current,
+	     "lock mismatch: expected %d found %d", expected_tlv,
+	     vf->op_current);
+
+	/* clear the locking op */
+	vf->op_current = CHANNEL_TLV_NONE;
+
+	/* unlock the channel */
+	mutex_unlock(&vf->op_mutex);
+
+	/* log the unlock - use the tlv saved above, since op_current has
+	 * already been cleared
+	 */
+	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
+	   vf->abs_vfid, current_tlv);
+}
+
+static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
+{
+	struct bnx2x_queue_state_params q_params;
+	u32 prev_flags;
+	int i, rc;
+
+	/* Verify changes are needed and record current Tx switching state */
+	prev_flags = bp->flags;
+	if (enable)
+		bp->flags |= TX_SWITCHING;
+	else
+		bp->flags &= ~TX_SWITCHING;
+	if (prev_flags == bp->flags)
+		return 0;
+
+	/* Verify state enables the sending of queue ramrods */
+	if ((bp->state != BNX2X_STATE_OPEN) ||
+	    (bnx2x_get_q_logical_state(bp,
+				       &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
+	     BNX2X_Q_LOGICAL_STATE_ACTIVE))
+		return 0;
+
+	/* send q. update ramrod to configure Tx switching */
+	memset(&q_params, 0, sizeof(q_params));
+	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+	q_params.cmd = BNX2X_Q_CMD_UPDATE;
+	__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+		  &q_params.params.update.update_flags);
+	if (enable)
+		__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
+			  &q_params.params.update.update_flags);
+	else
+		__clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
+			    &q_params.params.update.update_flags);
+
+	/* send the ramrod on all the queues of the PF */
+	for_each_eth_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		/* Set the appropriate Queue object */
+		q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+
+		/* Update the Queue state */
+		rc = bnx2x_queue_state_change(bp, &q_params);
+		if (rc) {
+			BNX2X_ERR("Failed to configure Tx switching\n");
+			return rc;
+		}
+	}
+
+	DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
+	return 0;
+}
+
+int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
+{
+	struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
+
+	if (!IS_SRIOV(bp)) {
+		BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. 
Check dmesg for errors in probe stage\n"); + return -EINVAL; + } + + DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", + num_vfs_param, BNX2X_NR_VIRTFN(bp)); + + /* HW channel is only operational when PF is up */ + if (bp->state != BNX2X_STATE_OPEN) { + BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n"); + return -EINVAL; + } + + /* we are always bound by the total_vfs in the configuration space */ + if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) { + BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n", + num_vfs_param, BNX2X_NR_VIRTFN(bp)); + num_vfs_param = BNX2X_NR_VIRTFN(bp); + } + + bp->requested_nr_virtfn = num_vfs_param; + if (num_vfs_param == 0) { + bnx2x_set_pf_tx_switching(bp, false); + pci_disable_sriov(dev); + return 0; + } else { + return bnx2x_enable_sriov(bp); + } +} + +#define IGU_ENTRY_SIZE 4 + +int bnx2x_enable_sriov(struct bnx2x *bp) +{ + int rc = 0, req_vfs = bp->requested_nr_virtfn; + int vf_idx, sb_idx, vfq_idx, qcount, first_vf; + u32 igu_entry, address; + u16 num_vf_queues; + + if (req_vfs == 0) + return 0; + + first_vf = bp->vfdb->sriov.first_vf_in_pf; + + /* statically distribute vf sb pool between VFs */ + num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES, + BP_VFDB(bp)->vf_sbs_pool / req_vfs); + + /* zero previous values learned from igu cam */ + for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) { + struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); + + vf->sb_count = 0; + vf_sb_count(BP_VF(bp, vf_idx)) = 0; + } + bp->vfdb->vf_sbs_pool = 0; + + /* prepare IGU cam */ + sb_idx = BP_VFDB(bp)->first_vf_igu_entry; + address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE; + for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { + for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) { + igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT | + vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT | + IGU_REG_MAPPING_MEMORY_VALID; + DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n", + sb_idx, vf_idx); + REG_WR(bp, address, igu_entry); + sb_idx++; + address += IGU_ENTRY_SIZE; + } + } + + /* Reinitialize vf database according to igu cam */ + bnx2x_get_vf_igu_cam_info(bp); + + DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n", + BP_VFDB(bp)->vf_sbs_pool, num_vf_queues); + + qcount = 0; + for_each_vf(bp, vf_idx) { + struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); + + /* set local queue arrays */ + vf->vfqs = &bp->vfdb->vfqs[qcount]; + qcount += vf_sb_count(vf); + bnx2x_iov_static_resc(bp, vf); + } + + /* prepare msix vectors in VF configuration space - the value in the + * PCI configuration space should be the index of the last entry, + * namely one less than the actual size of the table + */ + for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { + bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); + REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, + num_vf_queues - 1); + DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n", + vf_idx, num_vf_queues - 1); + } + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); + + /* enable sriov. This will probe all the VFs, and consequentially cause + * the "acquire" messages to appear on the VF PF channel. 
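bnx2x_enable_sriov() above statically splits the PF's status-block pool across the requested VFs, capped at BNX2X_VF_MAX_QUEUES per VF. A small standalone sketch of that arithmetic, with made-up pool and request sizes:

#include <stdio.h>

#define BNX2X_VF_MAX_QUEUES 16	/* per-VF cap, from bnx2x_sriov.h */

int main(void)
{
	unsigned int vf_sbs_pool = 64;	/* assumed SB pool learned from the IGU CAM */
	unsigned int req_vfs = 6;	/* assumed "echo 6 > sriov_numvfs" request */
	unsigned int num_vf_queues = vf_sbs_pool / req_vfs;

	if (num_vf_queues > BNX2X_VF_MAX_QUEUES)
		num_vf_queues = BNX2X_VF_MAX_QUEUES;

	/* 64 / 6 -> 10 SBs (hence queue pairs) per VF; 4 SBs stay unused */
	printf("num_vf_queues = %u\n", num_vf_queues);
	return 0;
}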
+ */ + DP(BNX2X_MSG_IOV, "about to call enable sriov\n"); + bnx2x_disable_sriov(bp); + + rc = bnx2x_set_pf_tx_switching(bp, true); + if (rc) + return rc; + + rc = pci_enable_sriov(bp->pdev, req_vfs); + if (rc) { + BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); + return rc; + } + DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs); + return req_vfs; +} + +void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) +{ + int vfidx; + struct pf_vf_bulletin_content *bulletin; + + DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n"); + for_each_vf(bp, vfidx) { + bulletin = BP_VF_BULLETIN(bp, vfidx); + if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN) + bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0); + } +} + +void bnx2x_disable_sriov(struct bnx2x *bp) +{ + pci_disable_sriov(bp->pdev); +} + +static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, + struct bnx2x_virtf **vf, + struct pf_vf_bulletin_content **bulletin) +{ + if (bp->state != BNX2X_STATE_OPEN) { + BNX2X_ERR("vf ndo called though PF is down\n"); + return -EINVAL; + } + + if (!IS_SRIOV(bp)) { + BNX2X_ERR("vf ndo called though sriov is disabled\n"); + return -EINVAL; + } + + if (vfidx >= BNX2X_NR_VIRTFN(bp)) { + BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n", + vfidx, BNX2X_NR_VIRTFN(bp)); + return -EINVAL; + } + + /* init members */ + *vf = BP_VF(bp, vfidx); + *bulletin = BP_VF_BULLETIN(bp, vfidx); + + if (!*vf) { + BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n", + vfidx); + return -EINVAL; + } + + if (!(*vf)->vfqs) { + BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n", + vfidx); + return -EINVAL; + } + + if (!*bulletin) { + BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n", + vfidx); + return -EINVAL; + } + + return 0; +} + +int bnx2x_get_vf_config(struct net_device *dev, int vfidx, + struct ifla_vf_info *ivi) +{ + struct bnx2x *bp = netdev_priv(dev); + struct bnx2x_virtf *vf = NULL; + struct pf_vf_bulletin_content *bulletin = NULL; + struct bnx2x_vlan_mac_obj *mac_obj; + struct bnx2x_vlan_mac_obj *vlan_obj; + int rc; + + /* sanity and init */ + rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); + if (rc) + return rc; + mac_obj = &bnx2x_leading_vfq(vf, mac_obj); + vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); + if (!mac_obj || !vlan_obj) { + BNX2X_ERR("VF partially initialized\n"); + return -EINVAL; + } + + ivi->vf = vfidx; + ivi->qos = 0; + ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */ + ivi->min_tx_rate = 0; + ivi->spoofchk = 1; /*always enabled */ + if (vf->state == VF_ENABLED) { + /* mac and vlan are in vlan_mac objects */ + if (bnx2x_validate_vf_sp_objs(bp, vf, false)) { + mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, + 0, ETH_ALEN); + vlan_obj->get_n_elements(bp, vlan_obj, 1, + (u8 *)&ivi->vlan, 0, + VLAN_HLEN); + } + } else { + /* mac */ + if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) + /* mac configured by ndo so its in bulletin board */ + memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); + else + /* function has not been loaded yet. Show mac as 0s */ + memset(&ivi->mac, 0, ETH_ALEN); + + /* vlan */ + if (bulletin->valid_bitmap & (1 << VLAN_VALID)) + /* vlan configured by ndo so its in bulletin board */ + memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN); + else + /* function has not been loaded yet. Show vlans as 0s */ + memset(&ivi->vlan, 0, VLAN_HLEN); + } + + return 0; +} + +/* New mac for VF. Consider these cases: + * 1. 
VF hasn't been acquired yet - save the mac in local bulletin board and + * supply at acquire. + * 2. VF has already been acquired but has not yet initialized - store in local + * bulletin board. mac will be posted on VF bulletin board after VF init. VF + * will configure this mac when it is ready. + * 3. VF has already initialized but has not yet setup a queue - post the new + * mac on VF's bulletin board right now. VF will configure this mac when it + * is ready. + * 4. VF has already set a queue - delete any macs already configured for this + * queue and manually config the new mac. + * In any event, once this function has been called refuse any attempts by the + * VF to configure any mac for itself except for this mac. In case of a race + * where the VF fails to see the new post on its bulletin board before sending a + * mac configuration request, the PF will simply fail the request and VF can try + * again after consulting its bulletin board. + */ +int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) +{ + struct bnx2x *bp = netdev_priv(dev); + int rc, q_logical_state; + struct bnx2x_virtf *vf = NULL; + struct pf_vf_bulletin_content *bulletin = NULL; + + /* sanity and init */ + rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); + if (rc) + return rc; + if (!is_valid_ether_addr(mac)) { + BNX2X_ERR("mac address invalid\n"); + return -EINVAL; + } + + /* update PF's copy of the VF's bulletin. Will no longer accept mac + * configuration requests from vf unless match this mac + */ + bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID; + memcpy(bulletin->mac, mac, ETH_ALEN); + + /* Post update on VF's bulletin board */ + rc = bnx2x_post_vf_bulletin(bp, vfidx); + if (rc) { + BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx); + return rc; + } + + q_logical_state = + bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); + if (vf->state == VF_ENABLED && + q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { + /* configure the mac in device on this vf's queue */ + unsigned long ramrod_flags = 0; + struct bnx2x_vlan_mac_obj *mac_obj; + + /* User should be able to see failure reason in system logs */ + if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) + return -EINVAL; + + /* must lock vfpf channel to protect against vf flows */ + bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); + + /* remove existing eth macs */ + mac_obj = &bnx2x_leading_vfq(vf, mac_obj); + rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); + if (rc) { + BNX2X_ERR("failed to delete eth macs\n"); + rc = -EINVAL; + goto out; + } + + /* remove existing uc list macs */ + rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true); + if (rc) { + BNX2X_ERR("failed to delete uc_list macs\n"); + rc = -EINVAL; + goto out; + } + + /* configure the new mac to device */ + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, + BNX2X_ETH_MAC, &ramrod_flags); + +out: + bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); + } + + return rc; +} + +int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) +{ + struct bnx2x_queue_state_params q_params = {NULL}; + struct bnx2x_vlan_mac_ramrod_params ramrod_param; + struct bnx2x_queue_update_params *update_params; + struct pf_vf_bulletin_content *bulletin = NULL; + struct bnx2x_rx_mode_ramrod_params rx_ramrod; + struct bnx2x *bp = netdev_priv(dev); + struct bnx2x_vlan_mac_obj *vlan_obj; + unsigned long vlan_mac_flags = 0; + unsigned long ramrod_flags = 0; + struct bnx2x_virtf *vf = NULL; + 
unsigned long accept_flags; + int rc; + + /* sanity and init */ + rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); + if (rc) + return rc; + + if (vlan > 4095) { + BNX2X_ERR("illegal vlan value %d\n", vlan); + return -EINVAL; + } + + DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n", + vfidx, vlan, 0); + + /* update PF's copy of the VF's bulletin. No point in posting the vlan + * to the VF since it doesn't have anything to do with it. But it useful + * to store it here in case the VF is not up yet and we can only + * configure the vlan later when it does. Treat vlan id 0 as remove the + * Host tag. + */ + if (vlan > 0) + bulletin->valid_bitmap |= 1 << VLAN_VALID; + else + bulletin->valid_bitmap &= ~(1 << VLAN_VALID); + bulletin->vlan = vlan; + + /* is vf initialized and queue set up? */ + if (vf->state != VF_ENABLED || + bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != + BNX2X_Q_LOGICAL_STATE_ACTIVE) + return rc; + + /* User should be able to see error in system logs */ + if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) + return -EINVAL; + + /* must lock vfpf channel to protect against vf flows */ + bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); + + /* remove existing vlans */ + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); + rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, + &ramrod_flags); + if (rc) { + BNX2X_ERR("failed to delete vlans\n"); + rc = -EINVAL; + goto out; + } + + /* need to remove/add the VF's accept_any_vlan bit */ + accept_flags = bnx2x_leading_vfq(vf, accept_flags); + if (vlan) + clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); + else + set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); + + bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf, + accept_flags); + bnx2x_leading_vfq(vf, accept_flags) = accept_flags; + bnx2x_config_rx_mode(bp, &rx_ramrod); + + /* configure the new vlan to device */ + memset(&ramrod_param, 0, sizeof(ramrod_param)); + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + ramrod_param.vlan_mac_obj = vlan_obj; + ramrod_param.ramrod_flags = ramrod_flags; + set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &ramrod_param.user_req.vlan_mac_flags); + ramrod_param.user_req.u.vlan.vlan = vlan; + ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; + rc = bnx2x_config_vlan_mac(bp, &ramrod_param); + if (rc) { + BNX2X_ERR("failed to configure vlan\n"); + rc = -EINVAL; + goto out; + } + + /* send queue update ramrod to configure default vlan and silent + * vlan removal + */ + __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + q_params.cmd = BNX2X_Q_CMD_UPDATE; + q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); + update_params = &q_params.params.update; + __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, + &update_params->update_flags); + __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, + &update_params->update_flags); + if (vlan == 0) { + /* if vlan is 0 then we want to leave the VF traffic + * untagged, and leave the incoming traffic untouched + * (i.e. do not remove any vlan tags). + */ + __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, + &update_params->update_flags); + __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, + &update_params->update_flags); + } else { + /* configure default vlan to vf queue and set silent + * vlan removal (the vf remains unaware of this vlan). 
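The silent-removal match programmed just below keys only on the 12-bit VLAN ID, so priority (PCP) and DEI bits never affect the match. A standalone sketch of the comparison the device applies; VLAN_VID_MASK is as in <linux/if_vlan.h>, the sample TCI value is invented:

#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK	0x0fff	/* 12-bit VLAN ID, as in <linux/if_vlan.h> */

int main(void)
{
	uint16_t vlan = 100;			/* hypervisor-assigned VID */
	uint16_t value = vlan & VLAN_VID_MASK;	/* silent_removal_value */
	uint16_t mask = VLAN_VID_MASK;		/* silent_removal_mask */
	uint16_t rx_tci = (3 << 13) | 100;	/* incoming tag: PCP 3, VID 100 */

	/* the tag is silently stripped iff the masked TCI matches */
	printf("strip: %s\n", (rx_tci & mask) == value ? "yes" : "no");
	return 0;
}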
+ */
+		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+			  &update_params->update_flags);
+		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+			  &update_params->update_flags);
+		update_params->def_vlan = vlan;
+		update_params->silent_removal_value =
+			vlan & VLAN_VID_MASK;
+		update_params->silent_removal_mask = VLAN_VID_MASK;
+	}
+
+	/* Update the Queue state */
+	rc = bnx2x_queue_state_change(bp, &q_params);
+	if (rc) {
+		BNX2X_ERR("Failed to configure default VLAN\n");
+		goto out;
+	}
+
+	/* clear the flag indicating that this VF needs its vlan
+	 * (will only be set if the HV configured the vlan before the VF was
+	 * up and we were called because the VF came up later)
+	 */
+out:
+	vf->cfg_flags &= ~VF_CFG_VLAN;
+	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+
+	return rc;
+}
+
+/* crc is the first field in the bulletin board. Compute the crc over the
+ * entire bulletin board excluding the crc field itself. Use the length field
+ * as the Bulletin Board was posted by a PF with possibly a different version
+ * from the vf which will sample it. Therefore, the length is computed by the
+ * PF and then used blindly by the VF.
+ */
+u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
+			  struct pf_vf_bulletin_content *bulletin)
+{
+	return crc32(BULLETIN_CRC_SEED,
+		     ((u8 *)bulletin) + sizeof(bulletin->crc),
+		     bulletin->length - sizeof(bulletin->crc));
+}
+
+/* Check for new posts on the bulletin board */
+enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
+{
+	struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
+	int attempts;
+
+	/* bulletin board hasn't changed since last sample */
+	if (bp->old_bulletin.version == bulletin.version)
+		return PFVF_BULLETIN_UNCHANGED;
+
+	/* validate crc of new bulletin board */
+	if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
+		/* sampling the structure mid-post may result in corrupted
+		 * data; validate crc to ensure coherency.
+		 */
+		for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
+			bulletin = bp->pf2vf_bulletin->content;
+			if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
+								  &bulletin))
+				break;
+			BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
+				  bulletin.crc,
+				  bnx2x_crc_vf_bulletin(bp, &bulletin));
+		}
+		if (attempts >= BULLETIN_ATTEMPTS) {
+			BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times.
Aborting\n", + attempts); + return PFVF_BULLETIN_CRC_ERR; + } + } + + /* the mac address in bulletin board is valid and is new */ + if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID && + !ether_addr_equal(bulletin.mac, bp->old_bulletin.mac)) { + /* update new mac to net device */ + memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN); + } + + /* the vlan in bulletin board is valid and is new */ + if (bulletin.valid_bitmap & 1 << VLAN_VALID) + memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN); + + /* copy new bulletin board to bp */ + bp->old_bulletin = bulletin; + + return PFVF_BULLETIN_UPDATED; +} + +void bnx2x_timer_sriov(struct bnx2x *bp) +{ + bnx2x_sample_bulletin(bp); + + /* if channel is down we need to self destruct */ + if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, + BNX2X_MSG_IOV); +} + +void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) +{ + /* vf doorbells are embedded within the regview */ + return bp->regview + PXP_VF_ADDR_DB_START; +} + +void bnx2x_vf_pci_dealloc(struct bnx2x *bp) +{ + BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, + sizeof(struct bnx2x_vf_mbx_msg)); + BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, + sizeof(union pf_vf_bulletin)); +} + +int bnx2x_vf_pci_alloc(struct bnx2x *bp) +{ + mutex_init(&bp->vf2pf_mutex); + + /* allocate vf2pf mailbox for vf to pf channel */ + bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping, + sizeof(struct bnx2x_vf_mbx_msg)); + if (!bp->vf2pf_mbox) + goto alloc_mem_err; + + /* allocate pf 2 vf bulletin board */ + bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping, + sizeof(union pf_vf_bulletin)); + if (!bp->pf2vf_bulletin) + goto alloc_mem_err; + + return 0; + +alloc_mem_err: + bnx2x_vf_pci_dealloc(bp); + return -ENOMEM; +} + +void bnx2x_iov_channel_down(struct bnx2x *bp) +{ + int vf_idx; + struct pf_vf_bulletin_content *bulletin; + + if (!IS_SRIOV(bp)) + return; + + for_each_vf(bp, vf_idx) { + /* locate this VFs bulletin board and update the channel down + * bit + */ + bulletin = BP_VF_BULLETIN(bp, vf_idx); + bulletin->valid_bitmap |= 1 << CHANNEL_DOWN; + + /* update vf bulletin board */ + bnx2x_post_vf_bulletin(bp, vf_idx); + } +} + +void bnx2x_iov_task(struct work_struct *work) +{ + struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work); + + if (!netif_running(bp->dev)) + return; + + if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR, + &bp->iov_task_state)) + bnx2x_vf_handle_flr_event(bp); + + if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG, + &bp->iov_task_state)) + bnx2x_vf_mbx(bp); +} + +void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) +{ + smp_mb__before_atomic(); + set_bit(flag, &bp->iov_task_state); + smp_mb__after_atomic(); + DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); + queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0); +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h new file mode 100644 index 00000000000..96c575e147a --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -0,0 +1,584 @@ +/* bnx2x_sriov.h: Broadcom Everest network driver. 
+ * + * Copyright 2009-2013 Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. + * + * Maintained by: Ariel Elior <ariel.elior@qlogic.com> + * Written by: Shmulik Ravid + * Ariel Elior <ariel.elior@qlogic.com> + */ +#ifndef BNX2X_SRIOV_H +#define BNX2X_SRIOV_H + +#include "bnx2x_vfpf.h" +#include "bnx2x.h" + +enum sample_bulletin_result { + PFVF_BULLETIN_UNCHANGED, + PFVF_BULLETIN_UPDATED, + PFVF_BULLETIN_CRC_ERR +}; + +#ifdef CONFIG_BNX2X_SRIOV + +extern struct workqueue_struct *bnx2x_iov_wq; + +/* The bnx2x device structure holds vfdb structure described below. + * The VF array is indexed by the relative vfid. + */ +#define BNX2X_VF_MAX_QUEUES 16 +#define BNX2X_VF_MAX_TPA_AGG_QUEUES 8 + +struct bnx2x_sriov { + u32 first_vf_in_pf; + + /* standard SRIOV capability fields, mostly for debugging */ + int pos; /* capability position */ + int nres; /* number of resources */ + u32 cap; /* SR-IOV Capabilities */ + u16 ctrl; /* SR-IOV Control */ + u16 total; /* total VFs associated with the PF */ + u16 initial; /* initial VFs associated with the PF */ + u16 nr_virtfn; /* number of VFs available */ + u16 offset; /* first VF Routing ID offset */ + u16 stride; /* following VF stride */ + u32 pgsz; /* page size for BAR alignment */ + u8 link; /* Function Dependency Link */ +}; + +/* bars */ +struct bnx2x_vf_bar { + u64 bar; + u32 size; +}; + +struct bnx2x_vf_bar_info { + struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS]; + u8 nr_bars; +}; + +/* vf queue (used both for rx or tx) */ +struct bnx2x_vf_queue { + struct eth_context *cxt; + + /* MACs object */ + struct bnx2x_vlan_mac_obj mac_obj; + + /* VLANs object */ + struct bnx2x_vlan_mac_obj vlan_obj; + atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */ + unsigned long accept_flags; /* last accept flags configured */ + + /* Queue Slow-path State object */ + struct bnx2x_queue_sp_obj sp_obj; + + u32 cid; + u16 index; + u16 sb_idx; + bool is_leading; + bool sp_initialized; +}; + +/* struct bnx2x_vf_queue_construct_params - prepare queue construction + * parameters: q-init, q-setup and SB index + */ +struct bnx2x_vf_queue_construct_params { + struct bnx2x_queue_state_params qstate; + struct bnx2x_queue_setup_params prep_qsetup; +}; + +/* forward */ +struct bnx2x_virtf; + +/* VFOP definitions */ + +struct bnx2x_vf_mac_vlan_filter { + int type; +#define BNX2X_VF_FILTER_MAC 1 +#define BNX2X_VF_FILTER_VLAN 2 + + bool add; + u8 *mac; + u16 vid; +}; + +struct bnx2x_vf_mac_vlan_filters { + int count; + struct bnx2x_vf_mac_vlan_filter filters[]; +}; + +/* vf context */ +struct bnx2x_virtf { + u16 cfg_flags; +#define VF_CFG_STATS 0x0001 +#define VF_CFG_FW_FC 0x0002 +#define VF_CFG_TPA 0x0004 +#define VF_CFG_INT_SIMD 0x0008 +#define VF_CACHE_LINE 0x0010 +#define VF_CFG_VLAN 0x0020 +#define VF_CFG_STATS_COALESCE 0x0040 + + u8 state; +#define VF_FREE 0 /* VF ready to be acquired holds no resc */ +#define VF_ACQUIRED 1 /* VF acquired, but not initialized */ +#define VF_ENABLED 2 /* VF Enabled */ +#define VF_RESET 3 /* VF FLR'd, pending cleanup */ + + bool 
flr_clnup_stage; /* true during flr cleanup */ + + /* dma */ + dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */ + u16 stats_stride; + dma_addr_t spq_map; + dma_addr_t bulletin_map; + + /* Allocated resources counters. Before the VF is acquired, the + * counters hold the following values: + * + * - xxq_count = 0 as the queues memory is not allocated yet. + * + * - sb_count = The number of status blocks configured for this VF in + * the IGU CAM. Initially read during probe. + * + * - xx_rules_count = The number of rules statically and equally + * allocated for each VF, during PF load. + */ + struct vf_pf_resc_request alloc_resc; +#define vf_rxq_count(vf) ((vf)->alloc_resc.num_rxqs) +#define vf_txq_count(vf) ((vf)->alloc_resc.num_txqs) +#define vf_sb_count(vf) ((vf)->alloc_resc.num_sbs) +#define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters) +#define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters) +#define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters) + /* Hide a single vlan filter credit for the hypervisor */ +#define vf_vlan_rules_visible_cnt(vf) (vf_vlan_rules_cnt(vf) - 1) + + u8 sb_count; /* actual number of SBs */ + u8 igu_base_id; /* base igu status block id */ + + struct bnx2x_vf_queue *vfqs; +#define LEADING_IDX 0 +#define bnx2x_vfq_is_leading(vfq) ((vfq)->index == LEADING_IDX) +#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var) +#define bnx2x_leading_vfq(vf, var) ((vf)->vfqs[LEADING_IDX].var) + + u8 index; /* index in the vf array */ + u8 abs_vfid; + u8 sp_cl_id; + u32 error; /* 0 means all's-well */ + + /* BDF */ + unsigned int bus; + unsigned int devfn; + + /* bars */ + struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS]; + + /* set-mac ramrod state 1-pending, 0-done */ + unsigned long filter_state; + + /* leading rss client id ~~ the client id of the first rxq, must be + * set for each txq. + */ + int leading_rss; + + /* MCAST object */ + int mcast_list_len; + struct bnx2x_mcast_obj mcast_obj; + + /* RSS configuration object */ + struct bnx2x_rss_config_obj rss_conf_obj; + + /* slow-path operations */ + struct mutex op_mutex; /* one vfop at a time mutex */ + enum channel_tlvs op_current; +}; + +#define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn) + +#define for_each_vf(bp, var) \ + for ((var) = 0; (var) < BNX2X_NR_VIRTFN(bp); (var)++) + +#define for_each_vfq(vf, var) \ + for ((var) = 0; (var) < vf_rxq_count(vf); (var)++) + +#define for_each_vf_sb(vf, var) \ + for ((var) = 0; (var) < vf_sb_count(vf); (var)++) + +#define is_vf_multi(vf) (vf_rxq_count(vf) > 1) + +#define HW_VF_HANDLE(bp, abs_vfid) \ + (u16)(BP_ABS_FUNC((bp)) | (1<<3) | ((u16)(abs_vfid) << 4)) + +#define FW_PF_MAX_HANDLE 8 + +#define FW_VF_HANDLE(abs_vfid) \ + (abs_vfid + FW_PF_MAX_HANDLE) + +/* locking and unlocking the channel mutex */ +void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, + enum channel_tlvs tlv); + +void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, + enum channel_tlvs expected_tlv); + +/* VF mail box (aka vf-pf channel) */ + +/* a container for the bi-directional vf<-->pf messages. 
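HW_VF_HANDLE() above packs a VF into the FID format used when the PF "pretends" to be one of its VFs: the PF's absolute function in the low bits, a VF-valid flag at bit 3, and the absolute VF id from bit 4 up (bit positions read off the macro; the sample values below are invented):

#include <stdint.h>
#include <stdio.h>

static uint16_t hw_vf_handle(uint16_t abs_func, uint16_t abs_vfid)
{
	/* mirrors HW_VF_HANDLE(): func | VF-valid bit | vfid << 4 */
	return (uint16_t)(abs_func | (1 << 3) | (abs_vfid << 4));
}

int main(void)
{
	/* PF function 2, VF 5 -> 0x005a */
	printf("handle = 0x%04x\n", hw_vf_handle(2, 5));
	return 0;
}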
+ * The actual response will be placed according to the offset parameter + * provided in the request + */ + +#define MBX_MSG_ALIGN 8 +#define MBX_MSG_ALIGNED_SIZE (roundup(sizeof(struct bnx2x_vf_mbx_msg), \ + MBX_MSG_ALIGN)) + +struct bnx2x_vf_mbx_msg { + union vfpf_tlvs req; + union pfvf_tlvs resp; +}; + +struct bnx2x_vf_mbx { + struct bnx2x_vf_mbx_msg *msg; + dma_addr_t msg_mapping; + + /* VF GPA address */ + u32 vf_addr_lo; + u32 vf_addr_hi; + + struct vfpf_first_tlv first_tlv; /* saved VF request header */ +}; + +struct bnx2x_vf_sp { + union { + struct eth_classify_rules_ramrod_data e2; + } mac_rdata; + + union { + struct eth_classify_rules_ramrod_data e2; + } vlan_rdata; + + union { + struct eth_filter_rules_ramrod_data e2; + } rx_mode_rdata; + + union { + struct eth_multicast_rules_ramrod_data e2; + } mcast_rdata; + + union { + struct client_init_ramrod_data init_data; + struct client_update_ramrod_data update_data; + } q_data; + + union { + struct eth_rss_update_ramrod_data e2; + } rss_rdata; +}; + +struct hw_dma { + void *addr; + dma_addr_t mapping; + size_t size; +}; + +struct bnx2x_vfdb { +#define BP_VFDB(bp) ((bp)->vfdb) + /* vf array */ + struct bnx2x_virtf *vfs; +#define BP_VF(bp, idx) (&((bp)->vfdb->vfs[(idx)])) +#define bnx2x_vf(bp, idx, var) ((bp)->vfdb->vfs[(idx)].var) + + /* queue array - for all vfs */ + struct bnx2x_vf_queue *vfqs; + + /* vf HW contexts */ + struct hw_dma context[BNX2X_VF_CIDS/ILT_PAGE_CIDS]; +#define BP_VF_CXT_PAGE(bp, i) (&(bp)->vfdb->context[(i)]) + + /* SR-IOV information */ + struct bnx2x_sriov sriov; + struct hw_dma mbx_dma; +#define BP_VF_MBX_DMA(bp) (&((bp)->vfdb->mbx_dma)) + struct bnx2x_vf_mbx mbxs[BNX2X_MAX_NUM_OF_VFS]; +#define BP_VF_MBX(bp, vfid) (&((bp)->vfdb->mbxs[(vfid)])) + + struct hw_dma bulletin_dma; +#define BP_VF_BULLETIN_DMA(bp) (&((bp)->vfdb->bulletin_dma)) +#define BP_VF_BULLETIN(bp, vf) \ + (((struct pf_vf_bulletin_content *)(BP_VF_BULLETIN_DMA(bp)->addr)) \ + + (vf)) + + struct hw_dma sp_dma; +#define bnx2x_vf_sp(bp, vf, field) ((bp)->vfdb->sp_dma.addr + \ + (vf)->index * sizeof(struct bnx2x_vf_sp) + \ + offsetof(struct bnx2x_vf_sp, field)) +#define bnx2x_vf_sp_map(bp, vf, field) ((bp)->vfdb->sp_dma.mapping + \ + (vf)->index * sizeof(struct bnx2x_vf_sp) + \ + offsetof(struct bnx2x_vf_sp, field)) + +#define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32) + u32 flrd_vfs[FLRD_VFS_DWORDS]; + + /* the number of msix vectors belonging to this PF designated for VFs */ + u16 vf_sbs_pool; + u16 first_vf_igu_entry; + + /* sp_rtnl synchronization */ + struct mutex event_mutex; + u64 event_occur; +}; + +/* queue access */ +static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index) +{ + return &(vf->vfqs[index]); +} + +/* FW ids */ +static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx) +{ + return vf->igu_base_id + sb_idx; +} + +static inline u8 vf_hc_qzone(struct bnx2x_virtf *vf, u16 sb_idx) +{ + return vf_igu_sb(vf, sb_idx); +} + +static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) +{ + return vf->igu_base_id + q->index; +} + +static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) +{ + if (vf->cfg_flags & VF_CFG_STATS_COALESCE) + return vf->leading_rss; + else + return vfq_cl_id(vf, q); +} + +static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) +{ + return vfq_cl_id(vf, q); +} + +/* global iov routines */ +int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line); +int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, int num_vfs_param); +void 
bnx2x_iov_remove_one(struct bnx2x *bp); +void bnx2x_iov_free_mem(struct bnx2x *bp); +int bnx2x_iov_alloc_mem(struct bnx2x *bp); +int bnx2x_iov_nic_init(struct bnx2x *bp); +int bnx2x_iov_chip_cleanup(struct bnx2x *bp); +void bnx2x_iov_init_dq(struct bnx2x *bp); +void bnx2x_iov_init_dmae(struct bnx2x *bp); +void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, + struct bnx2x_queue_sp_obj **q_obj); +int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem); +void bnx2x_iov_adjust_stats_req(struct bnx2x *bp); +void bnx2x_iov_storm_stats_update(struct bnx2x *bp); +/* global vf mailbox routines */ +void bnx2x_vf_mbx(struct bnx2x *bp); +void bnx2x_vf_mbx_schedule(struct bnx2x *bp, + struct vf_pf_event_data *vfpf_event); +void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid); + +/* CORE VF API */ +typedef u8 bnx2x_mac_addr_t[ETH_ALEN]; + +/* acquire */ +int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct vf_pf_resc_request *resc); +/* init */ +int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, + dma_addr_t *sb_map); + +/* VFOP queue construction helpers */ +void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_queue_init_params *init_params, + struct bnx2x_queue_setup_params *setup_params, + u16 q_idx, u16 sb_idx); + +void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_queue_init_params *init_params, + struct bnx2x_queue_setup_params *setup_params, + u16 q_idx, u16 sb_idx); + +void bnx2x_vfop_qctor_prep(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vf_queue *q, + struct bnx2x_vf_queue_construct_params *p, + unsigned long q_type); + +int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mac_vlan_filters *filters, + int qid, bool drv_only); + +int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, + struct bnx2x_vf_queue_construct_params *qctor); + +int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid); + +int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, + bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only); + +int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf, + int qid, unsigned long accept_flags); + +int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf); + +int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf); + +int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_config_rss_params *rss); + +int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct vfpf_tpa_tlv *tlv, + struct bnx2x_queue_update_tpa_params *params); + +/* VF release ~ VF close + VF release-resources + * + * Release is the ultimate SW shutdown and is called whenever an + * irrecoverable error is encountered. 
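bnx2x_vf_mcast() above takes its multicast list as a pointer into an array of the bnx2x_mac_addr_t array typedef. A standalone sketch of how such a list is built and indexed; the names here are stand-ins, not the driver's:

#include <stdio.h>

#define ETH_ALEN 6
typedef unsigned char mac_addr_t[ETH_ALEN];	/* like bnx2x_mac_addr_t */

static void print_mcast(mac_addr_t *mcasts, int mc_num)
{
	for (int i = 0; i < mc_num; i++)
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mcasts[i][0], mcasts[i][1], mcasts[i][2],
		       mcasts[i][3], mcasts[i][4], mcasts[i][5]);
}

int main(void)
{
	mac_addr_t list[2] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },	/* 224.0.0.1 */
		{ 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 },	/* ff02::1 */
	};

	print_mcast(list, 2);
	return 0;
}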
+ */ +int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf); +int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid); +u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf); + +/* FLR routines */ + +/* VF FLR helpers */ +int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid); +void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid); + +/* Handles an FLR (or VF_DISABLE) notification form the MCP */ +void bnx2x_vf_handle_flr_event(struct bnx2x *bp); + +bool bnx2x_tlv_supported(u16 tlvtype); + +u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, + struct pf_vf_bulletin_content *bulletin); +int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf); + +enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); + +/* VF side vfpf channel functions */ +int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count); +int bnx2x_vfpf_release(struct bnx2x *bp); +int bnx2x_vfpf_release(struct bnx2x *bp); +int bnx2x_vfpf_init(struct bnx2x *bp); +void bnx2x_vfpf_close_vf(struct bnx2x *bp); +int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, + bool is_leading); +int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set); +int bnx2x_vfpf_config_rss(struct bnx2x *bp, + struct bnx2x_config_rss_params *params); +int bnx2x_vfpf_set_mcast(struct net_device *dev); +int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp); + +static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf, + size_t buf_len) +{ + strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len); +} + +static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp, + struct bnx2x_fastpath *fp) +{ + return PXP_VF_ADDR_USDM_QUEUES_START + + bp->acquire_resp.resc.hw_qid[fp->index] * + sizeof(struct ustorm_queue_zone_data); +} + +enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); +void bnx2x_timer_sriov(struct bnx2x *bp); +void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp); +void bnx2x_vf_pci_dealloc(struct bnx2x *bp); +int bnx2x_vf_pci_alloc(struct bnx2x *bp); +int bnx2x_enable_sriov(struct bnx2x *bp); +void bnx2x_disable_sriov(struct bnx2x *bp); +static inline int bnx2x_vf_headroom(struct bnx2x *bp) +{ + return bp->vfdb->sriov.nr_virtfn * BNX2X_CIDS_PER_VF; +} +void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp); +int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs); +void bnx2x_iov_channel_down(struct bnx2x *bp); + +void bnx2x_iov_task(struct work_struct *work); + +void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag); + +#else /* CONFIG_BNX2X_SRIOV */ + +static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, + struct bnx2x_queue_sp_obj **q_obj) {} +static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {} +static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp, + union event_ring_elem *elem) {return 1; } +static inline void bnx2x_vf_mbx(struct bnx2x *bp) {} +static inline void bnx2x_vf_mbx_schedule(struct bnx2x *bp, + struct vf_pf_event_data *vfpf_event) {} +static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; } +static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {} +static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; } +static inline void bnx2x_iov_free_mem(struct bnx2x *bp) {} +static inline int bnx2x_iov_chip_cleanup(struct bnx2x *bp) {return 0; } +static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {} +static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, + int num_vfs_param) {return 0; } +static inline void 
bnx2x_iov_remove_one(struct bnx2x *bp) {} +static inline int bnx2x_enable_sriov(struct bnx2x *bp) {return 0; } +static inline void bnx2x_disable_sriov(struct bnx2x *bp) {} +static inline int bnx2x_vfpf_acquire(struct bnx2x *bp, + u8 tx_count, u8 rx_count) {return 0; } +static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; } +static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; } +static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {} +static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; } +static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, + u8 vf_qid, bool set) {return 0; } +static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp, + struct bnx2x_config_rss_params *params) {return 0; } +static inline int bnx2x_vfpf_set_mcast(struct net_device *dev) {return 0; } +static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; } +static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; } +static inline int bnx2x_vf_headroom(struct bnx2x *bp) {return 0; } +static inline void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) {} +static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf, + size_t buf_len) {} +static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp, + struct bnx2x_fastpath *fp) {return 0; } +static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) +{ + return PFVF_BULLETIN_UNCHANGED; +} +static inline void bnx2x_timer_sriov(struct bnx2x *bp) {} + +static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) +{ + return NULL; +} + +static inline void bnx2x_vf_pci_dealloc(struct bnx2x *bp) {} +static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; } +static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {} +static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; } +static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {} + +static inline void bnx2x_iov_task(struct work_struct *work) {} +static inline void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) {} + +#endif /* CONFIG_BNX2X_SRIOV */ +#endif /* bnx2x_sriov.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 1adef266fcd..ca47665f94b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -1,12 +1,12 @@ /* bnx2x_stats.c: Broadcom Everest network driver. * - * Copyright (c) 2007-2011 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. 
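The CONFIG_BNX2X_SRIOV=n branch above replaces every SR-IOV entry point with a static inline no-op, so call sites in the core driver need no #ifdefs and the stubs cost nothing at runtime. The same pattern in isolation, with an invented feature name:

#include <stdio.h>

#ifdef CONFIG_FEATURE
int feature_init(void);				/* real implementation elsewhere */
#else
static inline int feature_init(void) { return 0; }	/* compiled-out stub */
#endif

int main(void)
{
	printf("rc = %d\n", feature_init());	/* no #ifdef at the call site */
	return 0;
}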
* - * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com> * Written by: Eliezer Tamir * Based on code from Michael Chan's bnx2 driver * UDP CSUM errata workaround by Arik Gendelman @@ -19,7 +19,7 @@ #include "bnx2x_stats.h" #include "bnx2x_cmn.h" - +#include "bnx2x_sriov.h" /* Statistics */ @@ -39,14 +39,39 @@ static inline long bnx2x_hilo(u32 *hiref) #endif } -static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp) +static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp) { - u16 res = sizeof(struct host_port_stats) >> 2; + u16 res = 0; + + /* 'newest' convention - shmem2 cotains the size of the port stats */ + if (SHMEM2_HAS(bp, sizeof_port_stats)) { + u32 size = SHMEM2_RD(bp, sizeof_port_stats); + if (size) + res = size; - /* if PFC stats are not supported by the MFW, don't DMA them */ - if (!(bp->flags & BC_SUPPORTS_PFC_STATS)) - res -= (sizeof(u32)*4) >> 2; + /* prevent newer BC from causing buffer overflow */ + if (res > sizeof(struct host_port_stats)) + res = sizeof(struct host_port_stats); + } + /* Older convention - all BCs support the port stats' fields up until + * the 'not_used' field + */ + if (!res) { + res = offsetof(struct host_port_stats, not_used) + 4; + + /* if PFC stats are supported by the MFW, DMA them as well */ + if (bp->flags & BC_SUPPORTS_PFC_STATS) { + res += offsetof(struct host_port_stats, + pfc_frames_rx_lo) - + offsetof(struct host_port_stats, + pfc_frames_tx_hi) + 4 ; + } + } + + res >>= 2; + + WARN_ON(res > 2 * DMAE_LEN32_RD_MAX); return res; } @@ -54,6 +79,42 @@ static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp) * Init service functions */ +static void bnx2x_dp_stats(struct bnx2x *bp) +{ + int i; + + DP(BNX2X_MSG_STATS, "dumping stats:\n" + "fw_stats_req\n" + " hdr\n" + " cmd_num %d\n" + " reserved0 %d\n" + " drv_stats_counter %d\n" + " reserved1 %d\n" + " stats_counters_addrs %x %x\n", + bp->fw_stats_req->hdr.cmd_num, + bp->fw_stats_req->hdr.reserved0, + bp->fw_stats_req->hdr.drv_stats_counter, + bp->fw_stats_req->hdr.reserved1, + bp->fw_stats_req->hdr.stats_counters_addrs.hi, + bp->fw_stats_req->hdr.stats_counters_addrs.lo); + + for (i = 0; i < bp->fw_stats_req->hdr.cmd_num; i++) { + DP(BNX2X_MSG_STATS, + "query[%d]\n" + " kind %d\n" + " index %d\n" + " funcID %d\n" + " reserved %d\n" + " address %x %x\n", + i, bp->fw_stats_req->query[i].kind, + bp->fw_stats_req->query[i].index, + bp->fw_stats_req->query[i].funcID, + bp->fw_stats_req->query[i].reserved, + bp->fw_stats_req->query[i].address.hi, + bp->fw_stats_req->query[i].address.lo); + } +} + /* Post the next statistics ramrod. 
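bnx2x_get_port_stats_dma_len() above prefers the size advertised in shmem2, clamped to the driver's own struct so a newer bootcode cannot overflow it, and otherwise falls back to the pre-PFC layout. The same rules with a toy layout; the real struct lives in the driver's HSI headers:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct port_stats {			/* toy stand-in for host_port_stats */
	uint32_t legacy[10];
	uint32_t not_used;
	uint32_t pfc_frames_tx_hi;
	uint32_t pfc_frames_tx_lo;
	uint32_t pfc_frames_rx_hi;
	uint32_t pfc_frames_rx_lo;
};

static size_t stats_dma_dwords(size_t shmem2_size, int pfc_supported)
{
	size_t res = 0;

	if (shmem2_size) {
		res = shmem2_size;
		if (res > sizeof(struct port_stats))	/* newer BC: clamp */
			res = sizeof(struct port_stats);
	}
	if (!res) {
		/* older convention: all fields up to and including not_used */
		res = offsetof(struct port_stats, not_used) + 4;
		if (pfc_supported)
			res += offsetof(struct port_stats, pfc_frames_rx_lo) -
			       offsetof(struct port_stats, pfc_frames_tx_hi) + 4;
	}
	return res >> 2;			/* bytes -> 32-bit dwords */
}

int main(void)
{
	printf("%zu dwords\n", stats_dma_dwords(0, 1));	/* old BC with PFC */
	return 0;
}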
Protect it with the spin in * order to ensure the strict order between statistics ramrods * (each ramrod has a sequence number passed in a @@ -75,10 +136,12 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp) bp->fw_stats_req->hdr.drv_stats_counter = cpu_to_le16(bp->stats_counter++); - DP(NETIF_MSG_TIMER, "Sending statistics ramrod %d\n", + DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n", bp->fw_stats_req->hdr.drv_stats_counter); - + /* adjust the ramrod to include VF queues statistics */ + bnx2x_iov_adjust_stats_req(bp); + bnx2x_dp_stats(bp); /* send FW stats ramrod */ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, @@ -101,6 +164,11 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp) if (CHIP_REV_IS_SLOW(bp)) return; + /* Update MCP's statistics if possible */ + if (bp->func_stx) + memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats, + sizeof(bp->func_stats)); + /* loader */ if (bp->executer_idx) { int loader_idx = PMF_DMAE_C(bp); @@ -128,7 +196,7 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp) } else if (bp->func_stx) { *stats_comp = 0; - bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); + bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp); } } @@ -144,7 +212,7 @@ static int bnx2x_stats_comp(struct bnx2x *bp) break; } cnt--; - usleep_range(1000, 1000); + usleep_range(1000, 2000); } return 1; } @@ -153,7 +221,8 @@ static int bnx2x_stats_comp(struct bnx2x *bp) * Statistics service functions */ -static void bnx2x_stats_pmf_update(struct bnx2x *bp) +/* should be called under stats_sema */ +static void __bnx2x_stats_pmf_update(struct bnx2x *bp) { struct dmae_command *dmae; u32 opcode; @@ -161,7 +230,7 @@ static void bnx2x_stats_pmf_update(struct bnx2x *bp) u32 *stats_comp = bnx2x_sp(bp, stats_comp); /* sanity */ - if (!IS_MF(bp) || !bp->port.pmf || !bp->port.port_stx) { + if (!bp->port.pmf || !bp->port.port_stx) { BNX2X_ERR("BUG!\n"); return; } @@ -450,29 +519,61 @@ static void bnx2x_func_stats_init(struct bnx2x *bp) *stats_comp = 0; } -static void bnx2x_stats_start(struct bnx2x *bp) +/* should be called under stats_sema */ +static void __bnx2x_stats_start(struct bnx2x *bp) { - if (bp->port.pmf) - bnx2x_port_stats_init(bp); + if (IS_PF(bp)) { + if (bp->port.pmf) + bnx2x_port_stats_init(bp); - else if (bp->func_stx) - bnx2x_func_stats_init(bp); + else if (bp->func_stx) + bnx2x_func_stats_init(bp); - bnx2x_hw_stats_post(bp); - bnx2x_storm_stats_post(bp); + bnx2x_hw_stats_post(bp); + bnx2x_storm_stats_post(bp); + } + + bp->stats_started = true; +} + +static void bnx2x_stats_start(struct bnx2x *bp) +{ + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); + __bnx2x_stats_start(bp); + up(&bp->stats_sema); } static void bnx2x_stats_pmf_start(struct bnx2x *bp) { + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); bnx2x_stats_comp(bp); - bnx2x_stats_pmf_update(bp); - bnx2x_stats_start(bp); + __bnx2x_stats_pmf_update(bp); + __bnx2x_stats_start(bp); + up(&bp->stats_sema); +} + +static void bnx2x_stats_pmf_update(struct bnx2x *bp) +{ + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); + __bnx2x_stats_pmf_update(bp); + up(&bp->stats_sema); } static void bnx2x_stats_restart(struct bnx2x *bp) { + /* vfs travel through here as part of the statistics FSM, but no action + * is required + */ + if (IS_VF(bp)) + return; + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); bnx2x_stats_comp(bp); - bnx2x_stats_start(bp); + __bnx2x_stats_start(bp); + 
up(&bp->stats_sema); } static void bnx2x_bmac_stats_update(struct bnx2x *bp) @@ -554,23 +655,11 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp) UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); /* collect PFC stats */ - DIFF_64(diff.hi, new->tx_stat_gtpp_hi, - pstats->pfc_frames_tx_hi, - diff.lo, new->tx_stat_gtpp_lo, - pstats->pfc_frames_tx_lo); pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi; pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo; - ADD_64(pstats->pfc_frames_tx_hi, diff.hi, - pstats->pfc_frames_tx_lo, diff.lo); - DIFF_64(diff.hi, new->rx_stat_grpp_hi, - pstats->pfc_frames_rx_hi, - diff.lo, new->rx_stat_grpp_lo, - pstats->pfc_frames_rx_lo); pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi; pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo; - ADD_64(pstats->pfc_frames_rx_hi, diff.hi, - pstats->pfc_frames_rx_lo, diff.lo); } estats->pause_frames_received_hi = @@ -638,31 +727,30 @@ static void bnx2x_mstat_stats_update(struct bnx2x *bp) tx_stat_dot3statsinternalmactransmiterrors); ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl); - ADD_64(estats->etherstatspkts1024octetsto1522octets_hi, - new->stats_tx.tx_gt1518_hi, - estats->etherstatspkts1024octetsto1522octets_lo, - new->stats_tx.tx_gt1518_lo); + estats->etherstatspkts1024octetsto1522octets_hi = + pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi; + estats->etherstatspkts1024octetsto1522octets_lo = + pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo; - ADD_64(estats->etherstatspktsover1522octets_hi, - new->stats_tx.tx_gt2047_hi, - estats->etherstatspktsover1522octets_lo, - new->stats_tx.tx_gt2047_lo); + estats->etherstatspktsover1522octets_hi = + pstats->mac_stx[1].tx_stat_mac_2047_hi; + estats->etherstatspktsover1522octets_lo = + pstats->mac_stx[1].tx_stat_mac_2047_lo; ADD_64(estats->etherstatspktsover1522octets_hi, - new->stats_tx.tx_gt4095_hi, + pstats->mac_stx[1].tx_stat_mac_4095_hi, estats->etherstatspktsover1522octets_lo, - new->stats_tx.tx_gt4095_lo); + pstats->mac_stx[1].tx_stat_mac_4095_lo); ADD_64(estats->etherstatspktsover1522octets_hi, - new->stats_tx.tx_gt9216_hi, + pstats->mac_stx[1].tx_stat_mac_9216_hi, estats->etherstatspktsover1522octets_lo, - new->stats_tx.tx_gt9216_lo); - + pstats->mac_stx[1].tx_stat_mac_9216_lo); ADD_64(estats->etherstatspktsover1522octets_hi, - new->stats_tx.tx_gt16383_hi, + pstats->mac_stx[1].tx_stat_mac_16383_hi, estats->etherstatspktsover1522octets_lo, - new->stats_tx.tx_gt16383_lo); + pstats->mac_stx[1].tx_stat_mac_16383_lo); estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi; @@ -796,6 +884,12 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp) pstats->host_port_stats_counter++; + if (CHIP_IS_E3(bp)) { + u32 lpi_reg = BP_PORT(bp) ? 
MISC_REG_CPMU_LP_SM_ENT_CNT_P1 + : MISC_REG_CPMU_LP_SM_ENT_CNT_P0; + estats->eee_tx_lpi += REG_RD(bp, lpi_reg); + } + if (!BP_NOMCP(bp)) { u32 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer); @@ -809,108 +903,100 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp) return 0; } -static int bnx2x_storm_stats_update(struct bnx2x *bp) +static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp) { - struct tstorm_per_port_stats *tport = - &bp->fw_stats_data->port.tstorm_port_statistics; - struct tstorm_per_pf_stats *tfunc = - &bp->fw_stats_data->pf.tstorm_pf_statistics; - struct host_func_stats *fstats = bnx2x_sp(bp, func_stats); - struct bnx2x_eth_stats *estats = &bp->eth_stats; struct stats_counter *counters = &bp->fw_stats_data->storm_counters; - int i; u16 cur_stats_counter; - /* Make sure we use the value of the counter * used for sending the last stats ramrod. */ - spin_lock_bh(&bp->stats_lock); cur_stats_counter = bp->stats_counter - 1; - spin_unlock_bh(&bp->stats_lock); /* are storm stats valid? */ if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { - DP(BNX2X_MSG_STATS, "stats not updated by xstorm" - " xstorm counter (0x%x) != stats_counter (0x%x)\n", + DP(BNX2X_MSG_STATS, + "stats not updated by xstorm xstorm counter (0x%x) != stats_counter (0x%x)\n", le16_to_cpu(counters->xstats_counter), bp->stats_counter); return -EAGAIN; } if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) { - DP(BNX2X_MSG_STATS, "stats not updated by ustorm" - " ustorm counter (0x%x) != stats_counter (0x%x)\n", + DP(BNX2X_MSG_STATS, + "stats not updated by ustorm ustorm counter (0x%x) != stats_counter (0x%x)\n", le16_to_cpu(counters->ustats_counter), bp->stats_counter); return -EAGAIN; } if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) { - DP(BNX2X_MSG_STATS, "stats not updated by cstorm" - " cstorm counter (0x%x) != stats_counter (0x%x)\n", + DP(BNX2X_MSG_STATS, + "stats not updated by cstorm cstorm counter (0x%x) != stats_counter (0x%x)\n", le16_to_cpu(counters->cstats_counter), bp->stats_counter); return -EAGAIN; } if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) { - DP(BNX2X_MSG_STATS, "stats not updated by tstorm" - " tstorm counter (0x%x) != stats_counter (0x%x)\n", + DP(BNX2X_MSG_STATS, + "stats not updated by tstorm tstorm counter (0x%x) != stats_counter (0x%x)\n", le16_to_cpu(counters->tstats_counter), bp->stats_counter); return -EAGAIN; } + return 0; +} + +static int bnx2x_storm_stats_update(struct bnx2x *bp) +{ + struct tstorm_per_port_stats *tport = + &bp->fw_stats_data->port.tstorm_port_statistics; + struct tstorm_per_pf_stats *tfunc = + &bp->fw_stats_data->pf.tstorm_pf_statistics; + struct host_func_stats *fstats = &bp->func_stats; + struct bnx2x_eth_stats *estats = &bp->eth_stats; + struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old; + int i; + + /* vfs stat counter is managed by pf */ + if (IS_PF(bp) && bnx2x_storm_stats_validate_counters(bp)) + return -EAGAIN; - memcpy(&(fstats->total_bytes_received_hi), - &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi), - sizeof(struct host_func_stats) - 2*sizeof(u32)); estats->error_bytes_received_hi = 0; estats->error_bytes_received_lo = 0; - estats->etherstatsoverrsizepkts_hi = 0; - estats->etherstatsoverrsizepkts_lo = 0; - estats->no_buff_discard_hi = 0; - estats->no_buff_discard_lo = 0; - estats->total_tpa_aggregations_hi = 0; - estats->total_tpa_aggregations_lo = 0; - estats->total_tpa_aggregated_frames_hi = 0; - estats->total_tpa_aggregated_frames_lo = 0; 
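bnx2x_storm_stats_validate_counters() above compares each storm's echoed counter against the one carried by the last ramrod (stats_counter - 1); any mismatch means the DMA'd block is stale and the update is retried with -EAGAIN. Schematically, as a standalone sketch with invented values:

#include <stdint.h>
#include <stdio.h>

#define EAGAIN 11

static int storm_stats_valid(uint16_t drv_stats_counter,
			     const uint16_t echoed[4])
{
	/* the counter that went out with the last stats ramrod */
	uint16_t cur = drv_stats_counter - 1;

	for (int i = 0; i < 4; i++)	/* x/u/c/t storms */
		if (echoed[i] != cur)
			return -EAGAIN;	/* stale: caller retries later */
	return 0;
}

int main(void)
{
	uint16_t echoed[4] = { 7, 7, 7, 6 };	/* tstorm not caught up yet */

	printf("rc = %d\n", storm_stats_valid(8, echoed));
	return 0;
}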
- estats->total_tpa_bytes_hi = 0; - estats->total_tpa_bytes_lo = 0; for_each_eth_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; struct tstorm_per_queue_stats *tclient = &bp->fw_stats_data->queue_stats[i]. tstorm_queue_statistics; - struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient; + struct tstorm_per_queue_stats *old_tclient = + &bnx2x_fp_stats(bp, fp)->old_tclient; struct ustorm_per_queue_stats *uclient = &bp->fw_stats_data->queue_stats[i]. ustorm_queue_statistics; - struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient; + struct ustorm_per_queue_stats *old_uclient = + &bnx2x_fp_stats(bp, fp)->old_uclient; struct xstorm_per_queue_stats *xclient = &bp->fw_stats_data->queue_stats[i]. xstorm_queue_statistics; - struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient; - struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; + struct xstorm_per_queue_stats *old_xclient = + &bnx2x_fp_stats(bp, fp)->old_xclient; + struct bnx2x_eth_q_stats *qstats = + &bnx2x_fp_stats(bp, fp)->eth_q_stats; + struct bnx2x_eth_q_stats_old *qstats_old = + &bnx2x_fp_stats(bp, fp)->eth_q_stats_old; + u32 diff; - DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, " - "bcast_sent 0x%x mcast_sent 0x%x\n", + DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n", i, xclient->ucast_pkts_sent, xclient->bcast_pkts_sent, xclient->mcast_pkts_sent); DP(BNX2X_MSG_STATS, "---------------\n"); - qstats->total_broadcast_bytes_received_hi = - le32_to_cpu(tclient->rcv_bcast_bytes.hi); - qstats->total_broadcast_bytes_received_lo = - le32_to_cpu(tclient->rcv_bcast_bytes.lo); - - qstats->total_multicast_bytes_received_hi = - le32_to_cpu(tclient->rcv_mcast_bytes.hi); - qstats->total_multicast_bytes_received_lo = - le32_to_cpu(tclient->rcv_mcast_bytes.lo); - - qstats->total_unicast_bytes_received_hi = - le32_to_cpu(tclient->rcv_ucast_bytes.hi); - qstats->total_unicast_bytes_received_lo = - le32_to_cpu(tclient->rcv_ucast_bytes.lo); + UPDATE_QSTAT(tclient->rcv_bcast_bytes, + total_broadcast_bytes_received); + UPDATE_QSTAT(tclient->rcv_mcast_bytes, + total_multicast_bytes_received); + UPDATE_QSTAT(tclient->rcv_ucast_bytes, + total_unicast_bytes_received); /* * sum to total_bytes_received all @@ -936,16 +1022,15 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) qstats->valid_bytes_received_lo = qstats->total_bytes_received_lo; - UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, total_unicast_packets_received); UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, total_multicast_packets_received); UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, total_broadcast_packets_received); - UPDATE_EXTEND_TSTAT(pkts_too_big_discard, - etherstatsoverrsizepkts); - UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard); + UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard, + etherstatsoverrsizepkts, 32); + UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16); SUB_EXTEND_USTAT(ucast_no_buff_pkts, total_unicast_packets_received); @@ -953,24 +1038,17 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) total_multicast_packets_received); SUB_EXTEND_USTAT(bcast_no_buff_pkts, total_broadcast_packets_received); - UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard); - UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard); - UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard); - - qstats->total_broadcast_bytes_transmitted_hi = - le32_to_cpu(xclient->bcast_bytes_sent.hi); - qstats->total_broadcast_bytes_transmitted_lo = - le32_to_cpu(xclient->bcast_bytes_sent.lo); - - qstats->total_multicast_bytes_transmitted_hi = - 
le32_to_cpu(xclient->mcast_bytes_sent.hi); - qstats->total_multicast_bytes_transmitted_lo = - le32_to_cpu(xclient->mcast_bytes_sent.lo); - - qstats->total_unicast_bytes_transmitted_hi = - le32_to_cpu(xclient->ucast_bytes_sent.hi); - qstats->total_unicast_bytes_transmitted_lo = - le32_to_cpu(xclient->ucast_bytes_sent.lo); + UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard); + UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard); + UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard); + + UPDATE_QSTAT(xclient->bcast_bytes_sent, + total_broadcast_bytes_transmitted); + UPDATE_QSTAT(xclient->mcast_bytes_sent, + total_multicast_bytes_transmitted); + UPDATE_QSTAT(xclient->ucast_bytes_sent, + total_unicast_bytes_transmitted); + /* * sum to total_bytes_transmitted all * unicast/multicast/broadcast @@ -1006,110 +1084,54 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) total_transmitted_dropped_packets_error); /* TPA aggregations completed */ - UPDATE_EXTEND_USTAT(coalesced_events, total_tpa_aggregations); + UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations); /* Number of network frames aggregated by TPA */ - UPDATE_EXTEND_USTAT(coalesced_pkts, - total_tpa_aggregated_frames); + UPDATE_EXTEND_E_USTAT(coalesced_pkts, + total_tpa_aggregated_frames); /* Total number of bytes in completed TPA aggregations */ - qstats->total_tpa_bytes_lo = - le32_to_cpu(uclient->coalesced_bytes.lo); - qstats->total_tpa_bytes_hi = - le32_to_cpu(uclient->coalesced_bytes.hi); - - /* TPA stats per-function */ - ADD_64(estats->total_tpa_aggregations_hi, - qstats->total_tpa_aggregations_hi, - estats->total_tpa_aggregations_lo, - qstats->total_tpa_aggregations_lo); - ADD_64(estats->total_tpa_aggregated_frames_hi, - qstats->total_tpa_aggregated_frames_hi, - estats->total_tpa_aggregated_frames_lo, - qstats->total_tpa_aggregated_frames_lo); - ADD_64(estats->total_tpa_bytes_hi, - qstats->total_tpa_bytes_hi, - estats->total_tpa_bytes_lo, - qstats->total_tpa_bytes_lo); - - ADD_64(fstats->total_bytes_received_hi, - qstats->total_bytes_received_hi, - fstats->total_bytes_received_lo, - qstats->total_bytes_received_lo); - ADD_64(fstats->total_bytes_transmitted_hi, - qstats->total_bytes_transmitted_hi, - fstats->total_bytes_transmitted_lo, - qstats->total_bytes_transmitted_lo); - ADD_64(fstats->total_unicast_packets_received_hi, - qstats->total_unicast_packets_received_hi, - fstats->total_unicast_packets_received_lo, - qstats->total_unicast_packets_received_lo); - ADD_64(fstats->total_multicast_packets_received_hi, - qstats->total_multicast_packets_received_hi, - fstats->total_multicast_packets_received_lo, - qstats->total_multicast_packets_received_lo); - ADD_64(fstats->total_broadcast_packets_received_hi, - qstats->total_broadcast_packets_received_hi, - fstats->total_broadcast_packets_received_lo, - qstats->total_broadcast_packets_received_lo); - ADD_64(fstats->total_unicast_packets_transmitted_hi, - qstats->total_unicast_packets_transmitted_hi, - fstats->total_unicast_packets_transmitted_lo, - qstats->total_unicast_packets_transmitted_lo); - ADD_64(fstats->total_multicast_packets_transmitted_hi, - qstats->total_multicast_packets_transmitted_hi, - fstats->total_multicast_packets_transmitted_lo, - qstats->total_multicast_packets_transmitted_lo); - ADD_64(fstats->total_broadcast_packets_transmitted_hi, - qstats->total_broadcast_packets_transmitted_hi, - fstats->total_broadcast_packets_transmitted_lo, - qstats->total_broadcast_packets_transmitted_lo); - ADD_64(fstats->valid_bytes_received_hi, - 
qstats->valid_bytes_received_hi, - fstats->valid_bytes_received_lo, - qstats->valid_bytes_received_lo); - - ADD_64(estats->etherstatsoverrsizepkts_hi, - qstats->etherstatsoverrsizepkts_hi, - estats->etherstatsoverrsizepkts_lo, - qstats->etherstatsoverrsizepkts_lo); - ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi, - estats->no_buff_discard_lo, qstats->no_buff_discard_lo); + UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes); + + UPDATE_ESTAT_QSTAT_64(total_tpa_bytes); + + UPDATE_FSTAT_QSTAT(total_bytes_received); + UPDATE_FSTAT_QSTAT(total_bytes_transmitted); + UPDATE_FSTAT_QSTAT(total_unicast_packets_received); + UPDATE_FSTAT_QSTAT(total_multicast_packets_received); + UPDATE_FSTAT_QSTAT(total_broadcast_packets_received); + UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted); + UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted); + UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted); + UPDATE_FSTAT_QSTAT(valid_bytes_received); } - ADD_64(fstats->total_bytes_received_hi, + ADD_64(estats->total_bytes_received_hi, estats->rx_stat_ifhcinbadoctets_hi, - fstats->total_bytes_received_lo, + estats->total_bytes_received_lo, estats->rx_stat_ifhcinbadoctets_lo); - ADD_64(fstats->total_bytes_received_hi, - le32_to_cpu(tfunc->rcv_error_bytes.hi), - fstats->total_bytes_received_lo, - le32_to_cpu(tfunc->rcv_error_bytes.lo)); + ADD_64_LE(estats->total_bytes_received_hi, + tfunc->rcv_error_bytes.hi, + estats->total_bytes_received_lo, + tfunc->rcv_error_bytes.lo); - memcpy(estats, &(fstats->total_bytes_received_hi), - sizeof(struct host_func_stats) - 2*sizeof(u32)); + ADD_64_LE(estats->error_bytes_received_hi, + tfunc->rcv_error_bytes.hi, + estats->error_bytes_received_lo, + tfunc->rcv_error_bytes.lo); - ADD_64(estats->error_bytes_received_hi, - le32_to_cpu(tfunc->rcv_error_bytes.hi), - estats->error_bytes_received_lo, - le32_to_cpu(tfunc->rcv_error_bytes.lo)); + UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong); - ADD_64(estats->etherstatsoverrsizepkts_hi, - estats->rx_stat_dot3statsframestoolong_hi, - estats->etherstatsoverrsizepkts_lo, - estats->rx_stat_dot3statsframestoolong_lo); ADD_64(estats->error_bytes_received_hi, estats->rx_stat_ifhcinbadoctets_hi, estats->error_bytes_received_lo, estats->rx_stat_ifhcinbadoctets_lo); if (bp->port.pmf) { - estats->mac_filter_discard = - le32_to_cpu(tport->mac_filter_discard); - estats->mf_tag_discard = - le32_to_cpu(tport->mf_tag_discard); - estats->brb_truncate_discard = - le32_to_cpu(tport->brb_truncate_discard); - estats->mac_discard = le32_to_cpu(tport->mac_discard); + struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old; + UPDATE_FW_STAT(mac_filter_discard); + UPDATE_FW_STAT(mf_tag_discard); + UPDATE_FW_STAT(brb_truncate_discard); + UPDATE_FW_STAT(mac_discard); } fstats->host_func_stats_start = ++fstats->host_func_stats_end; @@ -1141,9 +1163,12 @@ static void bnx2x_net_stats_update(struct bnx2x *bp) nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); tmp = estats->mac_discard; - for_each_rx_queue(bp, i) - tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); - nstats->rx_dropped = tmp; + for_each_rx_queue(bp, i) { + struct tstorm_per_queue_stats *old_tclient = + &bp->fp_stats[i].old_tclient; + tmp += le32_to_cpu(old_tclient->checksum_discard); + } + nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped; nstats->tx_dropped = 0; @@ -1191,17 +1216,16 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp) struct bnx2x_eth_stats *estats = &bp->eth_stats; int i; - estats->driver_xoff = 
0; - estats->rx_err_discard_pkt = 0; - estats->rx_skb_alloc_failed = 0; - estats->hw_csum_err = 0; for_each_queue(bp, i) { - struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats; - - estats->driver_xoff += qstats->driver_xoff; - estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt; - estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed; - estats->hw_csum_err += qstats->hw_csum_err; + struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats; + struct bnx2x_eth_q_stats_old *qstats_old = + &bp->fp_stats[i].eth_q_stats_old; + + UPDATE_ESTAT_QSTAT(driver_xoff); + UPDATE_ESTAT_QSTAT(rx_err_discard_pkt); + UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed); + UPDATE_ESTAT_QSTAT(hw_csum_err); + UPDATE_ESTAT_QSTAT(driver_filtered_tx_pkt); } } @@ -1223,75 +1247,55 @@ static void bnx2x_stats_update(struct bnx2x *bp) { u32 *stats_comp = bnx2x_sp(bp, stats_comp); - if (bnx2x_edebug_stats_stopped(bp)) + /* we run update from timer context, so give up + * if somebody is in the middle of transition + */ + if (down_trylock(&bp->stats_sema)) return; - if (*stats_comp != DMAE_COMP_VAL) - return; + if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started) + goto out; - if (bp->port.pmf) - bnx2x_hw_stats_update(bp); + if (IS_PF(bp)) { + if (*stats_comp != DMAE_COMP_VAL) + goto out; - if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) { - BNX2X_ERR("storm stats were not updated for 3 times\n"); - bnx2x_panic(); - return; + if (bp->port.pmf) + bnx2x_hw_stats_update(bp); + + if (bnx2x_storm_stats_update(bp)) { + if (bp->stats_pending++ == 3) { + BNX2X_ERR("storm stats were not updated for 3 times\n"); + bnx2x_panic(); + } + goto out; + } + } else { + /* vf doesn't collect HW statistics, and doesn't get completions + * perform only update + */ + bnx2x_storm_stats_update(bp); } bnx2x_net_stats_update(bp); bnx2x_drv_stats_update(bp); + /* vf is done */ + if (IS_VF(bp)) + goto out; + if (netif_msg_timer(bp)) { struct bnx2x_eth_stats *estats = &bp->eth_stats; - int i, cos; netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n", estats->brb_drop_lo, estats->brb_truncate_lo); - - for_each_eth_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; - - pr_debug("%s: rx usage(%4u) *rx_cons_sb(%u) rx pkt(%lu) rx calls(%lu %lu)\n", - fp->name, (le16_to_cpu(*fp->rx_cons_sb) - - fp->rx_comp_cons), - le16_to_cpu(*fp->rx_cons_sb), - bnx2x_hilo(&qstats-> - total_unicast_packets_received_hi), - fp->rx_calls, fp->rx_pkt); - } - - for_each_eth_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - struct bnx2x_fp_txdata *txdata; - struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; - struct netdev_queue *txq; - - pr_debug("%s: tx pkt(%lu) (Xoff events %u)", - fp->name, - bnx2x_hilo( - &qstats->total_unicast_packets_transmitted_hi), - qstats->driver_xoff); - - for_each_cos_in_tx_queue(fp, cos) { - txdata = &fp->txdata[cos]; - txq = netdev_get_tx_queue(bp->dev, - FP_COS_TO_TXQ(fp, cos)); - - pr_debug("%d: tx avail(%4u) *tx_cons_sb(%u) tx calls (%lu) %s\n", - cos, - bnx2x_tx_avail(bp, txdata), - le16_to_cpu(*txdata->tx_cons_sb), - txdata->tx_pkt, - (netif_tx_queue_stopped(txq) ? 
- "Xoff" : "Xon") - ); - } - } } bnx2x_hw_stats_post(bp); bnx2x_storm_stats_post(bp); + +out: + up(&bp->stats_sema); } static void bnx2x_port_stats_stop(struct bnx2x *bp) @@ -1357,6 +1361,11 @@ static void bnx2x_stats_stop(struct bnx2x *bp) { int update = 0; + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); + + bp->stats_started = false; + bnx2x_stats_comp(bp); if (bp->port.pmf) @@ -1373,6 +1382,8 @@ static void bnx2x_stats_stop(struct bnx2x *bp) bnx2x_hw_stats_post(bp); bnx2x_stats_comp(bp); } + + up(&bp->stats_sema); } static void bnx2x_stats_do_nothing(struct bnx2x *bp) @@ -1401,15 +1412,17 @@ static const struct { void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) { enum bnx2x_stats_state state; + void (*action)(struct bnx2x *bp); if (unlikely(bp->panic)) return; spin_lock_bh(&bp->stats_lock); state = bp->stats_state; bp->stats_state = bnx2x_stats_stm[state][event].next_state; + action = bnx2x_stats_stm[state][event].action; spin_unlock_bh(&bp->stats_lock); - bnx2x_stats_stm[state][event].action(bp); + action(bp); if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", @@ -1446,71 +1459,11 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp) bnx2x_stats_comp(bp); } -static void bnx2x_func_stats_base_init(struct bnx2x *bp) -{ - int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX; - u32 func_stx; - - /* sanity */ - if (!bp->port.pmf || !bp->func_stx) { - BNX2X_ERR("BUG!\n"); - return; - } - - /* save our func_stx */ - func_stx = bp->func_stx; - - for (vn = VN_0; vn < vn_max; vn++) { - int mb_idx = BP_FW_MB_IDX_VN(bp, vn); - - bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); - bnx2x_func_stats_init(bp); - bnx2x_hw_stats_post(bp); - bnx2x_stats_comp(bp); - } - - /* restore our func_stx */ - bp->func_stx = func_stx; -} - -static void bnx2x_func_stats_base_update(struct bnx2x *bp) -{ - struct dmae_command *dmae = &bp->stats_dmae; - u32 *stats_comp = bnx2x_sp(bp, stats_comp); - - /* sanity */ - if (!bp->func_stx) { - BNX2X_ERR("BUG!\n"); - return; - } - - bp->executer_idx = 0; - memset(dmae, 0, sizeof(struct dmae_command)); - - dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, - true, DMAE_COMP_PCI); - dmae->src_addr_lo = bp->func_stx >> 2; - dmae->src_addr_hi = 0; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base)); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base)); - dmae->len = sizeof(struct host_func_stats) >> 2; - dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_val = DMAE_COMP_VAL; - - *stats_comp = 0; - bnx2x_hw_stats_post(bp); - bnx2x_stats_comp(bp); -} - -/** - * This function will prepare the statistics ramrod data the way +/* This function will prepare the statistics ramrod data the way * we will only have to increment the statistics counter and * send the ramrod each time we have to. 
- * - * @param bp */ -static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp) +static void bnx2x_prep_fw_stats_req(struct bnx2x *bp) { int i; int first_queue_query_index;
@@ -1622,7 +1575,7 @@ static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp) query[first_queue_query_index + i]; cur_query_entry->kind = STATS_TYPE_QUEUE; - cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]); + cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]); cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
@@ -1631,11 +1584,51 @@ static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp) } } +void bnx2x_memset_stats(struct bnx2x *bp) +{ + int i; + + /* function stats */ + for_each_queue(bp, i) { + struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i]; + + memset(&fp_stats->old_tclient, 0, + sizeof(fp_stats->old_tclient)); + memset(&fp_stats->old_uclient, 0, + sizeof(fp_stats->old_uclient)); + memset(&fp_stats->old_xclient, 0, + sizeof(fp_stats->old_xclient)); + if (bp->stats_init) { + memset(&fp_stats->eth_q_stats, 0, + sizeof(fp_stats->eth_q_stats)); + memset(&fp_stats->eth_q_stats_old, 0, + sizeof(fp_stats->eth_q_stats_old)); + } + } + + memset(&bp->dev->stats, 0, sizeof(bp->dev->stats)); + + if (bp->stats_init) { + memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old)); + memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old)); + memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old)); + memset(&bp->eth_stats, 0, sizeof(bp->eth_stats)); + memset(&bp->func_stats, 0, sizeof(bp->func_stats)); + } + + bp->stats_state = STATS_STATE_DISABLED; + + if (bp->port.pmf && bp->port.port_stx) + bnx2x_port_stats_base_init(bp); + + /* mark the end of statistics initialization */ + bp->stats_init = false; +} + void bnx2x_stats_init(struct bnx2x *bp) { int /*abs*/port = BP_PORT(bp); int mb_idx = BP_FW_MB_IDX(bp); - int i; bp->stats_pending = 0; bp->executer_idx = 0;
@@ -1653,6 +1646,10 @@ void bnx2x_stats_init(struct bnx2x *bp) DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n", bp->port.port_stx, bp->func_stx); + /* pmf should retrieve port statistics from SP on a non-init */ + if (!bp->stats_init && bp->port.pmf && bp->port.port_stx) + bnx2x_stats_handle(bp, STATS_EVENT_PMF); + port = BP_PORT(bp); /* port stats */ memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
@@ -1667,31 +1664,343 @@ void bnx2x_stats_init(struct bnx2x *bp) &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2); } - /* function stats */ - for_each_queue(bp, i) { + /* Prepare statistics ramrod data */ + bnx2x_prep_fw_stats_req(bp); + + /* Clean SP from previous statistics */ + if (bp->stats_init) { + if (bp->func_stx) { + memset(bnx2x_sp(bp, func_stats), 0, + sizeof(struct host_func_stats)); + bnx2x_func_stats_init(bp); + bnx2x_hw_stats_post(bp); + bnx2x_stats_comp(bp); + } + } + + bnx2x_memset_stats(bp); +}
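bnx2x_save_statistics() below parks the running totals in the *_old shadow structures before the firmware counters reset, and UPDATE_QSTAT later rebases every fresh firmware reading on top of that snapshot, so the counters stay monotonic across an internal reload. A minimal sketch of that save/rebase contract, with invented names and plain uint32_t halves standing in for the driver's hi/lo fields:

    #include <assert.h>
    #include <stdint.h>

    struct counter64 { uint32_t hi, lo; };

    /* park the running total before the firmware counters restart at zero */
    static void save_old(struct counter64 *old, const struct counter64 *cur)
    {
        *old = *cur;
    }

    /* same shape as UPDATE_QSTAT: snapshot + fresh reading, with a carry
     * into the high half when the low half wraps
     */
    static void rebase(struct counter64 *cur, const struct counter64 *old,
                       uint32_t fw_hi, uint32_t fw_lo)
    {
        cur->lo = old->lo + fw_lo;
        cur->hi = old->hi + fw_hi + (cur->lo < old->lo ? 1 : 0);
    }

    int main(void)
    {
        struct counter64 cur = { 0, 0xfffffff0u }, old;

        save_old(&old, &cur);
        rebase(&cur, &old, 0, 0x20);        /* firmware restarted from zero */
        assert(cur.hi == 1 && cur.lo == 0x10);
        return 0;
    }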
+ +void bnx2x_save_statistics(struct bnx2x *bp) +{ + int i; + struct net_device_stats *nstats = &bp->dev->stats; + + /* save queue statistics */ + for_each_eth_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; + struct bnx2x_eth_q_stats *qstats = + &bnx2x_fp_stats(bp, fp)->eth_q_stats; + struct bnx2x_eth_q_stats_old *qstats_old = + &bnx2x_fp_stats(bp, fp)->eth_q_stats_old; + + UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi); + UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo); + UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi); + UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo); + UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi); + UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo); + UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi); + UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo); + UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi); + UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo); + UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi); + UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo); + UPDATE_QSTAT_OLD(total_tpa_bytes_hi); + UPDATE_QSTAT_OLD(total_tpa_bytes_lo); + } + + /* save net_device_stats statistics */ + bp->net_stats_old.rx_dropped = nstats->rx_dropped;
- memset(&fp->old_tclient, 0, sizeof(fp->old_tclient)); - memset(&fp->old_uclient, 0, sizeof(fp->old_uclient)); - memset(&fp->old_xclient, 0, sizeof(fp->old_xclient)); - memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats)); + /* store port firmware statistics */ + if (bp->port.pmf && IS_MF(bp)) { + struct bnx2x_eth_stats *estats = &bp->eth_stats; + struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old; + UPDATE_FW_STAT_OLD(mac_filter_discard); + UPDATE_FW_STAT_OLD(mf_tag_discard); + UPDATE_FW_STAT_OLD(brb_truncate_discard); + UPDATE_FW_STAT_OLD(mac_discard); } +}
- /* Prepare statistics ramrod data */ - bnx2x_prep_fw_stats_req(bp); +void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats, + u32 stats_type) +{ + int i; + struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats; + struct bnx2x_eth_stats *estats = &bp->eth_stats; + struct per_queue_stats *fcoe_q_stats = + &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)];
- memset(&bp->dev->stats, 0, sizeof(bp->dev->stats)); - memset(&bp->eth_stats, 0, sizeof(bp->eth_stats)); + struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = + &fcoe_q_stats->tstorm_queue_statistics;
- bp->stats_state = STATS_STATE_DISABLED; + struct ustorm_per_queue_stats *fcoe_q_ustorm_stats = + &fcoe_q_stats->ustorm_queue_statistics;
- if (bp->port.pmf) { - if (bp->port.port_stx) - bnx2x_port_stats_base_init(bp); + struct xstorm_per_queue_stats *fcoe_q_xstorm_stats = + &fcoe_q_stats->xstorm_queue_statistics;
- if (bp->func_stx) - bnx2x_func_stats_base_init(bp); + struct fcoe_statistics_params *fw_fcoe_stat = + &bp->fw_stats_data->fcoe; + + memset(afex_stats, 0, sizeof(struct afex_stats)); + + for_each_eth_queue(bp, i) { + struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats; + + ADD_64(afex_stats->rx_unicast_bytes_hi, + qstats->total_unicast_bytes_received_hi, + afex_stats->rx_unicast_bytes_lo, + qstats->total_unicast_bytes_received_lo); + + ADD_64(afex_stats->rx_broadcast_bytes_hi, + qstats->total_broadcast_bytes_received_hi, + afex_stats->rx_broadcast_bytes_lo, + qstats->total_broadcast_bytes_received_lo); + + ADD_64(afex_stats->rx_multicast_bytes_hi, + qstats->total_multicast_bytes_received_hi, + afex_stats->rx_multicast_bytes_lo, + qstats->total_multicast_bytes_received_lo); + + ADD_64(afex_stats->rx_unicast_frames_hi, + qstats->total_unicast_packets_received_hi, + afex_stats->rx_unicast_frames_lo, + qstats->total_unicast_packets_received_lo); + + ADD_64(afex_stats->rx_broadcast_frames_hi, + qstats->total_broadcast_packets_received_hi, + afex_stats->rx_broadcast_frames_lo, + qstats->total_broadcast_packets_received_lo); + + ADD_64(afex_stats->rx_multicast_frames_hi, + qstats->total_multicast_packets_received_hi, + afex_stats->rx_multicast_frames_lo, + qstats->total_multicast_packets_received_lo); + + /* sum to rx_frames_discarded all discarded + * packets due to size, ttl0 and checksum + */ + ADD_64(afex_stats->rx_frames_discarded_hi,
qstats->total_packets_received_checksum_discarded_hi, + afex_stats->rx_frames_discarded_lo, + qstats->total_packets_received_checksum_discarded_lo); - } else if (bp->func_stx) - bnx2x_func_stats_base_update(bp); + ADD_64(afex_stats->rx_frames_discarded_hi, + qstats->total_packets_received_ttl0_discarded_hi, + afex_stats->rx_frames_discarded_lo, + qstats->total_packets_received_ttl0_discarded_lo); + + ADD_64(afex_stats->rx_frames_discarded_hi, + qstats->etherstatsoverrsizepkts_hi, + afex_stats->rx_frames_discarded_lo, + qstats->etherstatsoverrsizepkts_lo); + + ADD_64(afex_stats->rx_frames_dropped_hi, + qstats->no_buff_discard_hi, + afex_stats->rx_frames_dropped_lo, + qstats->no_buff_discard_lo); + + ADD_64(afex_stats->tx_unicast_bytes_hi, + qstats->total_unicast_bytes_transmitted_hi, + afex_stats->tx_unicast_bytes_lo, + qstats->total_unicast_bytes_transmitted_lo); + + ADD_64(afex_stats->tx_broadcast_bytes_hi, + qstats->total_broadcast_bytes_transmitted_hi, + afex_stats->tx_broadcast_bytes_lo, + qstats->total_broadcast_bytes_transmitted_lo); + + ADD_64(afex_stats->tx_multicast_bytes_hi, + qstats->total_multicast_bytes_transmitted_hi, + afex_stats->tx_multicast_bytes_lo, + qstats->total_multicast_bytes_transmitted_lo); + + ADD_64(afex_stats->tx_unicast_frames_hi, + qstats->total_unicast_packets_transmitted_hi, + afex_stats->tx_unicast_frames_lo, + qstats->total_unicast_packets_transmitted_lo); + + ADD_64(afex_stats->tx_broadcast_frames_hi, + qstats->total_broadcast_packets_transmitted_hi, + afex_stats->tx_broadcast_frames_lo, + qstats->total_broadcast_packets_transmitted_lo); + + ADD_64(afex_stats->tx_multicast_frames_hi, + qstats->total_multicast_packets_transmitted_hi, + afex_stats->tx_multicast_frames_lo, + qstats->total_multicast_packets_transmitted_lo); + + ADD_64(afex_stats->tx_frames_dropped_hi, + qstats->total_transmitted_dropped_packets_error_hi, + afex_stats->tx_frames_dropped_lo, + qstats->total_transmitted_dropped_packets_error_lo); + } + + /* now add FCoE statistics which are collected separately + * (both offloaded and non offloaded) + */ + if (!NO_FCOE(bp)) { + ADD_64_LE(afex_stats->rx_unicast_bytes_hi, + LE32_0, + afex_stats->rx_unicast_bytes_lo, + fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt); + + ADD_64_LE(afex_stats->rx_unicast_bytes_hi, + fcoe_q_tstorm_stats->rcv_ucast_bytes.hi, + afex_stats->rx_unicast_bytes_lo, + fcoe_q_tstorm_stats->rcv_ucast_bytes.lo); + + ADD_64_LE(afex_stats->rx_broadcast_bytes_hi, + fcoe_q_tstorm_stats->rcv_bcast_bytes.hi, + afex_stats->rx_broadcast_bytes_lo, + fcoe_q_tstorm_stats->rcv_bcast_bytes.lo); + + ADD_64_LE(afex_stats->rx_multicast_bytes_hi, + fcoe_q_tstorm_stats->rcv_mcast_bytes.hi, + afex_stats->rx_multicast_bytes_lo, + fcoe_q_tstorm_stats->rcv_mcast_bytes.lo); + + ADD_64_LE(afex_stats->rx_unicast_frames_hi, + LE32_0, + afex_stats->rx_unicast_frames_lo, + fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt); + + ADD_64_LE(afex_stats->rx_unicast_frames_hi, + LE32_0, + afex_stats->rx_unicast_frames_lo, + fcoe_q_tstorm_stats->rcv_ucast_pkts); + + ADD_64_LE(afex_stats->rx_broadcast_frames_hi, + LE32_0, + afex_stats->rx_broadcast_frames_lo, + fcoe_q_tstorm_stats->rcv_bcast_pkts); + + ADD_64_LE(afex_stats->rx_multicast_frames_hi, + LE32_0, + afex_stats->rx_multicast_frames_lo, + fcoe_q_tstorm_stats->rcv_ucast_pkts); + + ADD_64_LE(afex_stats->rx_frames_discarded_hi, + LE32_0, + afex_stats->rx_frames_discarded_lo, + fcoe_q_tstorm_stats->checksum_discard); + + ADD_64_LE(afex_stats->rx_frames_discarded_hi, + LE32_0, + afex_stats->rx_frames_discarded_lo, + 
fcoe_q_tstorm_stats->pkts_too_big_discard); + + ADD_64_LE(afex_stats->rx_frames_discarded_hi, + LE32_0, + afex_stats->rx_frames_discarded_lo, + fcoe_q_tstorm_stats->ttl0_discard); + + ADD_64_LE16(afex_stats->rx_frames_dropped_hi, + LE16_0, + afex_stats->rx_frames_dropped_lo, + fcoe_q_tstorm_stats->no_buff_discard); + + ADD_64_LE(afex_stats->rx_frames_dropped_hi, + LE32_0, + afex_stats->rx_frames_dropped_lo, + fcoe_q_ustorm_stats->ucast_no_buff_pkts); + + ADD_64_LE(afex_stats->rx_frames_dropped_hi, + LE32_0, + afex_stats->rx_frames_dropped_lo, + fcoe_q_ustorm_stats->mcast_no_buff_pkts); + + ADD_64_LE(afex_stats->rx_frames_dropped_hi, + LE32_0, + afex_stats->rx_frames_dropped_lo, + fcoe_q_ustorm_stats->bcast_no_buff_pkts); + + ADD_64_LE(afex_stats->rx_frames_dropped_hi, + LE32_0, + afex_stats->rx_frames_dropped_lo, + fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt); + + ADD_64_LE(afex_stats->rx_frames_dropped_hi, + LE32_0, + afex_stats->rx_frames_dropped_lo, + fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt); + + ADD_64_LE(afex_stats->tx_unicast_bytes_hi, + LE32_0, + afex_stats->tx_unicast_bytes_lo, + fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt); + + ADD_64_LE(afex_stats->tx_unicast_bytes_hi, + fcoe_q_xstorm_stats->ucast_bytes_sent.hi, + afex_stats->tx_unicast_bytes_lo, + fcoe_q_xstorm_stats->ucast_bytes_sent.lo); + + ADD_64_LE(afex_stats->tx_broadcast_bytes_hi, + fcoe_q_xstorm_stats->bcast_bytes_sent.hi, + afex_stats->tx_broadcast_bytes_lo, + fcoe_q_xstorm_stats->bcast_bytes_sent.lo); + + ADD_64_LE(afex_stats->tx_multicast_bytes_hi, + fcoe_q_xstorm_stats->mcast_bytes_sent.hi, + afex_stats->tx_multicast_bytes_lo, + fcoe_q_xstorm_stats->mcast_bytes_sent.lo); + + ADD_64_LE(afex_stats->tx_unicast_frames_hi, + LE32_0, + afex_stats->tx_unicast_frames_lo, + fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt); + + ADD_64_LE(afex_stats->tx_unicast_frames_hi, + LE32_0, + afex_stats->tx_unicast_frames_lo, + fcoe_q_xstorm_stats->ucast_pkts_sent); + + ADD_64_LE(afex_stats->tx_broadcast_frames_hi, + LE32_0, + afex_stats->tx_broadcast_frames_lo, + fcoe_q_xstorm_stats->bcast_pkts_sent); + + ADD_64_LE(afex_stats->tx_multicast_frames_hi, + LE32_0, + afex_stats->tx_multicast_frames_lo, + fcoe_q_xstorm_stats->mcast_pkts_sent); + + ADD_64_LE(afex_stats->tx_frames_dropped_hi, + LE32_0, + afex_stats->tx_frames_dropped_lo, + fcoe_q_xstorm_stats->error_drop_pkts); + } + + /* if port stats are requested, add them to the PMF + * stats, as anyway they will be accumulated by the + * MCP before sent to the switch + */ + if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) { + ADD_64(afex_stats->rx_frames_dropped_hi, + 0, + afex_stats->rx_frames_dropped_lo, + estats->mac_filter_discard); + ADD_64(afex_stats->rx_frames_dropped_hi, + 0, + afex_stats->rx_frames_dropped_lo, + estats->brb_truncate_discard); + ADD_64(afex_stats->rx_frames_discarded_hi, + 0, + afex_stats->rx_frames_discarded_lo, + estats->mac_discard); + } +} + +void bnx2x_stats_safe_exec(struct bnx2x *bp, + void (func_to_exec)(void *cookie), + void *cookie){ + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); + bnx2x_stats_comp(bp); + func_to_exec(cookie); + __bnx2x_stats_start(bp); + up(&bp->stats_sema); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index 683deb05310..2beceaefdee 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h @@ -1,12 +1,12 @@ /* bnx2x_stats.h: Broadcom Everest network driver. 
* - * Copyright (c) 2007-2011 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * - * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com> * Written by: Eliezer Tamir * Based on code from Michael Chan's bnx2 driver * UDP CSUM errata workaround by Arik Gendelman
@@ -40,7 +40,6 @@ struct nig_stats { u32 egress_mac_pkt1_hi; }; - enum bnx2x_stats_event { STATS_EVENT_PMF = 0, STATS_EVENT_LINK_UP,
@@ -199,8 +198,14 @@ struct bnx2x_eth_stats { u32 pfc_frames_received_lo; u32 pfc_frames_sent_hi; u32 pfc_frames_sent_lo; -}; + /* Recovery */ + u32 recoverable_error; + u32 unrecoverable_error; + u32 driver_filtered_tx_pkt; + /* src: Clear-on-Read register; Will not survive PMF Migration */ + u32 eee_tx_lpi; +}; struct bnx2x_eth_q_stats { u32 total_unicast_bytes_received_hi;
@@ -258,6 +263,70 @@ struct bnx2x_eth_q_stats { u32 total_tpa_aggregated_frames_lo; u32 total_tpa_bytes_hi; u32 total_tpa_bytes_lo; + u32 driver_filtered_tx_pkt; +}; + +struct bnx2x_eth_stats_old { + u32 rx_stat_dot3statsframestoolong_hi; + u32 rx_stat_dot3statsframestoolong_lo; +}; + +struct bnx2x_eth_q_stats_old { + /* Fields to preserve over fw reset */ + u32 total_unicast_bytes_received_hi; + u32 total_unicast_bytes_received_lo; + u32 total_broadcast_bytes_received_hi; + u32 total_broadcast_bytes_received_lo; + u32 total_multicast_bytes_received_hi; + u32 total_multicast_bytes_received_lo; + u32 total_unicast_bytes_transmitted_hi; + u32 total_unicast_bytes_transmitted_lo; + u32 total_broadcast_bytes_transmitted_hi; + u32 total_broadcast_bytes_transmitted_lo; + u32 total_multicast_bytes_transmitted_hi; + u32 total_multicast_bytes_transmitted_lo; + u32 total_tpa_bytes_hi; + u32 total_tpa_bytes_lo; + + /* Fields to preserve the last value of */ + u32 total_bytes_received_hi; + u32 total_bytes_received_lo; + u32 total_bytes_transmitted_hi; + u32 total_bytes_transmitted_lo; + u32 total_unicast_packets_received_hi; + u32 total_unicast_packets_received_lo; + u32 total_multicast_packets_received_hi; + u32 total_multicast_packets_received_lo; + u32 total_broadcast_packets_received_hi; + u32 total_broadcast_packets_received_lo; + u32 total_unicast_packets_transmitted_hi; + u32 total_unicast_packets_transmitted_lo; + u32 total_multicast_packets_transmitted_hi; + u32 total_multicast_packets_transmitted_lo; + u32 total_broadcast_packets_transmitted_hi; + u32 total_broadcast_packets_transmitted_lo; + u32 valid_bytes_received_hi; + u32 valid_bytes_received_lo; + + u32 total_tpa_bytes_hi_old; + u32 total_tpa_bytes_lo_old; + + u32 driver_xoff_old; + u32 rx_err_discard_pkt_old; + u32 rx_skb_alloc_failed_old; + u32 hw_csum_err_old; + u32 driver_filtered_tx_pkt_old; +}; + +struct bnx2x_net_stats_old { + u32 rx_dropped; +}; + +struct bnx2x_fw_port_stats_old { + u32 mac_filter_discard; + u32 mf_tag_discard; + u32 brb_truncate_discard; + u32 mac_discard; }; /****************************************************************************
@@ -271,6 +340,18 @@ struct bnx2x_eth_q_stats { s_hi += a_hi + ((s_lo < a_lo) ?
1 : 0); \ } while (0) +#define LE32_0 ((__force __le32) 0) +#define LE16_0 ((__force __le16) 0) + +/* The _force is for cases where high value is 0 */ +#define ADD_64_LE(s_hi, a_hi_le, s_lo, a_lo_le) \ + ADD_64(s_hi, le32_to_cpu(a_hi_le), \ + s_lo, le32_to_cpu(a_lo_le)) + +#define ADD_64_LE16(s_hi, a_hi_le, s_lo, a_lo_le) \ + ADD_64(s_hi, le16_to_cpu(a_hi_le), \ + s_lo, le16_to_cpu(a_lo_le)) + /* difference = minuend - subtrahend */ #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \ do { \ @@ -337,13 +418,22 @@ struct bnx2x_eth_q_stats { new->s); \ } while (0) -#define UPDATE_EXTEND_TSTAT(s, t) \ +#define UPDATE_EXTEND_TSTAT_X(s, t, size) \ do { \ - diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \ + diff = le##size##_to_cpu(tclient->s) - \ + le##size##_to_cpu(old_tclient->s); \ old_tclient->s = tclient->s; \ ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ } while (0) +#define UPDATE_EXTEND_TSTAT(s, t) UPDATE_EXTEND_TSTAT_X(s, t, 32) + +#define UPDATE_EXTEND_E_TSTAT(s, t, size) \ + do { \ + UPDATE_EXTEND_TSTAT_X(s, t, size); \ + ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \ + } while (0) + #define UPDATE_EXTEND_USTAT(s, t) \ do { \ diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ @@ -351,6 +441,12 @@ struct bnx2x_eth_q_stats { ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ } while (0) +#define UPDATE_EXTEND_E_USTAT(s, t) \ + do { \ + UPDATE_EXTEND_USTAT(s, t); \ + ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \ + } while (0) + #define UPDATE_EXTEND_XSTAT(s, t) \ do { \ diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \ @@ -358,6 +454,67 @@ struct bnx2x_eth_q_stats { ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ } while (0) +#define UPDATE_QSTAT(s, t) \ + do { \ + qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \ + qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi) \ + + ((qstats->t##_lo < qstats_old->t##_lo) ? 
1 : 0); \ + } while (0) + +#define UPDATE_QSTAT_OLD(f) \ + do { \ + qstats_old->f = qstats->f; \ + } while (0) + +#define UPDATE_ESTAT_QSTAT_64(s) \ + do { \ + ADD_64(estats->s##_hi, qstats->s##_hi, \ + estats->s##_lo, qstats->s##_lo); \ + SUB_64(estats->s##_hi, qstats_old->s##_hi_old, \ + estats->s##_lo, qstats_old->s##_lo_old); \ + qstats_old->s##_hi_old = qstats->s##_hi; \ + qstats_old->s##_lo_old = qstats->s##_lo; \ + } while (0) + +#define UPDATE_ESTAT_QSTAT(s) \ + do { \ + estats->s += qstats->s; \ + estats->s -= qstats_old->s##_old; \ + qstats_old->s##_old = qstats->s; \ + } while (0) + +#define UPDATE_FSTAT_QSTAT(s) \ + do { \ + ADD_64(fstats->s##_hi, qstats->s##_hi, \ + fstats->s##_lo, qstats->s##_lo); \ + SUB_64(fstats->s##_hi, qstats_old->s##_hi, \ + fstats->s##_lo, qstats_old->s##_lo); \ + estats->s##_hi = fstats->s##_hi; \ + estats->s##_lo = fstats->s##_lo; \ + qstats_old->s##_hi = qstats->s##_hi; \ + qstats_old->s##_lo = qstats->s##_lo; \ + } while (0) + +#define UPDATE_FW_STAT(s) \ + do { \ + estats->s = le32_to_cpu(tport->s) + fwstats->s; \ + } while (0) + +#define UPDATE_FW_STAT_OLD(f) \ + do { \ + fwstats->f = estats->f; \ + } while (0) + +#define UPDATE_ESTAT(s, t) \ + do { \ + SUB_64(estats->s##_hi, estats_old->t##_hi, \ + estats->s##_lo, estats_old->t##_lo); \ + ADD_64(estats->s##_hi, estats->t##_hi, \ + estats->s##_lo, estats->t##_lo); \ + estats_old->t##_hi = estats->t##_hi; \ + estats_old->t##_lo = estats->t##_lo; \ + } while (0) + /* minuend -= subtrahend */ #define SUB_64(m_hi, s_hi, m_lo, s_lo) \ do { \ @@ -376,12 +533,23 @@ struct bnx2x_eth_q_stats { SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ } while (0) - /* forward */ struct bnx2x; +void bnx2x_memset_stats(struct bnx2x *bp); void bnx2x_stats_init(struct bnx2x *bp); - void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); +void bnx2x_stats_safe_exec(struct bnx2x *bp, + void (func_to_exec)(void *cookie), + void *cookie); + +/** + * bnx2x_save_statistics - save statistics when unloading. + * + * @bp: driver handle + */ +void bnx2x_save_statistics(struct bnx2x *bp); +void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats, + u32 stats_type); #endif /* BNX2X_STATS_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c new file mode 100644 index 00000000000..d712d0ddd71 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -0,0 +1,2034 @@ +/* bnx2x_vfpf.c: Broadcom Everest network driver. + * + * Copyright 2009-2013 Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. 
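The vf-pf channel code that follows builds every request as a chain of type/length records laid out back to back in the mailbox buffer and terminated by a list-end marker, which is what bnx2x_add_tlv() and bnx2x_search_tlv_list() manipulate. A rough sketch of that framing, assuming a flat byte buffer and an invented terminator code (the driver's own channel_tlv carries the same two fields):

    #include <stdint.h>
    #include <string.h>

    struct tlv_hdr {
        uint16_t type;
        uint16_t length;        /* covers this header plus its payload */
    };

    #define TLV_LIST_END 0xffffu    /* stand-in terminator code */

    /* place a header at the given offset, as bnx2x_add_tlv() does */
    static void add_tlv(uint8_t *buf, uint16_t offset,
                        uint16_t type, uint16_t length)
    {
        struct tlv_hdr hdr = { type, length };

        memcpy(buf + offset, &hdr, sizeof(hdr));
    }

    /* walk the chain the way bnx2x_search_tlv_list() does, refusing to
     * loop forever on a corrupt zero-length record
     */
    static const struct tlv_hdr *find_tlv(const uint8_t *buf, uint16_t want)
    {
        uint16_t off = 0;
        struct tlv_hdr hdr;

        for (;;) {
            memcpy(&hdr, buf + off, sizeof(hdr));
            if (hdr.type == want)
                return (const struct tlv_hdr *)(buf + off);
            if (hdr.type == TLV_LIST_END || !hdr.length)
                return NULL;
            off += hdr.length;
        }
    }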
+ * + * Maintained by: Ariel Elior <ariel.elior@qlogic.com> + * Written by: Shmulik Ravid + * Ariel Elior <ariel.elior@qlogic.com> + */ + +#include "bnx2x.h" +#include "bnx2x_cmn.h" +#include <linux/crc32.h> + +static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx); + +/* place a given tlv on the tlv buffer at a given offset */ +static void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, + u16 offset, u16 type, u16 length) +{ + struct channel_tlv *tl = + (struct channel_tlv *)(tlvs_list + offset); + + tl->type = type; + tl->length = length; +} + +/* Clear the mailbox and init the header of the first tlv */ +static void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv, + u16 type, u16 length) +{ + mutex_lock(&bp->vf2pf_mutex); + + DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n", + type); + + /* Clear mailbox */ + memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg)); + + /* init type and length */ + bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length); + + /* init first tlv header */ + first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req); +} + +/* releases the mailbox */ +static void bnx2x_vfpf_finalize(struct bnx2x *bp, + struct vfpf_first_tlv *first_tlv) +{ + DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n", + first_tlv->tl.type); + + mutex_unlock(&bp->vf2pf_mutex); +} + +/* Finds a TLV by type in a TLV buffer; If found, returns pointer to the TLV */ +static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list, + enum channel_tlvs req_tlv) +{ + struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list; + + do { + if (tlv->type == req_tlv) + return tlv; + + if (!tlv->length) { + BNX2X_ERR("Found TLV with length 0\n"); + return NULL; + } + + tlvs_list += tlv->length; + tlv = (struct channel_tlv *)tlvs_list; + } while (tlv->type != CHANNEL_TLV_LIST_END); + + DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv); + + return NULL; +} + +/* list the types and lengths of the tlvs on the buffer */ +static void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list) +{ + int i = 1; + struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list; + + while (tlv->type != CHANNEL_TLV_LIST_END) { + /* output tlv */ + DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i, + tlv->type, tlv->length); + + /* advance to next tlv */ + tlvs_list += tlv->length; + + /* cast general tlv list pointer to channel tlv header*/ + tlv = (struct channel_tlv *)tlvs_list; + + i++; + + /* break condition for this loop */ + if (i > MAX_TLVS_IN_LIST) { + WARN(true, "corrupt tlvs"); + return; + } + } + + /* output last tlv */ + DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i, + tlv->type, tlv->length); +} + +/* test whether we support a tlv type */ +bool bnx2x_tlv_supported(u16 tlvtype) +{ + return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX; +} + +static inline int bnx2x_pfvf_status_codes(int rc) +{ + switch (rc) { + case 0: + return PFVF_STATUS_SUCCESS; + case -ENOMEM: + return PFVF_STATUS_NO_RESOURCE; + default: + return PFVF_STATUS_FAILURE; + } +} + +static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping) +{ + struct cstorm_vf_zone_data __iomem *zone_data = + REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START); + int tout = 100, interval = 100; /* wait for 10 seconds */ + + if (*done) { + BNX2X_ERR("done was non zero before message to pf was sent\n"); + WARN_ON(true); + return -EINVAL; + } + + /* if PF indicated channel is down avoid sending message. 
Return success + * so calling flow can continue + */ + bnx2x_sample_bulletin(bp); + if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { + DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n"); + *done = PFVF_STATUS_SUCCESS; + return -EINVAL; + } + + /* Write message address */ + writel(U64_LO(msg_mapping), + &zone_data->non_trigger.vf_pf_channel.msg_addr_lo); + writel(U64_HI(msg_mapping), + &zone_data->non_trigger.vf_pf_channel.msg_addr_hi); + + /* make sure the address is written before FW accesses it */ + wmb(); + + /* Trigger the PF FW */ + writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid); + + /* Wait for PF to complete */ + while ((tout >= 0) && (!*done)) { + msleep(interval); + tout -= 1; + + /* progress indicator - HV can take its own sweet time in + * answering VFs... + */ + DP_CONT(BNX2X_MSG_IOV, "."); + } + + if (!*done) { + BNX2X_ERR("PF response has timed out\n"); + return -EAGAIN; + } + DP(BNX2X_MSG_SP, "Got a response from PF\n"); + return 0; +} + +static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id) +{ + u32 me_reg; + int tout = 10, interval = 100; /* Wait for 1 sec */ + + do { + /* pxp traps vf read of doorbells and returns me reg value */ + me_reg = readl(bp->doorbells); + if (GOOD_ME_REG(me_reg)) + break; + + msleep(interval); + + BNX2X_ERR("Invalid ME register value: 0x%08x\n. Is pf driver up?", + me_reg); + } while (tout-- > 0); + + if (!GOOD_ME_REG(me_reg)) { + BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg); + return -EINVAL; + } + + DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg); + + *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT; + + return 0; +} + +int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) +{ + int rc = 0, attempts = 0; + struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire; + struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp; + struct vfpf_port_phys_id_resp_tlv *phys_port_resp; + u32 vf_id; + bool resources_acquired = false; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req)); + + if (bnx2x_get_vf_id(bp, &vf_id)) { + rc = -EAGAIN; + goto out; + } + + req->vfdev_info.vf_id = vf_id; + req->vfdev_info.vf_os = 0; + + req->resc_request.num_rxqs = rx_count; + req->resc_request.num_txqs = tx_count; + req->resc_request.num_sbs = bp->igu_sb_cnt; + req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS; + req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS; + + /* pf 2 vf bulletin board address */ + req->bulletin_addr = bp->pf2vf_bulletin_mapping; + + /* Request physical port identifier */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, + CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv)); + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, + req->first_tlv.tl.length + sizeof(struct channel_tlv), + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + while (!resources_acquired) { + DP(BNX2X_MSG_SP, "attempting to acquire resources\n"); + + /* send acquire request */ + rc = bnx2x_send_msg2pf(bp, + &resp->hdr.status, + bp->vf2pf_mbox_mapping); + + /* PF timeout */ + if (rc) + goto out; + + /* copy acquire response from buffer to bp */ + memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp)); + + attempts++; + + /* test whether the PF accepted our request. If not, humble + * the request and try again. 
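The acquire loop above is a negotiation: when the PF answers NO_RESOURCE it echoes back the amounts it is willing to grant, and the VF clamps each field of its request to those amounts before retrying, giving up with -EAGAIN after VF_ACQUIRE_THRESH attempts. A compact sketch of the clamping step, with an invented resource struct standing in for the channel's:

    #include <stdint.h>

    struct resc_req {
        uint8_t num_rxqs;
        uint8_t num_txqs;
        uint8_t num_sbs;
        uint8_t num_mac_filters;
    };

    static uint8_t min_u8(uint8_t a, uint8_t b)
    {
        return a < b ? a : b;
    }

    /* "humble" the request: never ask for more than the PF last offered */
    static void humble_request(struct resc_req *req, const struct resc_req *offer)
    {
        req->num_rxqs        = min_u8(req->num_rxqs, offer->num_rxqs);
        req->num_txqs        = min_u8(req->num_txqs, offer->num_txqs);
        req->num_sbs         = min_u8(req->num_sbs, offer->num_sbs);
        req->num_mac_filters = min_u8(req->num_mac_filters,
                                      offer->num_mac_filters);
    }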
+ */ + if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) { + DP(BNX2X_MSG_SP, "resources acquired\n"); + resources_acquired = true; + } else if (bp->acquire_resp.hdr.status == + PFVF_STATUS_NO_RESOURCE && + attempts < VF_ACQUIRE_THRESH) { + DP(BNX2X_MSG_SP, + "PF unwilling to fulfill resource request. Try PF recommended amount\n"); + + /* humble our request */ + req->resc_request.num_txqs = + min(req->resc_request.num_txqs, + bp->acquire_resp.resc.num_txqs); + req->resc_request.num_rxqs = + min(req->resc_request.num_rxqs, + bp->acquire_resp.resc.num_rxqs); + req->resc_request.num_sbs = + min(req->resc_request.num_sbs, + bp->acquire_resp.resc.num_sbs); + req->resc_request.num_mac_filters = + min(req->resc_request.num_mac_filters, + bp->acquire_resp.resc.num_mac_filters); + req->resc_request.num_vlan_filters = + min(req->resc_request.num_vlan_filters, + bp->acquire_resp.resc.num_vlan_filters); + req->resc_request.num_mc_filters = + min(req->resc_request.num_mc_filters, + bp->acquire_resp.resc.num_mc_filters); + + /* Clear response buffer */ + memset(&bp->vf2pf_mbox->resp, 0, + sizeof(union pfvf_tlvs)); + } else { + /* PF reports error */ + BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n", + bp->acquire_resp.hdr.status); + rc = -EAGAIN; + goto out; + } + } + + /* Retrieve physical port id (if possible) */ + phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *) + bnx2x_search_tlv_list(bp, resp, + CHANNEL_TLV_PHYS_PORT_ID); + if (phys_port_resp) { + memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN); + bp->flags |= HAS_PHYS_PORT_ID; + } + + /* get HW info */ + bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff); + bp->link_params.chip_id = bp->common.chip_id; + bp->db_size = bp->acquire_resp.pfdev_info.db_size; + bp->common.int_block = INT_BLOCK_IGU; + bp->common.chip_port_mode = CHIP_2_PORT_MODE; + bp->igu_dsb_id = -1; + bp->mf_ov = 0; + bp->mf_mode = 0; + bp->common.flash_size = 0; + bp->flags |= + NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG; + bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs; + bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id; + strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver, + sizeof(bp->fw_ver)); + + if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr)) + memcpy(bp->dev->dev_addr, + bp->acquire_resp.resc.current_mac_addr, + ETH_ALEN); + +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); + return rc; +} + +int bnx2x_vfpf_release(struct bnx2x *bp) +{ + struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + u32 rc, vf_id; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req)); + + if (bnx2x_get_vf_id(bp, &vf_id)) { + rc = -EAGAIN; + goto out; + } + + req->vf_id = vf_id; + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + /* send release request */ + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + + if (rc) + /* PF timeout */ + goto out; + + if (resp->hdr.status == PFVF_STATUS_SUCCESS) { + /* PF released us */ + DP(BNX2X_MSG_SP, "vf released\n"); + } else { + /* PF reports error */ + BNX2X_ERR("PF failed our release request - are we out of sync? 
Response status: %d\n", + resp->hdr.status); + rc = -EAGAIN; + goto out; + } +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); + + return rc; +} + +/* Tell PF about SB addresses */ +int bnx2x_vfpf_init(struct bnx2x *bp) +{ + struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + int rc, i; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req)); + + /* status blocks */ + for_each_eth_queue(bp, i) + req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i, + status_blk_mapping); + + /* statistics - requests only supports single queue for now */ + req->stats_addr = bp->fw_stats_data_mapping + + offsetof(struct bnx2x_fw_stats_data, queue_stats); + + req->stats_stride = sizeof(struct per_queue_stats); + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + if (rc) + goto out; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + BNX2X_ERR("INIT VF failed: %d. Breaking...\n", + resp->hdr.status); + rc = -EAGAIN; + goto out; + } + + DP(BNX2X_MSG_SP, "INIT VF Succeeded\n"); +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); + + return rc; +} + +/* CLOSE VF - opposite to INIT_VF */ +void bnx2x_vfpf_close_vf(struct bnx2x *bp) +{ + struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + int i, rc; + u32 vf_id; + + /* If we haven't got a valid VF id, there is no sense to + * continue with sending messages + */ + if (bnx2x_get_vf_id(bp, &vf_id)) + goto free_irq; + + /* Close the queues */ + for_each_queue(bp, i) + bnx2x_vfpf_teardown_queue(bp, i); + + /* remove mac */ + bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false); + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req)); + + req->vf_id = vf_id; + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + + if (rc) + BNX2X_ERR("Sending CLOSE failed. 
rc was: %d\n", rc); + + else if (resp->hdr.status != PFVF_STATUS_SUCCESS) + BNX2X_ERR("Sending CLOSE failed: pf response was %d\n", + resp->hdr.status); + + bnx2x_vfpf_finalize(bp, &req->first_tlv); + +free_irq: + /* Disable HW interrupts, NAPI */ + bnx2x_netif_stop(bp, 0); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); + + /* Release IRQs */ + bnx2x_free_irq(bp); +} + +static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_queue *q) +{ + u8 cl_id = vfq_cl_id(vf, q); + u8 func_id = FW_VF_HANDLE(vf->abs_vfid); + + /* mac */ + bnx2x_init_mac_obj(bp, &q->mac_obj, + cl_id, q->cid, func_id, + bnx2x_vf_sp(bp, vf, mac_rdata), + bnx2x_vf_sp_map(bp, vf, mac_rdata), + BNX2X_FILTER_MAC_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX, + &bp->macs_pool); + /* vlan */ + bnx2x_init_vlan_obj(bp, &q->vlan_obj, + cl_id, q->cid, func_id, + bnx2x_vf_sp(bp, vf, vlan_rdata), + bnx2x_vf_sp_map(bp, vf, vlan_rdata), + BNX2X_FILTER_VLAN_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX, + &bp->vlans_pool); + + /* mcast */ + bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id, + q->cid, func_id, func_id, + bnx2x_vf_sp(bp, vf, mcast_rdata), + bnx2x_vf_sp_map(bp, vf, mcast_rdata), + BNX2X_FILTER_MCAST_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX); + + /* rss */ + bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid, + func_id, func_id, + bnx2x_vf_sp(bp, vf, rss_rdata), + bnx2x_vf_sp_map(bp, vf, rss_rdata), + BNX2X_FILTER_RSS_CONF_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX); + + vf->leading_rss = cl_id; + q->is_leading = true; + q->sp_initialized = true; +} + +/* ask the pf to open a queue for the vf */ +int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, + bool is_leading) +{ + struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + u8 fp_idx = fp->index; + u16 tpa_agg_size = 0, flags = 0; + int rc; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req)); + + /* select tpa mode to request */ + if (!fp->disable_tpa) { + flags |= VFPF_QUEUE_FLG_TPA; + flags |= VFPF_QUEUE_FLG_TPA_IPV6; + if (fp->mode == TPA_MODE_GRO) + flags |= VFPF_QUEUE_FLG_TPA_GRO; + tpa_agg_size = TPA_AGG_SIZE; + } + + if (is_leading) + flags |= VFPF_QUEUE_FLG_LEADING_RSS; + + /* calculate queue flags */ + flags |= VFPF_QUEUE_FLG_STATS; + flags |= VFPF_QUEUE_FLG_CACHE_ALIGN; + flags |= VFPF_QUEUE_FLG_VLAN; + DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); + + /* Common */ + req->vf_qid = fp_idx; + req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID; + + /* Rx */ + req->rxq.rcq_addr = fp->rx_comp_mapping; + req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE; + req->rxq.rxq_addr = fp->rx_desc_mapping; + req->rxq.sge_addr = fp->rx_sge_mapping; + req->rxq.vf_sb = fp_idx; + req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS; + req->rxq.hc_rate = bp->rx_ticks ? 
1000000/bp->rx_ticks : 0; + req->rxq.mtu = bp->dev->mtu; + req->rxq.buf_sz = fp->rx_buf_size; + req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE; + req->rxq.tpa_agg_sz = tpa_agg_size; + req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT; + req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) & + (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT; + req->rxq.flags = flags; + req->rxq.drop_flags = 0; + req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT; + req->rxq.stat_id = -1; /* No stats at the moment */ + + /* Tx */ + req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping; + req->txq.vf_sb = fp_idx; + req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0; + req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0; + req->txq.flags = flags; + req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW; + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + if (rc) + BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n", + fp_idx); + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n", + fp_idx, resp->hdr.status); + rc = -EINVAL; + } + + bnx2x_vfpf_finalize(bp, &req->first_tlv); + + return rc; +} + +static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) +{ + struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + int rc; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q, + sizeof(*req)); + + req->vf_qid = qidx; + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + + if (rc) { + BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx, + rc); + goto out; + } + + /* PF failed the transaction */ + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx, + resp->hdr.status); + rc = -EINVAL; + } + +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); + + return rc; +} + +/* request pf to add a mac for the vf */ +int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set) +{ + struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content; + int rc = 0; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, + sizeof(*req)); + + req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED; + req->vf_qid = vf_qid; + req->n_mac_vlan_filters = 1; + + req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID; + if (set) + req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC; + + /* sample bulletin board for new mac */ + bnx2x_sample_bulletin(bp); + + /* copy mac from device to request */ + memcpy(req->filters[0].mac, addr, ETH_ALEN); + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + /* send message to pf */ + rc = bnx2x_send_msg2pf(bp, 
&resp->hdr.status, bp->vf2pf_mbox_mapping); + if (rc) { + BNX2X_ERR("failed to send message to pf. rc was %d\n", rc); + goto out; + } + + /* failure may mean PF was configured with a new mac for us */ + while (resp->hdr.status == PFVF_STATUS_FAILURE) { + DP(BNX2X_MSG_IOV, + "vfpf SET MAC failed. Check bulletin board for new posts\n"); + + /* copy mac from bulletin to device */ + memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN); + + /* check if bulletin board was updated */ + if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) { + /* copy mac from device to request */ + memcpy(req->filters[0].mac, bp->dev->dev_addr, + ETH_ALEN); + + /* send message to pf */ + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, + bp->vf2pf_mbox_mapping); + } else { + /* no new info in bulletin */ + break; + } + } + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status); + rc = -EINVAL; + } +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); + + return rc; +} + +/* request pf to config rss table for vf queues */ +int bnx2x_vfpf_config_rss(struct bnx2x *bp, + struct bnx2x_config_rss_params *params) +{ + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss; + int rc = 0; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS, + sizeof(*req)); + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); + memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key)); + req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE; + req->rss_key_size = T_ETH_RSS_KEY; + req->rss_result_mask = params->rss_result_mask; + + /* flags handled individually for backward/forward compatibility */ + if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED)) + req->rss_flags |= VFPF_RSS_MODE_DISABLED; + if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR)) + req->rss_flags |= VFPF_RSS_MODE_REGULAR; + if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH)) + req->rss_flags |= VFPF_RSS_SET_SRCH; + if (params->rss_flags & (1 << BNX2X_RSS_IPV4)) + req->rss_flags |= VFPF_RSS_IPV4; + if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP)) + req->rss_flags |= VFPF_RSS_IPV4_TCP; + if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP)) + req->rss_flags |= VFPF_RSS_IPV4_UDP; + if (params->rss_flags & (1 << BNX2X_RSS_IPV6)) + req->rss_flags |= VFPF_RSS_IPV6; + if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP)) + req->rss_flags |= VFPF_RSS_IPV6_TCP; + if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP)) + req->rss_flags |= VFPF_RSS_IPV6_UDP; + + DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + /* send message to pf */ + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + if (rc) { + BNX2X_ERR("failed to send message to pf. rc was %d\n", rc); + goto out; + } + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + /* Since older drivers don't support this feature (and VF has + * no way of knowing other than failing this), don't propagate + * an error in this case. + */ + DP(BNX2X_MSG_IOV, + "Failed to send rss message to PF over VF-PF channel [%d]\n", + resp->hdr.status); + } +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); + + return rc; +} +
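bnx2x_vfpf_config_rss() above translates each driver-side rss_flags bit into its channel counterpart one by one, precisely so that the two ends of the channel can disagree about bits the other does not know. The same mapping can be written table-driven; a sketch with made-up bit values, where only the shape mirrors the driver:

    #include <stddef.h>
    #include <stdint.h>

    enum { RSS_MODE_REGULAR, RSS_IPV4, RSS_IPV4_TCP, RSS_IPV6, RSS_IPV6_TCP };

    #define CH_RSS_MODE_REGULAR (1u << 0)
    #define CH_RSS_IPV4         (1u << 1)
    #define CH_RSS_IPV4_TCP     (1u << 2)
    #define CH_RSS_IPV6         (1u << 3)
    #define CH_RSS_IPV6_TCP     (1u << 4)

    static const struct { uint32_t drv_bit; uint32_t ch_flag; } rss_xlat[] = {
        { 1u << RSS_MODE_REGULAR, CH_RSS_MODE_REGULAR },
        { 1u << RSS_IPV4,         CH_RSS_IPV4 },
        { 1u << RSS_IPV4_TCP,     CH_RSS_IPV4_TCP },
        { 1u << RSS_IPV6,         CH_RSS_IPV6 },
        { 1u << RSS_IPV6_TCP,     CH_RSS_IPV6_TCP },
    };

    /* translate only the bits both sides understand; unknown bits drop out */
    static uint32_t xlat_rss_flags(uint32_t drv_flags)
    {
        uint32_t ch = 0;
        size_t i;

        for (i = 0; i < sizeof(rss_xlat) / sizeof(rss_xlat[0]); i++)
            if (drv_flags & rss_xlat[i].drv_bit)
                ch |= rss_xlat[i].ch_flag;
        return ch;
    }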
+int bnx2x_vfpf_set_mcast(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + int rc, i = 0; + struct netdev_hw_addr *ha; + + if (bp->state != BNX2X_STATE_OPEN) { + DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); + return -EINVAL; + } + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, + sizeof(*req)); + + /* Get Rx mode requested */ + DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags); + + netdev_for_each_mc_addr(ha, dev) { + DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n", + bnx2x_mc_addr(ha)); + memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN); + i++; + } + + /* We support at most PFVF_MAX_MULTICAST_PER_VF mcast + * addresses + */ + if (i >= PFVF_MAX_MULTICAST_PER_VF) { + DP(NETIF_MSG_IFUP, + "VF supports not more than %d multicast MAC addresses\n", + PFVF_MAX_MULTICAST_PER_VF); + rc = -EINVAL; + goto out; + } + + req->n_multicast = i; + req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED; + req->vf_qid = 0; + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + if (rc) { + BNX2X_ERR("Sending a message failed: %d\n", rc); + goto out; + } + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + BNX2X_ERR("Set Rx mode/multicast failed: %d\n", + resp->hdr.status); + rc = -EINVAL; + } +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); + + return rc; +} + +int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) +{ + int mode = bp->rx_mode; + struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + int rc; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, + sizeof(*req)); + + DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode); + + /* Ignore everything except MODE_NONE */ + if (mode == BNX2X_RX_MODE_NONE) { + req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE; + } else { + /* Current PF driver will not look at the specific flags, + * but they are required when working with older drivers on hv.
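The rx_mask selection in bnx2x_vfpf_storm_rx_mode() here collapses the driver rx_mode into an accept mask: MODE_NONE becomes an explicit accept-nothing mask, and everything else becomes matched-unicast plus matched-multicast plus broadcast. A small sketch of that selection, with stand-in constants in place of the VFPF_RX_MASK_* values:

    #include <stdint.h>

    #define RX_MASK_ACCEPT_NONE              0u
    #define RX_MASK_ACCEPT_MATCHED_UNICAST   (1u << 0)
    #define RX_MASK_ACCEPT_MATCHED_MULTICAST (1u << 1)
    #define RX_MASK_ACCEPT_BROADCAST         (1u << 2)

    enum rx_mode { RX_MODE_NONE, RX_MODE_NORMAL, RX_MODE_ALLMULTI, RX_MODE_PROMISC };

    static uint32_t rx_mode_to_mask(enum rx_mode mode)
    {
        if (mode == RX_MODE_NONE)
            return RX_MASK_ACCEPT_NONE;

        /* every other mode asks for the matched-filter triple */
        return RX_MASK_ACCEPT_MATCHED_UNICAST |
               RX_MASK_ACCEPT_MATCHED_MULTICAST |
               RX_MASK_ACCEPT_BROADCAST;
    }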
+ */ + req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST; + req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST; + req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST; + } + + req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED; + req->vf_qid = 0; + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + if (rc) + BNX2X_ERR("Sending a message failed: %d\n", rc); + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status); + rc = -EINVAL; + } + + bnx2x_vfpf_finalize(bp, &req->first_tlv); + + return rc; +} + +/* General service functions */ +static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid) +{ + u32 addr = BAR_CSTRORM_INTMEM + + CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid); + + REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY); +} + +static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid) +{ + u32 addr = BAR_CSTRORM_INTMEM + + CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid); + + REG_WR8(bp, addr, 1); +} + +static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp) +{ + int i; + + for_each_vf(bp, i) + storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid)); +} + +/* enable vf_pf mailbox (aka vf-pf-channel) */ +void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid) +{ + bnx2x_vf_flr_clnup_epilog(bp, abs_vfid); + + /* enable the mailbox in the FW */ + storm_memset_vf_mbx_ack(bp, abs_vfid); + storm_memset_vf_mbx_valid(bp, abs_vfid); + + /* enable the VF access to the mailbox */ + bnx2x_vf_enable_access(bp, abs_vfid); +} + +/* this works only on !E1h */ +static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf, + dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi, + u32 vf_addr_lo, u32 len32) +{ + struct dmae_command dmae; + + if (CHIP_IS_E1x(bp)) { + BNX2X_ERR("Chip revision does not support VFs\n"); + return DMAE_NOT_RDY; + } + + if (!bp->dmae_ready) { + BNX2X_ERR("DMAE is not ready, can not copy\n"); + return DMAE_NOT_RDY; + } + + /* set opcode and fixed command fields */ + bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI); + + if (from_vf) { + dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) | + (DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) | + (DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT); + + dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT); + + dmae.src_addr_lo = vf_addr_lo; + dmae.src_addr_hi = vf_addr_hi; + dmae.dst_addr_lo = U64_LO(pf_addr); + dmae.dst_addr_hi = U64_HI(pf_addr); + } else { + dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) | + (DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) | + (DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT); + + dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT); + + dmae.src_addr_lo = U64_LO(pf_addr); + dmae.src_addr_hi = U64_HI(pf_addr); + dmae.dst_addr_lo = vf_addr_lo; + dmae.dst_addr_hi = vf_addr_hi; + } + dmae.len = len32; + + /* issue the command and wait for completion */ + return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); +} + +static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp, + struct bnx2x_virtf *vf) +{ + struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index); + u16 length, type; + + /* prepare response */ + type = mbx->first_tlv.tl.type; + length = type == CHANNEL_TLV_ACQUIRE ? 
+ sizeof(struct pfvf_acquire_resp_tlv) : + sizeof(struct pfvf_general_resp_tlv); + bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length); + bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); +} + +static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, + struct bnx2x_virtf *vf, + int vf_rc) +{ + struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index); + struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp; + dma_addr_t pf_addr; + u64 vf_addr; + int rc; + + bnx2x_dp_tlv_list(bp, resp); + DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n", + mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset); + + resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc); + + /* send response */ + vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) + + mbx->first_tlv.resp_msg_offset; + pf_addr = mbx->msg_mapping + + offsetof(struct bnx2x_vf_mbx_msg, resp); + + /* Copy the response buffer. The first u64 is written afterwards, as + * the vf is sensitive to the header being written + */ + vf_addr += sizeof(u64); + pf_addr += sizeof(u64); + rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid, + U64_HI(vf_addr), + U64_LO(vf_addr), + (sizeof(union pfvf_tlvs) - sizeof(u64))/4); + if (rc) { + BNX2X_ERR("Failed to copy response body to VF %d\n", + vf->abs_vfid); + goto mbx_error; + } + vf_addr -= sizeof(u64); + pf_addr -= sizeof(u64); + + /* ack the FW */ + storm_memset_vf_mbx_ack(bp, vf->abs_vfid); + mmiowb(); + + /* copy the response header including status-done field, + * must be last dmae, must be after FW is acked + */ + rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid, + U64_HI(vf_addr), + U64_LO(vf_addr), + sizeof(u64)/4); + + /* unlock channel mutex */ + bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type); + + if (rc) { + BNX2X_ERR("Failed to copy response status to VF %d\n", + vf->abs_vfid); + goto mbx_error; + } + return; + +mbx_error: + bnx2x_vf_release(bp, vf); +} + +static void bnx2x_vf_mbx_resp(struct bnx2x *bp, + struct bnx2x_virtf *vf, + int rc) +{ + bnx2x_vf_mbx_resp_single_tlv(bp, vf); + bnx2x_vf_mbx_resp_send_msg(bp, vf, rc); +} + +static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp, + struct bnx2x_virtf *vf, + void *buffer, + u16 *offset) +{ + struct vfpf_port_phys_id_resp_tlv *port_id; + + if (!(bp->flags & HAS_PHYS_PORT_ID)) + return; + + bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID, + sizeof(struct vfpf_port_phys_id_resp_tlv)); + + port_id = (struct vfpf_port_phys_id_resp_tlv *) + (((u8 *)buffer) + *offset); + memcpy(port_id->id, bp->phys_port_id, ETH_ALEN); + + /* Offset should continue representing the offset to the tail + * of TLV data (outside this function scope) + */ + *offset += sizeof(struct vfpf_port_phys_id_resp_tlv); +} + +static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx, int vfop_status) +{ + int i; + struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp; + struct pf_vf_resc *resc = &resp->resc; + u8 status = bnx2x_pfvf_status_codes(vfop_status); + u16 length; + + memset(resp, 0, sizeof(*resp)); + + /* fill in pfdev info */ + resp->pfdev_info.chip_num = bp->common.chip_id; + resp->pfdev_info.db_size = bp->db_size; + resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2; + resp->pfdev_info.pf_cap = (PFVF_CAP_RSS | + PFVF_CAP_TPA | + PFVF_CAP_TPA_UPDATE); + bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver, + sizeof(resp->pfdev_info.fw_ver)); + + if (status == PFVF_STATUS_NO_RESOURCE || + 
status == PFVF_STATUS_SUCCESS) { + /* set resources numbers, if status equals NO_RESOURCE these + * are max possible numbers + */ + resc->num_rxqs = vf_rxq_count(vf) ? : + bnx2x_vf_max_queue_cnt(bp, vf); + resc->num_txqs = vf_txq_count(vf) ? : + bnx2x_vf_max_queue_cnt(bp, vf); + resc->num_sbs = vf_sb_count(vf); + resc->num_mac_filters = vf_mac_rules_cnt(vf); + resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf); + resc->num_mc_filters = 0; + + if (status == PFVF_STATUS_SUCCESS) { + /* fill in the allocated resources */ + struct pf_vf_bulletin_content *bulletin = + BP_VF_BULLETIN(bp, vf->index); + + for_each_vfq(vf, i) + resc->hw_qid[i] = + vfq_qzone_id(vf, vfq_get(vf, i)); + + for_each_vf_sb(vf, i) { + resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i); + resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i); + } + + /* if a mac has been set for this vf, supply it */ + if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) { + memcpy(resc->current_mac_addr, bulletin->mac, + ETH_ALEN); + } + } + } + + DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n" + "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n", + vf->abs_vfid, + resp->pfdev_info.chip_num, + resp->pfdev_info.db_size, + resp->pfdev_info.indices_per_sb, + resp->pfdev_info.pf_cap, + resc->num_rxqs, + resc->num_txqs, + resc->num_sbs, + resc->num_mac_filters, + resc->num_vlan_filters, + resc->num_mc_filters, + resp->pfdev_info.fw_ver); + + DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ "); + for (i = 0; i < vf_rxq_count(vf); i++) + DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]); + DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ "); + for (i = 0; i < vf_sb_count(vf); i++) + DP_CONT(BNX2X_MSG_IOV, "%d:%d ", + resc->hw_sbs[i].hw_sb_id, + resc->hw_sbs[i].sb_qid); + DP_CONT(BNX2X_MSG_IOV, "]\n"); + + /* prepare response */ + length = sizeof(struct pfvf_acquire_resp_tlv); + bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length); + + /* Handle possible VF requests for physical port identifiers. 
+ * 'length' should continue to indicate the offset of the first empty + * place in the buffer (i.e., where next TLV should be inserted) + */ + if (bnx2x_search_tlv_list(bp, &mbx->msg->req, + CHANNEL_TLV_PHYS_PORT_ID)) + bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length); + + bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* send the response */ + bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status); +} + +static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + int rc; + struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire; + + /* log vfdef info */ + DP(BNX2X_MSG_IOV, + "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n", + vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os, + acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs, + acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters, + acquire->resc_request.num_vlan_filters, + acquire->resc_request.num_mc_filters); + + /* acquire the resources */ + rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request); + + /* store address of vf's bulletin board */ + vf->bulletin_map = acquire->bulletin_addr; + + /* response */ + bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc); +} + +static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + struct vfpf_init_tlv *init = &mbx->msg->req.init; + int rc; + + /* record ghost addresses from vf message */ + vf->spq_map = init->spq_addr; + vf->fw_stat_map = init->stats_addr; + vf->stats_stride = init->stats_stride; + rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr); + + /* set VF multiqueue statistics collection mode */ + if (init->flags & VFPF_INIT_FLG_STATS_COALESCE) + vf->cfg_flags |= VF_CFG_STATS_COALESCE; + + /* response */ + bnx2x_vf_mbx_resp(bp, vf, rc); +} + +/* convert MBX queue-flags to standard SP queue-flags */ +static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags, + unsigned long *sp_q_flags) +{ + if (mbx_q_flags & VFPF_QUEUE_FLG_TPA) + __set_bit(BNX2X_Q_FLG_TPA, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6) + __set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO) + __set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_STATS) + __set_bit(BNX2X_Q_FLG_STATS, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN) + __set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_COS) + __set_bit(BNX2X_Q_FLG_COS, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_HC) + __set_bit(BNX2X_Q_FLG_HC, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_DHC) + __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS) + __set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags); + + /* outer vlan removal is set according to PF's multi function mode */ + if (IS_MF_SD(bp)) + __set_bit(BNX2X_Q_FLG_OV, sp_q_flags); +} + +static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q; + struct bnx2x_vf_queue_construct_params qctor; + int rc = 0; + + /* verify vf_qid */ + if (setup_q->vf_qid >= vf_rxq_count(vf)) { + BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n", + setup_q->vf_qid, vf_rxq_count(vf)); + rc = -EINVAL; + goto response; + } + + /* tx queues must be setup alongside rx queues thus if the rx queue + * is not 
marked as valid there's nothing to do. + */ + if (setup_q->param_valid & (VFPF_RXQ_VALID|VFPF_TXQ_VALID)) { + struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid); + unsigned long q_type = 0; + + struct bnx2x_queue_init_params *init_p; + struct bnx2x_queue_setup_params *setup_p; + + if (bnx2x_vfq_is_leading(q)) + bnx2x_leading_vfq_init(bp, vf, q); + + /* re-init the VF operation context */ + memset(&qctor, 0 , + sizeof(struct bnx2x_vf_queue_construct_params)); + setup_p = &qctor.prep_qsetup; + init_p = &qctor.qstate.params.init; + + /* activate immediately */ + __set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags); + + if (setup_q->param_valid & VFPF_TXQ_VALID) { + struct bnx2x_txq_setup_params *txq_params = + &setup_p->txq_params; + + __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); + + /* save sb resource index */ + q->sb_idx = setup_q->txq.vf_sb; + + /* tx init */ + init_p->tx.hc_rate = setup_q->txq.hc_rate; + init_p->tx.sb_cq_index = setup_q->txq.sb_index; + + bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags, + &init_p->tx.flags); + + /* tx setup - flags */ + bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags, + &setup_p->flags); + + /* tx setup - general, nothing */ + + /* tx setup - tx */ + txq_params->dscr_map = setup_q->txq.txq_addr; + txq_params->sb_cq_index = setup_q->txq.sb_index; + txq_params->traffic_type = setup_q->txq.traffic_type; + + bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p, + q->index, q->sb_idx); + } + + if (setup_q->param_valid & VFPF_RXQ_VALID) { + struct bnx2x_rxq_setup_params *rxq_params = + &setup_p->rxq_params; + + __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); + + /* Note: there is no support for different SBs + * for TX and RX + */ + q->sb_idx = setup_q->rxq.vf_sb; + + /* rx init */ + init_p->rx.hc_rate = setup_q->rxq.hc_rate; + init_p->rx.sb_cq_index = setup_q->rxq.sb_index; + bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags, + &init_p->rx.flags); + + /* rx setup - flags */ + bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags, + &setup_p->flags); + + /* rx setup - general */ + setup_p->gen_params.mtu = setup_q->rxq.mtu; + + /* rx setup - rx */ + rxq_params->drop_flags = setup_q->rxq.drop_flags; + rxq_params->dscr_map = setup_q->rxq.rxq_addr; + rxq_params->sge_map = setup_q->rxq.sge_addr; + rxq_params->rcq_map = setup_q->rxq.rcq_addr; + rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr; + rxq_params->buf_sz = setup_q->rxq.buf_sz; + rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz; + rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt; + rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz; + rxq_params->cache_line_log = + setup_q->rxq.cache_line_log; + rxq_params->sb_cq_index = setup_q->rxq.sb_index; + + /* rx setup - multicast engine */ + if (bnx2x_vfq_is_leading(q)) { + u8 mcast_id = FW_VF_HANDLE(vf->abs_vfid); + + rxq_params->mcast_engine_id = mcast_id; + __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags); + } + + bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p, + q->index, q->sb_idx); + } + /* complete the preparations */ + bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type); + + rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor); + if (rc) + goto response; + } +response: + bnx2x_vf_mbx_resp(bp, vf, rc); +} + +static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct vfpf_set_q_filters_tlv *tlv, + struct bnx2x_vf_mac_vlan_filters **pfl, + u32 type_flag) +{ + int i, j; + struct bnx2x_vf_mac_vlan_filters *fl = NULL; + size_t fsz; + + fsz = tlv->n_mac_vlan_filters * + sizeof(struct bnx2x_vf_mac_vlan_filter) + + sizeof(struct bnx2x_vf_mac_vlan_filters); + + 
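/* fsz: one struct bnx2x_vf_mac_vlan_filters header plus room for + * n_mac_vlan_filters entries; kzalloc leaves the list zeroed + */ +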
fl = kzalloc(fsz, GFP_KERNEL); + if (!fl) + return -ENOMEM; + + for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) { + struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i]; + + if ((msg_filter->flags & type_flag) != type_flag) + continue; + if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) { + fl->filters[j].mac = msg_filter->mac; + fl->filters[j].type = BNX2X_VF_FILTER_MAC; + } else { + fl->filters[j].vid = msg_filter->vlan_tag; + fl->filters[j].type = BNX2X_VF_FILTER_VLAN; + } + fl->filters[j].add = + (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ? + true : false; + fl->count++; + j++; + } + if (!fl->count) + kfree(fl); + else + *pfl = fl; + + return 0; +} + +static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx, + struct vfpf_q_mac_vlan_filter *filter) +{ + DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x", idx, filter->flags); + if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID) + DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag); + if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID) + DP_CONT(msglvl, ", MAC=%pM", filter->mac); + DP_CONT(msglvl, "\n"); +} + +static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl, + struct vfpf_set_q_filters_tlv *filters) +{ + int i; + + if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) + for (i = 0; i < filters->n_mac_vlan_filters; i++) + bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i, + &filters->filters[i]); + + if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) + DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask); + + if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) + for (i = 0; i < filters->n_multicast; i++) + DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]); +} + +#define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID +#define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID + +static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + int rc = 0; + + struct vfpf_set_q_filters_tlv *msg = + &BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters; + + /* check for any mac/vlan changes */ + if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) { + /* build mac list */ + struct bnx2x_vf_mac_vlan_filters *fl = NULL; + + rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, + VFPF_MAC_FILTER); + if (rc) + goto op_err; + + if (fl) { + + /* set mac list */ + rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, + msg->vf_qid, + false); + if (rc) + goto op_err; + } + + /* build vlan list */ + fl = NULL; + + rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, + VFPF_VLAN_FILTER); + if (rc) + goto op_err; + + if (fl) { + /* set vlan list */ + rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, + msg->vf_qid, + false); + if (rc) + goto op_err; + } + } + + if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { + unsigned long accept = 0; + struct pf_vf_bulletin_content *bulletin = + BP_VF_BULLETIN(bp, vf->index); + + /* Ignore VF requested mode; instead set a regular mode */ + if (msg->rx_mask != VFPF_RX_MASK_ACCEPT_NONE) { + __set_bit(BNX2X_ACCEPT_UNICAST, &accept); + __set_bit(BNX2X_ACCEPT_MULTICAST, &accept); + __set_bit(BNX2X_ACCEPT_BROADCAST, &accept); + } + + /* A packet addressed to the vf's mac should be accepted + * with any vlan, unless a vlan has already been + * configured.
+ */ + if (!(bulletin->valid_bitmap & (1 << VLAN_VALID))) + __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); + + /* set rx-mode */ + rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept); + if (rc) + goto op_err; + } + + if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) { + /* set mcasts */ + rc = bnx2x_vf_mcast(bp, vf, msg->multicast, + msg->n_multicast, false); + if (rc) + goto op_err; + } +op_err: + if (rc) + BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n", + vf->abs_vfid, msg->vf_qid, rc); + return rc; +} + +static int bnx2x_filters_validate_mac(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct vfpf_set_q_filters_tlv *filters) +{ + struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index); + int rc = 0; + + /* if a mac was already set for this VF via the set vf mac ndo, we only + * accept mac configurations of that mac. Why accept them at all? + * because PF may have been unable to configure the mac at the time + * since queue was not set up. + */ + if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) { + /* once a mac was set by ndo can only accept a single mac... */ + if (filters->n_mac_vlan_filters > 1) { + BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n", + vf->abs_vfid); + rc = -EPERM; + goto response; + } + + /* ...and only the mac set by the ndo */ + if (filters->n_mac_vlan_filters == 1 && + !ether_addr_equal(filters->filters->mac, bulletin->mac)) { + BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n", + vf->abs_vfid); + + rc = -EPERM; + goto response; + } + } + +response: + return rc; +} + +static int bnx2x_filters_validate_vlan(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct vfpf_set_q_filters_tlv *filters) +{ + struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index); + int rc = 0; + + /* if vlan was set by hypervisor we don't allow guest to config vlan */ + if (bulletin->valid_bitmap & 1 << VLAN_VALID) { + int i; + + /* search for vlan filters */ + for (i = 0; i < filters->n_mac_vlan_filters; i++) { + if (filters->filters[i].flags & + VFPF_Q_FILTER_VLAN_TAG_VALID) { + BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. 
Aborting request\n", + vf->abs_vfid); + rc = -EPERM; + goto response; + } + } + } + + /* verify vf_qid */ + if (filters->vf_qid > vf_rxq_count(vf)) { + rc = -EPERM; + goto response; + } + +response: + return rc; +} + +static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters; + int rc; + + rc = bnx2x_filters_validate_mac(bp, vf, filters); + if (rc) + goto response; + + rc = bnx2x_filters_validate_vlan(bp, vf, filters); + if (rc) + goto response; + + DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n", + vf->abs_vfid, + filters->vf_qid); + + /* print q_filter message */ + bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters); + + rc = bnx2x_vf_mbx_qfilters(bp, vf); +response: + bnx2x_vf_mbx_resp(bp, vf, rc); +} + +static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + int qid = mbx->msg->req.q_op.vf_qid; + int rc; + + DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n", + vf->abs_vfid, qid); + + rc = bnx2x_vf_queue_teardown(bp, vf, qid); + bnx2x_vf_mbx_resp(bp, vf, rc); +} + +static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + int rc; + + DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid); + + rc = bnx2x_vf_close(bp, vf); + bnx2x_vf_mbx_resp(bp, vf, rc); +} + +static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + int rc; + + DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid); + + rc = bnx2x_vf_free(bp, vf); + bnx2x_vf_mbx_resp(bp, vf, rc); +} + +static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + struct bnx2x_config_rss_params rss; + struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss; + int rc = 0; + + if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE || + rss_tlv->rss_key_size != T_ETH_RSS_KEY) { + BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n", + vf->index); + rc = -EINVAL; + goto mbx_resp; + } + + memset(&rss, 0, sizeof(struct bnx2x_config_rss_params)); + + /* set vfop params according to rss tlv */ + memcpy(rss.ind_table, rss_tlv->ind_table, + T_ETH_INDIRECTION_TABLE_SIZE); + memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key)); + rss.rss_obj = &vf->rss_conf_obj; + rss.rss_result_mask = rss_tlv->rss_result_mask; + + /* flags handled individually for backward/forward compatibility */ + rss.rss_flags = 0; + rss.ramrod_flags = 0; + + if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED) + __set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR) + __set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH) + __set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV4) + __set_bit(BNX2X_RSS_IPV4, &rss.rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) + __set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) + __set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV6) + __set_bit(BNX2X_RSS_IPV6, &rss.rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) + __set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP) + __set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags); + + if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) && + rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) || +
(!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) && + rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) { + BNX2X_ERR("about to hit a FW assert. aborting...\n"); + rc = -EINVAL; + goto mbx_resp; + } + + rc = bnx2x_vf_rss_update(bp, vf, &rss); +mbx_resp: + bnx2x_vf_mbx_resp(bp, vf, rc); +} + +static int bnx2x_validate_tpa_params(struct bnx2x *bp, + struct vfpf_tpa_tlv *tpa_tlv) +{ + int rc = 0; + + if (tpa_tlv->tpa_client_info.max_sges_for_packet > + U_ETH_MAX_SGES_FOR_PACKET) { + rc = -EINVAL; + BNX2X_ERR("TPA update: max_sges received %d, max is %d\n", + tpa_tlv->tpa_client_info.max_sges_for_packet, + U_ETH_MAX_SGES_FOR_PACKET); + } + + if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) { + rc = -EINVAL; + BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n", + tpa_tlv->tpa_client_info.max_tpa_queues, + MAX_AGG_QS(bp)); + } + + return rc; +} + +static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + struct bnx2x_queue_update_tpa_params vf_op_params; + struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa; + int rc = 0; + + memset(&vf_op_params, 0, sizeof(vf_op_params)); + + if (bnx2x_validate_tpa_params(bp, tpa_tlv)) + goto mbx_resp; + + vf_op_params.complete_on_both_clients = + tpa_tlv->tpa_client_info.complete_on_both_clients; + vf_op_params.dont_verify_thr = + tpa_tlv->tpa_client_info.dont_verify_thr; + vf_op_params.max_agg_sz = + tpa_tlv->tpa_client_info.max_agg_size; + vf_op_params.max_sges_pkt = + tpa_tlv->tpa_client_info.max_sges_for_packet; + vf_op_params.max_tpa_queues = + tpa_tlv->tpa_client_info.max_tpa_queues; + vf_op_params.sge_buff_sz = + tpa_tlv->tpa_client_info.sge_buff_size; + vf_op_params.sge_pause_thr_high = + tpa_tlv->tpa_client_info.sge_pause_thr_high; + vf_op_params.sge_pause_thr_low = + tpa_tlv->tpa_client_info.sge_pause_thr_low; + vf_op_params.tpa_mode = + tpa_tlv->tpa_client_info.tpa_mode; + vf_op_params.update_ipv4 = + tpa_tlv->tpa_client_info.update_ipv4; + vf_op_params.update_ipv6 = + tpa_tlv->tpa_client_info.update_ipv6; + + rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params); + +mbx_resp: + bnx2x_vf_mbx_resp(bp, vf, rc); +} + +/* dispatch request */ +static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + int i; + + /* check if tlv type is known */ + if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) { + /* Lock the per vf op mutex and note the locker's identity. + * The unlock will take place in mbx response. + */ + bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type); + + /* switch on the opcode */ + switch (mbx->first_tlv.tl.type) { + case CHANNEL_TLV_ACQUIRE: + bnx2x_vf_mbx_acquire(bp, vf, mbx); + return; + case CHANNEL_TLV_INIT: + bnx2x_vf_mbx_init_vf(bp, vf, mbx); + return; + case CHANNEL_TLV_SETUP_Q: + bnx2x_vf_mbx_setup_q(bp, vf, mbx); + return; + case CHANNEL_TLV_SET_Q_FILTERS: + bnx2x_vf_mbx_set_q_filters(bp, vf, mbx); + return; + case CHANNEL_TLV_TEARDOWN_Q: + bnx2x_vf_mbx_teardown_q(bp, vf, mbx); + return; + case CHANNEL_TLV_CLOSE: + bnx2x_vf_mbx_close_vf(bp, vf, mbx); + return; + case CHANNEL_TLV_RELEASE: + bnx2x_vf_mbx_release_vf(bp, vf, mbx); + return; + case CHANNEL_TLV_UPDATE_RSS: + bnx2x_vf_mbx_update_rss(bp, vf, mbx); + return; + case CHANNEL_TLV_UPDATE_TPA: + bnx2x_vf_mbx_update_tpa(bp, vf, mbx); + return; + } + + } else { + /* unknown TLV - this may belong to a VF driver from the future + * - a version written after this PF driver was written, which + * supports features unknown as of yet. 
Too bad since we don't + * support them. Or this may be because someone wrote a crappy + * VF driver and is sending garbage over the channel. + */ + BNX2X_ERR("unknown TLV. type %d length %d vf->state was %d. first 20 bytes of mailbox buffer:\n", + mbx->first_tlv.tl.type, mbx->first_tlv.tl.length, + vf->state); + for (i = 0; i < 20; i++) + DP_CONT(BNX2X_MSG_IOV, "%x ", + mbx->msg->req.tlv_buf_size.tlv_buffer[i]); + } + + /* can we respond to VF (do we have an address for it?) */ + if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) { + /* notify the VF that we do not support this request */ + bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED); + } else { + /* can't send a response since this VF is unknown to us + * just ack the FW to release the mailbox and unlock + * the channel. + */ + storm_memset_vf_mbx_ack(bp, vf->abs_vfid); + /* Firmware ack should be written before unlocking channel */ + mmiowb(); + bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type); + } +} + +void bnx2x_vf_mbx_schedule(struct bnx2x *bp, + struct vf_pf_event_data *vfpf_event) +{ + u8 vf_idx; + + DP(BNX2X_MSG_IOV, + "vf pf event received: vfid %d, address_hi %x, address lo %x", + vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo); + /* Sanity checks consider removing later */ + + /* check if the vf_id is valid */ + if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf > + BNX2X_NR_VIRTFN(bp)) { + BNX2X_ERR("Illegal vf_id %d max allowed: %d\n", + vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp)); + return; + } + + vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id); + + /* Update VFDB with current message and schedule its handling */ + mutex_lock(&BP_VFDB(bp)->event_mutex); + BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi; + BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo; + BP_VFDB(bp)->event_occur |= (1ULL << vf_idx); + mutex_unlock(&BP_VFDB(bp)->event_mutex); + + bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG); +} + +/* handle new vf-pf messages */ +void bnx2x_vf_mbx(struct bnx2x *bp) +{ + struct bnx2x_vfdb *vfdb = BP_VFDB(bp); + u64 events; + u8 vf_idx; + int rc; + + if (!vfdb) + return; + + mutex_lock(&vfdb->event_mutex); + events = vfdb->event_occur; + vfdb->event_occur = 0; + mutex_unlock(&vfdb->event_mutex); + + for_each_vf(bp, vf_idx) { + struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx); + struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); + + /* Handle VFs which have pending events */ + if (!(events & (1ULL << vf_idx))) + continue; + + DP(BNX2X_MSG_IOV, + "Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n", + vf_idx, mbx->vf_addr_hi, mbx->vf_addr_lo, + mbx->first_tlv.resp_msg_offset); + + /* dmae to get the VF request */ + rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, + vf->abs_vfid, mbx->vf_addr_hi, + mbx->vf_addr_lo, + sizeof(union vfpf_tlvs)/4); + if (rc) { + BNX2X_ERR("Failed to copy request VF %d\n", + vf->abs_vfid); + bnx2x_vf_release(bp, vf); + return; + } + + /* process the VF message header */ + mbx->first_tlv = mbx->msg->req.first_tlv; + + /* Clean response buffer to refrain from falsely + * seeing chains. 
+ */ + memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs)); + + /* dispatch the request (will prepare the response) */ + bnx2x_vf_mbx_request(bp, vf, mbx); + } +} + +/* propagate local bulletin board to vf */ +int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf) +{ + struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf); + dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping + + vf * BULLETIN_CONTENT_SIZE; + dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map); + int rc; + + /* can only update vf after init took place */ + if (bnx2x_vf(bp, vf, state) != VF_ENABLED && + bnx2x_vf(bp, vf, state) != VF_ACQUIRED) + return 0; + + /* increment bulletin board version and compute crc */ + bulletin->version++; + bulletin->length = BULLETIN_CONTENT_SIZE; + bulletin->crc = bnx2x_crc_vf_bulletin(bp, bulletin); + + /* propagate bulletin board via dmae to vm memory */ + rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, + bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr), + U64_LO(vf_addr), bulletin->length / 4); + return rc; +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h new file mode 100644 index 00000000000..e21e706762c --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h @@ -0,0 +1,434 @@ +/* bnx2x_vfpf.h: Broadcom Everest network driver. + * + * Copyright (c) 2011-2013 Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. 
+ * + * Maintained by: Ariel Elior <ariel.elior@qlogic.com> + * Written by: Ariel Elior <ariel.elior@qlogic.com> + */ +#ifndef VF_PF_IF_H +#define VF_PF_IF_H + +#ifdef CONFIG_BNX2X_SRIOV + +/* Common definitions for all HVs */ +struct vf_pf_resc_request { + u8 num_rxqs; + u8 num_txqs; + u8 num_sbs; + u8 num_mac_filters; + u8 num_vlan_filters; + u8 num_mc_filters; /* No limit so superfluous */ +}; + +struct hw_sb_info { + u8 hw_sb_id; /* aka absolute igu id, used to ack the sb */ + u8 sb_qid; /* used to update DHC for sb */ +}; + +/* HW VF-PF channel definitions + * A.K.A VF-PF mailbox + */ +#define TLV_BUFFER_SIZE 1024 +#define PF_VF_BULLETIN_SIZE 512 + +#define VFPF_QUEUE_FLG_TPA 0x0001 +#define VFPF_QUEUE_FLG_TPA_IPV6 0x0002 +#define VFPF_QUEUE_FLG_TPA_GRO 0x0004 +#define VFPF_QUEUE_FLG_CACHE_ALIGN 0x0008 +#define VFPF_QUEUE_FLG_STATS 0x0010 +#define VFPF_QUEUE_FLG_OV 0x0020 +#define VFPF_QUEUE_FLG_VLAN 0x0040 +#define VFPF_QUEUE_FLG_COS 0x0080 +#define VFPF_QUEUE_FLG_HC 0x0100 +#define VFPF_QUEUE_FLG_DHC 0x0200 +#define VFPF_QUEUE_FLG_LEADING_RSS 0x0400 + +#define VFPF_QUEUE_DROP_IP_CS_ERR (1 << 0) +#define VFPF_QUEUE_DROP_TCP_CS_ERR (1 << 1) +#define VFPF_QUEUE_DROP_TTL0 (1 << 2) +#define VFPF_QUEUE_DROP_UDP_CS_ERR (1 << 3) + +#define VFPF_RX_MASK_ACCEPT_NONE 0x00000000 +#define VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST 0x00000001 +#define VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST 0x00000002 +#define VFPF_RX_MASK_ACCEPT_ALL_UNICAST 0x00000004 +#define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST 0x00000008 +#define VFPF_RX_MASK_ACCEPT_BROADCAST 0x00000010 +#define BULLETIN_CONTENT_SIZE (sizeof(struct pf_vf_bulletin_content)) +#define BULLETIN_ATTEMPTS 5 /* crc failures before throwing towel */ +#define BULLETIN_CRC_SEED 0 + +enum { + PFVF_STATUS_WAITING = 0, + PFVF_STATUS_SUCCESS, + PFVF_STATUS_FAILURE, + PFVF_STATUS_NOT_SUPPORTED, + PFVF_STATUS_NO_RESOURCE +}; + +/* vf pf channel tlvs */ +/* general tlv header (used for both vf->pf request and pf->vf response) */ +struct channel_tlv { + u16 type; + u16 length; +}; + +/* header of first vf->pf tlv carries the offset used to calculate response + * buffer address + */ +struct vfpf_first_tlv { + struct channel_tlv tl; + u32 resp_msg_offset; +}; + +/* header of pf->vf tlvs, carries the status of handling the request */ +struct pfvf_tlv { + struct channel_tlv tl; + u8 status; + u8 padding[3]; +}; + +/* response tlv used for most tlvs */ +struct pfvf_general_resp_tlv { + struct pfvf_tlv hdr; +}; + +/* used to terminate and pad a tlv list */ +struct channel_list_end_tlv { + struct channel_tlv tl; + u8 padding[4]; +}; + +/* Acquire */ +struct vfpf_acquire_tlv { + struct vfpf_first_tlv first_tlv; + + struct vf_pf_vfdev_info { + /* the following fields are for debug purposes */ + u8 vf_id; /* ME register value */ + u8 vf_os; /* e.g. 
Linux, W2K8 */ + u8 padding[2]; + } vfdev_info; + + struct vf_pf_resc_request resc_request; + + aligned_u64 bulletin_addr; +}; + +/* simple operation request on queue */ +struct vfpf_q_op_tlv { + struct vfpf_first_tlv first_tlv; + u8 vf_qid; + u8 padding[3]; +}; + +/* receive side scaling tlv */ +struct vfpf_rss_tlv { + struct vfpf_first_tlv first_tlv; + u32 rss_flags; +#define VFPF_RSS_MODE_DISABLED (1 << 0) +#define VFPF_RSS_MODE_REGULAR (1 << 1) +#define VFPF_RSS_SET_SRCH (1 << 2) +#define VFPF_RSS_IPV4 (1 << 3) +#define VFPF_RSS_IPV4_TCP (1 << 4) +#define VFPF_RSS_IPV4_UDP (1 << 5) +#define VFPF_RSS_IPV6 (1 << 6) +#define VFPF_RSS_IPV6_TCP (1 << 7) +#define VFPF_RSS_IPV6_UDP (1 << 8) + u8 rss_result_mask; + u8 ind_table_size; + u8 rss_key_size; + u8 padding; + u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; + u32 rss_key[T_ETH_RSS_KEY]; /* hash values */ +}; + +/* acquire response tlv - carries the allocated resources */ +struct pfvf_acquire_resp_tlv { + struct pfvf_tlv hdr; + struct pf_vf_pfdev_info { + u32 chip_num; + u32 pf_cap; +#define PFVF_CAP_RSS 0x00000001 +#define PFVF_CAP_DHC 0x00000002 +#define PFVF_CAP_TPA 0x00000004 +#define PFVF_CAP_TPA_UPDATE 0x00000008 + char fw_ver[32]; + u16 db_size; + u8 indices_per_sb; + u8 padding; + } pfdev_info; + struct pf_vf_resc { + /* in case of status NO_RESOURCE in message hdr, pf will fill + * this struct with suggested amount of resources for next + * acquire request + */ +#define PFVF_MAX_QUEUES_PER_VF 16 +#define PFVF_MAX_SBS_PER_VF 16 + struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF]; + u8 hw_qid[PFVF_MAX_QUEUES_PER_VF]; + u8 num_rxqs; + u8 num_txqs; + u8 num_sbs; + u8 num_mac_filters; + u8 num_vlan_filters; + u8 num_mc_filters; + u8 permanent_mac_addr[ETH_ALEN]; + u8 current_mac_addr[ETH_ALEN]; + u8 padding[2]; + } resc; +}; + +struct vfpf_port_phys_id_resp_tlv { + struct channel_tlv tl; + u8 id[ETH_ALEN]; + u8 padding[2]; +}; + +#define VFPF_INIT_FLG_STATS_COALESCE (1 << 0) /* when set the VFs queues + * stats will be coalesced on + * the leading RSS queue + */ + +/* Init VF */ +struct vfpf_init_tlv { + struct vfpf_first_tlv first_tlv; + aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; /* vf_sb based */ + aligned_u64 spq_addr; + aligned_u64 stats_addr; + u16 stats_stride; + u32 flags; + u32 padding[2]; +}; + +/* Setup Queue */ +struct vfpf_setup_q_tlv { + struct vfpf_first_tlv first_tlv; + + struct vf_pf_rxq_params { + /* physical addresses */ + aligned_u64 rcq_addr; + aligned_u64 rcq_np_addr; + aligned_u64 rxq_addr; + aligned_u64 sge_addr; + + /* sb + hc info */ + u8 vf_sb; /* index in hw_sbs[] */ + u8 sb_index; /* Index in the SB */ + u16 hc_rate; /* desired interrupts per sec. */ + /* valid iff VFPF_QUEUE_FLG_HC */ + /* rx buffer info */ + u16 mtu; + u16 buf_sz; + u16 flags; /* VFPF_QUEUE_FLG_X flags */ + u16 stat_id; /* valid iff VFPF_QUEUE_FLG_STATS */ + + /* valid iff VFPF_QUEUE_FLG_TPA */ + u16 sge_buf_sz; + u16 tpa_agg_sz; + u8 max_sge_pkt; + + u8 drop_flags; /* VFPF_QUEUE_DROP_X, for Linux VMs + * all the flags are turned off + */ + + u8 cache_line_log; /* VFPF_QUEUE_FLG_CACHE_ALIGN */ + u8 padding; + } rxq; + + struct vf_pf_txq_params { + /* physical addresses */ + aligned_u64 txq_addr; + + /* sb + hc info */ + u8 vf_sb; /* index in hw_sbs[] */ + u8 sb_index; /* Index in the SB */ + u16 hc_rate; /* desired interrupts per sec. 
*/ + /* valid iff VFPF_QUEUE_FLG_HC */ + u32 flags; /* VFPF_QUEUE_FLG_X flags */ + u16 stat_id; /* valid iff VFPF_QUEUE_FLG_STATS */ + u8 traffic_type; /* see in setup_context() */ + u8 padding; + } txq; + + u8 vf_qid; /* index in hw_qid[] */ + u8 param_valid; +#define VFPF_RXQ_VALID 0x01 +#define VFPF_TXQ_VALID 0x02 + u8 padding[2]; +}; + +/* Set Queue Filters */ +struct vfpf_q_mac_vlan_filter { + u32 flags; +#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01 +#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02 +#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */ + u8 mac[ETH_ALEN]; + u16 vlan_tag; +}; + +/* configure queue filters */ +struct vfpf_set_q_filters_tlv { + struct vfpf_first_tlv first_tlv; + + u32 flags; +#define VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED 0x01 +#define VFPF_SET_Q_FILTERS_MULTICAST_CHANGED 0x02 +#define VFPF_SET_Q_FILTERS_RX_MASK_CHANGED 0x04 + + u8 vf_qid; /* index in hw_qid[] */ + u8 n_mac_vlan_filters; + u8 n_multicast; + u8 padding; + +#define PFVF_MAX_MAC_FILTERS 16 +#define PFVF_MAX_VLAN_FILTERS 16 +#define PFVF_MAX_FILTERS (PFVF_MAX_MAC_FILTERS +\ + PFVF_MAX_VLAN_FILTERS) + struct vfpf_q_mac_vlan_filter filters[PFVF_MAX_FILTERS]; + +#define PFVF_MAX_MULTICAST_PER_VF 32 + u8 multicast[PFVF_MAX_MULTICAST_PER_VF][ETH_ALEN]; + + u32 rx_mask; /* see mask constants at the top of the file */ +}; + +struct vfpf_tpa_tlv { + struct vfpf_first_tlv first_tlv; + + struct vf_pf_tpa_client_info { + aligned_u64 sge_addr[PFVF_MAX_QUEUES_PER_VF]; + u8 update_ipv4; + u8 update_ipv6; + u8 max_tpa_queues; + u8 max_sges_for_packet; + u8 complete_on_both_clients; + u8 dont_verify_thr; + u8 tpa_mode; + u16 sge_buff_size; + u16 max_agg_size; + u16 sge_pause_thr_low; + u16 sge_pause_thr_high; + } tpa_client_info; +}; + +/* close VF (disable VF) */ +struct vfpf_close_tlv { + struct vfpf_first_tlv first_tlv; + u16 vf_id; /* for debug */ + u8 padding[2]; +}; + +/* release the VF's acquired resources */ +struct vfpf_release_tlv { + struct vfpf_first_tlv first_tlv; + u16 vf_id; + u8 padding[2]; +}; + +struct tlv_buffer_size { + u8 tlv_buffer[TLV_BUFFER_SIZE]; +}; + +union vfpf_tlvs { + struct vfpf_first_tlv first_tlv; + struct vfpf_acquire_tlv acquire; + struct vfpf_init_tlv init; + struct vfpf_close_tlv close; + struct vfpf_q_op_tlv q_op; + struct vfpf_setup_q_tlv setup_q; + struct vfpf_set_q_filters_tlv set_q_filters; + struct vfpf_release_tlv release; + struct vfpf_rss_tlv update_rss; + struct vfpf_tpa_tlv update_tpa; + struct channel_list_end_tlv list_end; + struct tlv_buffer_size tlv_buf_size; +}; + +union pfvf_tlvs { + struct pfvf_general_resp_tlv general_resp; + struct pfvf_acquire_resp_tlv acquire_resp; + struct channel_list_end_tlv list_end; + struct tlv_buffer_size tlv_buf_size; +}; + +/* This is a structure which is allocated in the VF, which the PF may update + * when it deems it necessary to do so. The bulletin board is sampled + * periodically by the VF. A copy per VF is maintained in the PF (to prevent + * loss of data upon multiple updates (or the need for read modify write)). 
+ */ +struct pf_vf_bulletin_size { + u8 size[PF_VF_BULLETIN_SIZE]; +}; + +struct pf_vf_bulletin_content { + u32 crc; /* crc of structure to ensure it is not in + * mid-update + */ + u16 version; + u16 length; + + aligned_u64 valid_bitmap; /* bitmap indicating which fields + * hold valid values + */ + +#define MAC_ADDR_VALID 0 /* alert the vf that a new mac address + * is available for it + */ +#define VLAN_VALID 1 /* when set, the vf should not access + * the vfpf channel + */ +#define CHANNEL_DOWN 2 /* vfpf channel is disabled. VFs are not + * to attempt to send messages on the + * channel after this bit is set + */ + u8 mac[ETH_ALEN]; + u8 mac_padding[2]; + + u16 vlan; + u8 vlan_padding[6]; +}; + +union pf_vf_bulletin { + struct pf_vf_bulletin_content content; + struct pf_vf_bulletin_size size; +}; + +#define MAX_TLVS_IN_LIST 50 + +enum channel_tlvs { + CHANNEL_TLV_NONE, + CHANNEL_TLV_ACQUIRE, + CHANNEL_TLV_INIT, + CHANNEL_TLV_SETUP_Q, + CHANNEL_TLV_SET_Q_FILTERS, + CHANNEL_TLV_ACTIVATE_Q, + CHANNEL_TLV_DEACTIVATE_Q, + CHANNEL_TLV_TEARDOWN_Q, + CHANNEL_TLV_CLOSE, + CHANNEL_TLV_RELEASE, + CHANNEL_TLV_UPDATE_RSS_DEPRECATED, + CHANNEL_TLV_PF_RELEASE_VF, + CHANNEL_TLV_LIST_END, + CHANNEL_TLV_FLR, + CHANNEL_TLV_PF_SET_MAC, + CHANNEL_TLV_PF_SET_VLAN, + CHANNEL_TLV_UPDATE_RSS, + CHANNEL_TLV_PHYS_PORT_ID, + CHANNEL_TLV_UPDATE_TPA, + CHANNEL_TLV_MAX +}; + +#endif /* CONFIG_BNX2X_SRIOV */ +#endif /* VF_PF_IF_H */
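Editor's note: the VF-side request functions in this patch all follow one calling convention: prep the first TLV (which also claims the mailbox), append the list-terminating TLV, send, check the PF's status word, and always finalize. A minimal sketch of that pattern, shown on the simple queue-op TLV; the function name example_vfpf_teardown_q is hypothetical, everything else is taken from the code above:

static int example_vfpf_teardown_q(struct bnx2x *bp, u8 vf_qid)
{
	struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
	struct pfvf_general_resp_tlv *resp =
		&bp->vf2pf_mbox->resp.general_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
			sizeof(*req));
	req->vf_qid = vf_qid;

	/* every request list is closed by a list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list, then send and interpret the PF status */
	bnx2x_dp_tlv_list(bp, req);
	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = -EINVAL;

	/* finalize must run on every path that ran prep */
	bnx2x_vfpf_finalize(bp, &req->first_tlv);
	return rc;
}

bnx2x_vfpf_storm_rx_mode() and bnx2x_vfpf_set_mcast() above are instances of exactly this shape, differing only in the request TLV they fill in.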

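Editor's note: the bulletin board relies on its crc word to detect torn reads, since the PF may rewrite the board while the VF is copying it. The sketch below shows one plausible VF-side validity check, assuming bnx2x_crc_vf_bulletin() (whose body is not part of this diff) is a crc32 over everything that follows the crc word, as the seed and retry constants in the header suggest; example_bulletin_crc_ok is a hypothetical name.

#include <linux/crc32.h>

static bool example_bulletin_crc_ok(struct pf_vf_bulletin_content *bulletin)
{
	/* the crc covers the content after the crc word itself; a
	 * mismatch means the PF was mid-update when the VF sampled the
	 * board, so the VF re-copies and retries (up to
	 * BULLETIN_ATTEMPTS times) before giving up
	 */
	u32 crc = crc32(BULLETIN_CRC_SEED,
			(u8 *)bulletin + sizeof(bulletin->crc),
			bulletin->length - sizeof(bulletin->crc));

	return bulletin->crc == crc;
}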