Diffstat (limited to 'drivers/net/ethernet/brocade')
25 files changed, 2417 insertions, 1680 deletions
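Most of the churn below is mechanical: Doxygen-style comment blocks (@brief, @param[in], @return) are either demoted to plain C comments or reshaped into the "name - description" / "@arg: description" form that scripts/kernel-doc parses. A minimal before/after sketch of the convention follows; bfa_example_fn is an invented name, not part of the patch:

/* Before: Doxygen markup, which scripts/kernel-doc cannot parse. */
/**
 * bfa_example_fn()
 *
 * @brief Example operation on the IOC
 *
 * @param[in] ioc - Pointer to the IOC
 *
 * @return void
 */

/* After: kernel-doc for interfaces that keep their documentation... */
/**
 * bfa_example_fn - Example operation on the IOC
 * @ioc: Pointer to the IOC
 */

/* ...and a plain one-line comment where no kernel-doc is wanted. */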
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c index 689e5e19cc0..550d2521ba7 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_cee.c +++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c @@ -52,13 +52,7 @@ bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg) } /** - * bfa_cee_attr_meminfo() - * - * @brief Returns the size of the DMA memory needed by CEE attributes - * - * @param[in] void - * - * @return Size of DMA region + * bfa_cee_attr_meminfo - Returns the size of the DMA memory needed by CEE attributes */ static u32 bfa_cee_attr_meminfo(void) @@ -66,13 +60,7 @@ bfa_cee_attr_meminfo(void) return roundup(sizeof(struct bfa_cee_attr), BFA_DMA_ALIGN_SZ); } /** - * bfa_cee_stats_meminfo() - * - * @brief Returns the size of the DMA memory needed by CEE stats - * - * @param[in] void - * - * @return Size of DMA region + * bfa_cee_stats_meminfo - Returns the size of the DMA memory needed by CEE stats */ static u32 bfa_cee_stats_meminfo(void) @@ -81,14 +69,10 @@ bfa_cee_stats_meminfo(void) } /** - * bfa_cee_get_attr_isr() - * - * @brief CEE ISR for get-attributes responses from f/w - * - * @param[in] cee - Pointer to the CEE module - * status - Return status from the f/w + * bfa_cee_get_attr_isr - CEE ISR for get-attributes responses from f/w * - * @return void + * @cee: Pointer to the CEE module + * @status: Return status from the f/w */ static void bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status) @@ -105,14 +89,10 @@ bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status) } /** - * bfa_cee_get_attr_isr() - * - * @brief CEE ISR for get-stats responses from f/w + * bfa_cee_get_stats_isr - CEE ISR for get-stats responses from f/w * - * @param[in] cee - Pointer to the CEE module - * status - Return status from the f/w - * - * @return void + * @cee: Pointer to the CEE module + * @status: Return status from the f/w */ static void bfa_cee_get_stats_isr(struct bfa_cee *cee, enum bfa_status status) @@ -147,13 +127,7 @@ bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status) cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status); } /** - * bfa_nw_cee_meminfo() - * - * @brief Returns the size of the DMA memory needed by CEE module - * - * @param[in] void - * - * @return Size of DMA region + * bfa_nw_cee_meminfo - Returns the size of the DMA memory needed by CEE module */ u32 bfa_nw_cee_meminfo(void) @@ -162,15 +136,11 @@ bfa_nw_cee_meminfo(void) } /** - * bfa_nw_cee_mem_claim() - * - * @brief Initialized CEE DMA Memory - * - * @param[in] cee CEE module pointer - * dma_kva Kernel Virtual Address of CEE DMA Memory - * dma_pa Physical Address of CEE DMA Memory + * bfa_nw_cee_mem_claim - Initialize CEE DMA Memory * - * @return void + * @cee: CEE module pointer + * @dma_kva: Kernel Virtual Address of CEE DMA Memory + * @dma_pa: Physical Address of CEE DMA Memory */ void bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa) @@ -185,13 +155,11 @@ bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa) } /** - * bfa_cee_get_attr() - * - * @brief Send the request to the f/w to fetch CEE attributes. + * bfa_cee_get_attr - Send the request to the f/w to fetch CEE attributes. * - * @param[in] Pointer to the CEE module data structure. + * @cee: Pointer to the CEE module data structure.
* - * @return Status + * Return: status */ enum bfa_status bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr, @@ -220,13 +188,7 @@ bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr, } /** - * bfa_cee_isrs() - * - * @brief Handles Mail-box interrupts for CEE module. - * - * @param[in] Pointer to the CEE module data structure. - * - * @return void + * bfa_cee_isr - Handles Mail-box interrupts for CEE module. */ static void @@ -253,14 +215,9 @@ bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m) } /** - * bfa_cee_notify() - * - * @brief CEE module heart-beat failure handler. - * @brief CEE module IOC event handler. - * - * @param[in] IOC event type + * bfa_cee_notify - CEE module heart-beat failure handler. * - * @return void + * @event: IOC event type */ static void @@ -307,17 +264,13 @@ bfa_cee_notify(void *arg, enum bfa_ioc_event event) } /** - * bfa_nw_cee_attach() - * - * @brief CEE module-attach API + * bfa_nw_cee_attach - CEE module-attach API * - * @param[in] cee - Pointer to the CEE module data structure - * ioc - Pointer to the ioc module data structure - * dev - Pointer to the device driver module data structure - * The device driver specific mbox ISR functions have - * this pointer as one of the parameters. - * - * @return void + * @cee: Pointer to the CEE module data structure + * @ioc: Pointer to the ioc module data structure + * @dev: Pointer to the device driver module data structure. + * The device driver specific mbox ISR functions have + * this pointer as one of the parameters. */ void bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h index 3da1a946ccd..ad004a4c389 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_cs.h +++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h @@ -16,23 +16,18 @@ * www.brocade.com */ -/** - * @file bfa_cs.h BFA common services - */ +/* BFA common services */ #ifndef __BFA_CS_H__ #define __BFA_CS_H__ #include "cna.h" -/** - * @ BFA state machine interfaces - */ +/* BFA state machine interfaces */ typedef void (*bfa_sm_t)(void *sm, int event); -/** - * oc - object class eg. bfa_ioc +/* oc - object class eg. bfa_ioc * st - state, eg. reset * otype - object type, eg. struct bfa_ioc * etype - object type, eg. enum ioc_event @@ -45,9 +40,7 @@ typedef void (*bfa_sm_t)(void *sm, int event); #define bfa_sm_get_state(_sm) ((_sm)->sm) #define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state)) -/** - * For converting from state machine function to state encoding. - */ +/* For converting from state machine function to state encoding. */ struct bfa_sm_table { bfa_sm_t sm; /*!< state machine function */ int state; /*!< state machine encoding */ @@ -55,13 +48,10 @@ }; #define BFA_SM(_sm) ((bfa_sm_t)(_sm)) -/** - * State machine with entry actions. - */ +/* State machine with entry actions. */ typedef void (*bfa_fsm_t)(void *fsm, int event); -/** - * oc - object class eg. bfa_ioc +/* oc - object class eg. bfa_ioc * st - state, eg. reset * otype - object type, eg. struct bfa_ioc * etype - object type, eg. enum ioc_event @@ -90,9 +80,7 @@ bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm) return smt[i].state; } -/** - * @ Generic wait counter. - */ +/* Generic wait counter. */ typedef void (*bfa_wc_resume_t) (void *cbarg); @@ -116,9 +104,7 @@ bfa_wc_down(struct bfa_wc *wc) wc->wc_resume(wc->wc_cbarg); } -/** - * Initialize a waiting counter. - */ +/* Initialize a waiting counter. 
*/ static inline void bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg) { @@ -128,9 +114,7 @@ bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg) bfa_wc_up(wc); } -/** - * Wait for counter to reach zero - */ +/* Wait for counter to reach zero */ static inline void bfa_wc_wait(struct bfa_wc *wc) { diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h index 48f87733739..b7d8127c198 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h @@ -26,13 +26,9 @@ #define BFA_STRING_32 32 #define BFA_VERSION_LEN 64 -/** - * ---------------------- adapter definitions ------------ - */ +/* ---------------------- adapter definitions ------------ */ -/** - * BFA adapter level attributes. - */ +/* BFA adapter level attributes. */ enum { BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE), /* @@ -74,18 +70,14 @@ struct bfa_adapter_attr { u8 trunk_capable; }; -/** - * ---------------------- IOC definitions ------------ - */ +/* ---------------------- IOC definitions ------------ */ enum { BFA_IOC_DRIVER_LEN = 16, BFA_IOC_CHIP_REV_LEN = 8, }; -/** - * Driver and firmware versions. - */ +/* Driver and firmware versions. */ struct bfa_ioc_driver_attr { char driver[BFA_IOC_DRIVER_LEN]; /*!< driver name */ char driver_ver[BFA_VERSION_LEN]; /*!< driver version */ @@ -95,9 +87,7 @@ struct bfa_ioc_driver_attr { char ob_ver[BFA_VERSION_LEN]; /*!< openboot version */ }; -/** - * IOC PCI device attributes - */ +/* IOC PCI device attributes */ struct bfa_ioc_pci_attr { u16 vendor_id; /*!< PCI vendor ID */ u16 device_id; /*!< PCI device ID */ @@ -108,9 +98,7 @@ struct bfa_ioc_pci_attr { char chip_rev[BFA_IOC_CHIP_REV_LEN]; /*!< chip revision */ }; -/** - * IOC states - */ +/* IOC states */ enum bfa_ioc_state { BFA_IOC_UNINIT = 1, /*!< IOC is in uninit state */ BFA_IOC_RESET = 2, /*!< IOC is in reset state */ @@ -127,9 +115,7 @@ enum bfa_ioc_state { BFA_IOC_HWFAIL = 13, /*!< PCI mapping doesn't exist */ }; -/** - * IOC firmware stats - */ +/* IOC firmware stats */ struct bfa_fw_ioc_stats { u32 enable_reqs; u32 disable_reqs; @@ -139,9 +125,7 @@ struct bfa_fw_ioc_stats { u32 unknown_reqs; }; -/** - * IOC driver stats - */ +/* IOC driver stats */ struct bfa_ioc_drv_stats { u32 ioc_isrs; u32 ioc_enables; @@ -157,9 +141,7 @@ struct bfa_ioc_drv_stats { u32 rsvd; }; -/** - * IOC statistics - */ +/* IOC statistics */ struct bfa_ioc_stats { struct bfa_ioc_drv_stats drv_stats; /*!< driver IOC stats */ struct bfa_fw_ioc_stats fw_stats; /*!< firmware IOC stats */ @@ -171,9 +153,7 @@ enum bfa_ioc_type { BFA_IOC_TYPE_LL = 3, }; -/** - * IOC attributes returned in queries - */ +/* IOC attributes returned in queries */ struct bfa_ioc_attr { enum bfa_ioc_type ioc_type; enum bfa_ioc_state state; /*!< IOC state */ @@ -184,25 +164,20 @@ struct bfa_ioc_attr { u8 port_mode; /*!< enum bfa_mode */ u8 cap_bm; /*!< capability */ u8 port_mode_cfg; /*!< enum bfa_mode */ - u8 rsvd[4]; /*!< 64bit align */ + u8 def_fn; /*!< 1 if default fn */ + u8 rsvd[3]; /*!< 64bit align */ }; -/** - * Adapter capability mask definition - */ +/* Adapter capability mask definition */ enum { BFA_CM_HBA = 0x01, BFA_CM_CNA = 0x02, BFA_CM_NIC = 0x04, }; -/** - * ---------------------- mfg definitions ------------ - */ +/* ---------------------- mfg definitions ------------ */ -/** - * Checksum size - */ +/* Checksum size */ #define BFA_MFG_CHKSUM_SIZE 16 #define BFA_MFG_PARTNUM_SIZE 14 @@ -213,8 +188,7 @@ enum { #pragma 
pack(1) -/** - * @brief BFA adapter manufacturing block definition. +/* BFA adapter manufacturing block definition. * * All numerical fields are in big-endian format. */ @@ -256,9 +230,7 @@ struct bfa_mfg_block { #pragma pack() -/** - * ---------------------- pci definitions ------------ - */ +/* ---------------------- pci definitions ------------ */ /* * PCI device ID information @@ -275,9 +247,7 @@ enum { #define bfa_asic_id_ctc(device) \ (bfa_asic_id_ct(device) || bfa_asic_id_ct2(device)) -/** - * PCI sub-system device and vendor ID information - */ +/* PCI sub-system device and vendor ID information */ enum { BFA_PCI_FCOE_SSDEVICE_ID = 0x14, BFA_PCI_CT2_SSID_FCoE = 0x22, diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h index 8ab33ee2c2b..b39c5f23974 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h @@ -20,10 +20,7 @@ #include "bfa_defs.h" -/** - * @brief - * FC physical port statistics. - */ +/* FC physical port statistics. */ struct bfa_port_fc_stats { u64 secs_reset; /*!< Seconds since stats is reset */ u64 tx_frames; /*!< Tx frames */ @@ -59,10 +56,7 @@ struct bfa_port_fc_stats { u64 bbsc_link_resets; /*!< Credit Recovery-Link Resets */ }; -/** - * @brief - * Eth Physical Port statistics. - */ +/* Eth Physical Port statistics. */ struct bfa_port_eth_stats { u64 secs_reset; /*!< Seconds since stats is reset */ u64 frame_64; /*!< Frames 64 bytes */ @@ -108,10 +102,7 @@ struct bfa_port_eth_stats { u64 tx_iscsi_zero_pause; /*!< Tx iSCSI zero pause */ }; -/** - * @brief - * Port statistics. - */ +/* Port statistics. */ union bfa_port_stats_u { struct bfa_port_fc_stats fc; struct bfa_port_eth_stats eth; diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h index 6681fe87c1e..7fb396fe679 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h @@ -20,33 +20,23 @@ #include "bfa_defs.h" -/** - * Manufacturing block version - */ +/* Manufacturing block version */ #define BFA_MFG_VERSION 3 #define BFA_MFG_VERSION_UNINIT 0xFF -/** - * Manufacturing block encrypted version - */ +/* Manufacturing block encrypted version */ #define BFA_MFG_ENC_VER 2 -/** - * Manufacturing block version 1 length - */ +/* Manufacturing block version 1 length */ #define BFA_MFG_VER1_LEN 128 -/** - * Manufacturing block header length - */ +/* Manufacturing block header length */ #define BFA_MFG_HDR_LEN 4 #define BFA_MFG_SERIALNUM_SIZE 11 #define STRSZ(_n) (((_n) + 4) & ~3) -/** - * Manufacturing card type - */ +/* Manufacturing card type */ enum { BFA_MFG_TYPE_CB_MAX = 825, /*!< Crossbow card type max */ BFA_MFG_TYPE_FC8P2 = 825, /*!< 8G 2port FC card */ @@ -70,9 +60,7 @@ enum { #pragma pack(1) -/** - * Check if Mezz card - */ +/* Check if Mezz card */ #define bfa_mfg_is_mezz(type) (( \ (type) == BFA_MFG_TYPE_JAYHAWK || \ (type) == BFA_MFG_TYPE_WANCHESE || \ @@ -127,9 +115,7 @@ do { \ } \ } while (0) -/** - * VPD data length - */ +/* VPD data length */ #define BFA_MFG_VPD_LEN 512 #define BFA_MFG_VPD_LEN_INVALID 0 @@ -137,9 +123,7 @@ do { \ #define BFA_MFG_VPD_PCI_VER_MASK 0x07 /*!< version mask 3 bits */ #define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /*!< vendor mask 5 bits */ -/** - * VPD vendor tag - */ +/* VPD vendor tag */ enum { BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */ BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */ @@ -151,8 +135,7 @@ enum { BFA_MFG_VPD_PCI_BRCD = 
0xf8, /*!< PCI VPD Brocade */ }; -/** - * @brief BFA adapter flash vpd data definition. +/* BFA adapter flash vpd data definition. * * All numerical fields are in big-endian format. */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h index 7c5fe6c2e80..ea9af9ae754 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h @@ -18,8 +18,7 @@ #ifndef __BFA_DEFS_STATUS_H__ #define __BFA_DEFS_STATUS_H__ -/** - * API status return values +/* API status return values * * NOTE: The error msgs are auto generated from the comments. Only single line * comments are supported */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index 0b640fafbda..354ae9792ba 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -20,13 +20,17 @@ #include "bfi_reg.h" #include "bfa_defs.h" -/** - * IOC local definitions - */ +/* IOC local definitions */ -/** - * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. - */ +#define bfa_ioc_state_disabled(__sm) \ + (((__sm) == BFI_IOC_UNINIT) || \ + ((__sm) == BFI_IOC_INITING) || \ + ((__sm) == BFI_IOC_HWINIT) || \ + ((__sm) == BFI_IOC_DISABLED) || \ + ((__sm) == BFI_IOC_FAIL) || \ + ((__sm) == BFI_IOC_CFG_DISABLED)) + +/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */ #define bfa_ioc_firmware_lock(__ioc) \ ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc)) @@ -46,6 +50,14 @@ ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc)) #define bfa_ioc_sync_complete(__ioc) \ ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc)) +#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate) \ + ((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate)) +#define bfa_ioc_get_cur_ioc_fwstate(__ioc) \ + ((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc)) +#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate) \ + ((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate)) +#define bfa_ioc_get_alt_ioc_fwstate(__ioc) \ + ((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc)) #define bfa_ioc_mbox_cmd_pending(__ioc) \ (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ @@ -80,8 +92,8 @@ static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc); static void bfa_ioc_pf_failed(struct bfa_ioc *ioc); static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc); static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc); -static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, - u32 boot_param); +static enum bfa_status bfa_ioc_boot(struct bfa_ioc *ioc, + enum bfi_fwboot_type boot_type, u32 boot_param); static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr); static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num); @@ -96,9 +108,7 @@ static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model); static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc); -/** - * IOC state machine definitions/declarations - */ +/* IOC state machine definitions/declarations */ enum ioc_event { IOC_E_RESET = 1, /*!< IOC reset request */ IOC_E_ENABLE = 2, /*!< IOC enable request */ @@ -148,9 +158,7 @@ static void bfa_iocpf_initfail(struct bfa_ioc *ioc); static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc); static void bfa_iocpf_stop(struct bfa_ioc *ioc); -/** - * IOCPF state machine events - */ +/* IOCPF state machine events */ enum iocpf_event { IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */ IOCPF_E_DISABLE = 2, /*!< IOCPF 
disable request */ @@ -166,9 +174,7 @@ enum iocpf_event { IOCPF_E_SEM_ERROR = 12, /*!< h/w sem mapping error */ }; -/** - * IOCPF states - */ +/* IOCPF states */ enum bfa_iocpf_state { BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */ BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */ @@ -215,21 +221,15 @@ static struct bfa_sm_table iocpf_sm_table[] = { {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED}, }; -/** - * IOC State Machine - */ +/* IOC State Machine */ -/** - * Beginning state. IOC uninit state. - */ +/* Beginning state. IOC uninit state. */ static void bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc) { } -/** - * IOC is in uninit state. - */ +/* IOC is in uninit state. */ static void bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event) { @@ -243,18 +243,14 @@ bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event) } } -/** - * Reset entry actions -- initialize state machine - */ +/* Reset entry actions -- initialize state machine */ static void bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc) { bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset); } -/** - * IOC is in reset state. - */ +/* IOC is in reset state. */ static void bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event) { @@ -282,8 +278,7 @@ bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc) bfa_iocpf_enable(ioc); } -/** - * Host IOC function is being enabled, awaiting response from firmware. +/* Host IOC function is being enabled, awaiting response from firmware. * Semaphore is acquired. */ static void @@ -325,9 +320,7 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event) } } -/** - * Semaphore should be acquired for version check. - */ +/* Semaphore should be acquired for version check. */ static void bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc) { @@ -336,9 +329,7 @@ bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc) bfa_ioc_send_getattr(ioc); } -/** - * IOC configuration in progress. Timer is active. - */ +/* IOC configuration in progress. Timer is active. */ static void bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event) { @@ -419,9 +410,7 @@ bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc) bfa_iocpf_disable(ioc); } -/** - * IOC is being disabled - */ +/* IOC is being disabled */ static void bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event) { @@ -449,9 +438,7 @@ bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event) } } -/** - * IOC disable completion entry. - */ +/* IOC disable completion entry. */ static void bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc) { @@ -485,9 +472,7 @@ bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc) { } -/** - * Hardware initialization retry. - */ +/* Hardware initialization retry. */ static void bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event) { @@ -534,9 +519,7 @@ bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc) { } -/** - * IOC failure. - */ +/* IOC failure. */ static void bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event) { @@ -568,9 +551,7 @@ bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc) { } -/** - * IOC failure. - */ +/* IOC failure. 
*/ static void bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event) { @@ -593,13 +574,9 @@ bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event) } } -/** - * IOCPF State Machine - */ +/* IOCPF State Machine */ -/** - * Reset entry actions -- initialize state machine - */ +/* Reset entry actions -- initialize state machine */ static void bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf) { @@ -607,9 +584,7 @@ bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf) iocpf->auto_recover = bfa_nw_auto_recover; } -/** - * Beginning state. IOC is in reset state. - */ +/* Beginning state. IOC is in reset state. */ static void bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event) { @@ -626,9 +601,7 @@ bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event) } } -/** - * Semaphore should be acquired for version check. - */ +/* Semaphore should be acquired for version check. */ static void bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf) { @@ -636,9 +609,7 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf) bfa_ioc_hw_sem_get(iocpf->ioc); } -/** - * Awaiting h/w semaphore to continue with version check. - */ +/* Awaiting h/w semaphore to continue with version check. */ static void bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event) { @@ -683,9 +654,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event) } } -/** - * Notify enable completion callback - */ +/* Notify enable completion callback */ static void bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf) { @@ -698,9 +667,7 @@ bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf) msecs_to_jiffies(BFA_IOC_TOV)); } -/** - * Awaiting firmware version match. - */ +/* Awaiting firmware version match. */ static void bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event) { @@ -727,18 +694,14 @@ bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event) } } -/** - * Request for semaphore. - */ +/* Request for semaphore. */ static void bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf) { bfa_ioc_hw_sem_get(iocpf->ioc); } -/** - * Awaiting semaphore for h/w initialzation. - */ +/* Awaiting semaphore for h/w initialization. */ static void bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event) { @@ -778,8 +741,7 @@ bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf) bfa_ioc_reset(iocpf->ioc, false); } -/** - * Hardware is being initialized. Interrupts are enabled. +/* Hardware is being initialized. Interrupts are enabled. * Holding hardware semaphore lock. */ static void @@ -822,8 +784,7 @@ bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf) bfa_ioc_send_enable(iocpf->ioc); } -/** - * Host IOC function is being enabled, awaiting response from firmware. +/* Host IOC function is being enabled, awaiting response from firmware. * Semaphore is acquired. 
*/ static void @@ -896,9 +857,7 @@ bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf) bfa_ioc_send_disable(iocpf->ioc); } -/** - * IOC is being disabled - */ +/* IOC is being disabled */ static void bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event) { @@ -917,7 +876,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event) */ case IOCPF_E_TIMEOUT: - writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); break; @@ -935,9 +894,7 @@ bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf) bfa_ioc_hw_sem_get(iocpf->ioc); } -/** - * IOC hb ack request is being removed. - */ +/* IOC hb ack request is being removed. */ static void bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) { @@ -963,9 +920,7 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) } } -/** - * IOC disable completion entry. - */ +/* IOC disable completion entry. */ static void bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf) { @@ -1000,9 +955,7 @@ bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf) bfa_ioc_hw_sem_get(iocpf->ioc); } -/** - * Hardware initialization failed. - */ +/* Hardware initialization failed. */ static void bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) { @@ -1012,7 +965,7 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) case IOCPF_E_SEMLOCKED: bfa_ioc_notify_fail(ioc); bfa_ioc_sync_leave(ioc); - writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); bfa_nw_ioc_hw_sem_release(ioc); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); break; @@ -1046,9 +999,7 @@ bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf) { } -/** - * Hardware initialization failed. - */ +/* Hardware initialization failed. */ static void bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event) { @@ -1084,9 +1035,7 @@ bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf) bfa_ioc_hw_sem_get(iocpf->ioc); } -/** - * IOC is in failed state. - */ +/* IOC is in failed state. */ static void bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) { @@ -1098,7 +1047,7 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) bfa_ioc_notify_fail(ioc); if (!iocpf->auto_recover) { bfa_ioc_sync_leave(ioc); - writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); bfa_nw_ioc_hw_sem_release(ioc); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); } else { @@ -1134,10 +1083,7 @@ bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf) { } -/** - * @brief - * IOC is in failed state. - */ +/* IOC is in failed state. */ static void bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event) { @@ -1151,13 +1097,9 @@ bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event) } } -/** - * BFA IOC private functions - */ +/* BFA IOC private functions */ -/** - * Notify common modules registered for notification. - */ +/* Notify common modules registered for notification. 
*/ static void bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event) { @@ -1236,7 +1178,7 @@ bfa_ioc_hw_sem_init(struct bfa_ioc *ioc) r32 = readl(ioc->ioc_regs.ioc_init_sem_reg); } - fwstate = readl(ioc->ioc_regs.ioc_fwstate); + fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); if (fwstate == BFI_IOC_UNINIT) { writel(1, ioc->ioc_regs.ioc_init_sem_reg); return; @@ -1250,8 +1192,8 @@ bfa_ioc_hw_sem_init(struct bfa_ioc *ioc) } bfa_ioc_fwver_clear(ioc); - writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); - writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate); + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT); + bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT); /* * Try to lock and then unlock the semaphore. @@ -1298,10 +1240,7 @@ bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc) del_timer(&ioc->sem_timer); } -/** - * @brief - * Initialize LPU local memory (aka secondary memory / SRAM) - */ +/* Initialize LPU local memory (aka secondary memory / SRAM) */ static void bfa_ioc_lmem_init(struct bfa_ioc *ioc) { @@ -1366,9 +1305,7 @@ bfa_ioc_lpu_stop(struct bfa_ioc *ioc) writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); } -/** - * Get driver and firmware versions. - */ +/* Get driver and firmware versions. */ void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) { @@ -1388,51 +1325,522 @@ bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) } } +static bool +bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1, + struct bfi_ioc_image_hdr *fwhdr_2) +{ + int i; + + for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) { + if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i]) + return false; + } + + return true; +} + +/* Returns TRUE if major, minor and maintenance are same. + * If patch versions are same, check for MD5 Checksum to be same. + */ +static bool +bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr *drv_fwhdr, + struct bfi_ioc_image_hdr *fwhdr_to_cmp) +{ + if (drv_fwhdr->signature != fwhdr_to_cmp->signature) + return false; + if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major) + return false; + if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor) + return false; + if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint) + return false; + if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch && + drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase && + drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build) + return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp); + + return true; +} + +static bool +bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr *flash_fwhdr) +{ + if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF) + return false; + + return true; +} + +static bool +fwhdr_is_ga(struct bfi_ioc_image_hdr *fwhdr) +{ + if (fwhdr->fwver.phase == 0 && + fwhdr->fwver.build == 0) + return true; + + return false; +} + +/* Returns TRUE if both are compatible and patch of fwhdr_to_cmp is better. */ +static enum bfi_ioc_img_ver_cmp +bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr, + struct bfi_ioc_image_hdr *fwhdr_to_cmp) +{ + if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == false) + return BFI_IOC_IMG_VER_INCOMP; + + if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch) + return BFI_IOC_IMG_VER_BETTER; + else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch) + return BFI_IOC_IMG_VER_OLD; + + /* GA takes priority over internal builds of the same patch stream. + * At this point major minor maint and patch numbers are same. 
+ */ + if (fwhdr_is_ga(base_fwhdr) == true) + if (fwhdr_is_ga(fwhdr_to_cmp)) + return BFI_IOC_IMG_VER_SAME; + else + return BFI_IOC_IMG_VER_OLD; + else + if (fwhdr_is_ga(fwhdr_to_cmp)) + return BFI_IOC_IMG_VER_BETTER; + + if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase) + return BFI_IOC_IMG_VER_BETTER; + else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase) + return BFI_IOC_IMG_VER_OLD; + + if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build) + return BFI_IOC_IMG_VER_BETTER; + else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build) + return BFI_IOC_IMG_VER_OLD; + + /* All Version Numbers are equal. + * Md5 check to be done as a part of compatibility check. + */ + return BFI_IOC_IMG_VER_SAME; +} + +/* register definitions */ +#define FLI_CMD_REG 0x0001d000 +#define FLI_WRDATA_REG 0x0001d00c +#define FLI_RDDATA_REG 0x0001d010 +#define FLI_ADDR_REG 0x0001d004 +#define FLI_DEV_STATUS_REG 0x0001d014 + +#define BFA_FLASH_FIFO_SIZE 128 /* fifo size */ +#define BFA_FLASH_CHECK_MAX 10000 /* max # of status check */ +#define BFA_FLASH_BLOCKING_OP_MAX 1000000 /* max # of blocking op check */ +#define BFA_FLASH_WIP_MASK 0x01 /* write in progress bit mask */ + +#define NFC_STATE_RUNNING 0x20000001 +#define NFC_STATE_PAUSED 0x00004560 +#define NFC_VER_VALID 0x147 + +enum bfa_flash_cmd { + BFA_FLASH_FAST_READ = 0x0b, /* fast read */ + BFA_FLASH_WRITE_ENABLE = 0x06, /* write enable */ + BFA_FLASH_SECTOR_ERASE = 0xd8, /* sector erase */ + BFA_FLASH_WRITE = 0x02, /* write */ + BFA_FLASH_READ_STATUS = 0x05, /* read status */ +}; + +/* hardware error definition */ +enum bfa_flash_err { + BFA_FLASH_NOT_PRESENT = -1, /*!< flash not present */ + BFA_FLASH_UNINIT = -2, /*!< flash not initialized */ + BFA_FLASH_BAD = -3, /*!< flash bad */ + BFA_FLASH_BUSY = -4, /*!< flash busy */ + BFA_FLASH_ERR_CMD_ACT = -5, /*!< command active never cleared */ + BFA_FLASH_ERR_FIFO_CNT = -6, /*!< fifo count never cleared */ + BFA_FLASH_ERR_WIP = -7, /*!< write-in-progress never cleared */ + BFA_FLASH_ERR_TIMEOUT = -8, /*!< fli timeout */ + BFA_FLASH_ERR_LEN = -9, /*!< invalid length */ +}; + +/* flash command register data structure */ +union bfa_flash_cmd_reg { + struct { +#ifdef __BIG_ENDIAN + u32 act:1; + u32 rsv:1; + u32 write_cnt:9; + u32 read_cnt:9; + u32 addr_cnt:4; + u32 cmd:8; +#else + u32 cmd:8; + u32 addr_cnt:4; + u32 read_cnt:9; + u32 write_cnt:9; + u32 rsv:1; + u32 act:1; +#endif + } r; + u32 i; +}; + +/* flash device status register data structure */ +union bfa_flash_dev_status_reg { + struct { +#ifdef __BIG_ENDIAN + u32 rsv:21; + u32 fifo_cnt:6; + u32 busy:1; + u32 init_status:1; + u32 present:1; + u32 bad:1; + u32 good:1; +#else + u32 good:1; + u32 bad:1; + u32 present:1; + u32 init_status:1; + u32 busy:1; + u32 fifo_cnt:6; + u32 rsv:21; +#endif + } r; + u32 i; +}; + +/* flash address register data structure */ +union bfa_flash_addr_reg { + struct { +#ifdef __BIG_ENDIAN + u32 addr:24; + u32 dummy:8; +#else + u32 dummy:8; + u32 addr:24; +#endif + } r; + u32 i; +}; + +/* Flash raw private functions */ +static void +bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt, + u8 rd_cnt, u8 ad_cnt, u8 op) +{ + union bfa_flash_cmd_reg cmd; + + cmd.i = 0; + cmd.r.act = 1; + cmd.r.write_cnt = wr_cnt; + cmd.r.read_cnt = rd_cnt; + cmd.r.addr_cnt = ad_cnt; + cmd.r.cmd = op; + writel(cmd.i, (pci_bar + FLI_CMD_REG)); +} + +static void +bfa_flash_set_addr(void __iomem *pci_bar, u32 address) +{ + union bfa_flash_addr_reg addr; + + addr.r.addr = address & 0x00ffffff; + addr.r.dummy = 0; + writel(addr.i, 
(pci_bar + FLI_ADDR_REG)); +} + +static int +bfa_flash_cmd_act_check(void __iomem *pci_bar) +{ + union bfa_flash_cmd_reg cmd; + + cmd.i = readl(pci_bar + FLI_CMD_REG); + + if (cmd.r.act) + return BFA_FLASH_ERR_CMD_ACT; + + return 0; +} + +/* Flush FLI data fifo. */ +static u32 +bfa_flash_fifo_flush(void __iomem *pci_bar) +{ + u32 i; + u32 t; + union bfa_flash_dev_status_reg dev_status; + + dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG); + + if (!dev_status.r.fifo_cnt) + return 0; + + /* fifo counter in terms of words */ + for (i = 0; i < dev_status.r.fifo_cnt; i++) + t = readl(pci_bar + FLI_RDDATA_REG); + + /* Check the device status. It may take some time. */ + for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) { + dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG); + if (!dev_status.r.fifo_cnt) + break; + } + + if (dev_status.r.fifo_cnt) + return BFA_FLASH_ERR_FIFO_CNT; + + return 0; +} + +/* Read flash status. */ +static u32 +bfa_flash_status_read(void __iomem *pci_bar) +{ + union bfa_flash_dev_status_reg dev_status; + u32 status; + u32 ret_status; + int i; + + status = bfa_flash_fifo_flush(pci_bar); + if (status < 0) + return status; + + bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS); + + for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) { + status = bfa_flash_cmd_act_check(pci_bar); + if (!status) + break; + } + + if (status) + return status; + + dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG); + if (!dev_status.r.fifo_cnt) + return BFA_FLASH_BUSY; + + ret_status = readl(pci_bar + FLI_RDDATA_REG); + ret_status >>= 24; + + status = bfa_flash_fifo_flush(pci_bar); + if (status < 0) + return status; + + return ret_status; +} + +/* Start flash read operation. */ +static u32 +bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len, + char *buf) +{ + u32 status; + + /* len must be multiple of 4 and not exceeding fifo size */ + if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0) + return BFA_FLASH_ERR_LEN; + + /* check status */ + status = bfa_flash_status_read(pci_bar); + if (status == BFA_FLASH_BUSY) + status = bfa_flash_status_read(pci_bar); + + if (status < 0) + return status; + + /* check if write-in-progress bit is cleared */ + if (status & BFA_FLASH_WIP_MASK) + return BFA_FLASH_ERR_WIP; + + bfa_flash_set_addr(pci_bar, offset); + + bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ); + + return 0; +} + +/* Check flash read operation. */ +static u32 +bfa_flash_read_check(void __iomem *pci_bar) +{ + if (bfa_flash_cmd_act_check(pci_bar)) + return 1; + + return 0; +} + +/* End flash read operation. */ +static void +bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf) +{ + u32 i; + + /* read data fifo up to 32 words */ + for (i = 0; i < len; i += 4) { + u32 w = readl(pci_bar + FLI_RDDATA_REG); + *((u32 *)(buf + i)) = swab32(w); + } + + bfa_flash_fifo_flush(pci_bar); +} + +/* Perform flash raw read. 
*/ + +#define FLASH_BLOCKING_OP_MAX 500 +#define FLASH_SEM_LOCK_REG 0x18820 + +static int +bfa_raw_sem_get(void __iomem *bar) +{ + int locked; + + locked = readl((bar + FLASH_SEM_LOCK_REG)); + + return !locked; +} + +static enum bfa_status +bfa_flash_sem_get(void __iomem *bar) +{ + u32 n = FLASH_BLOCKING_OP_MAX; + + while (!bfa_raw_sem_get(bar)) { + if (--n <= 0) + return BFA_STATUS_BADFLASH; + mdelay(10); + } + return BFA_STATUS_OK; +} + +static void +bfa_flash_sem_put(void __iomem *bar) +{ + writel(0, (bar + FLASH_SEM_LOCK_REG)); +} + +static enum bfa_status +bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf, + u32 len) +{ + u32 n, status; + u32 off, l, s, residue, fifo_sz; + + residue = len; + off = 0; + fifo_sz = BFA_FLASH_FIFO_SIZE; + status = bfa_flash_sem_get(pci_bar); + if (status != BFA_STATUS_OK) + return status; + + while (residue) { + s = offset + off; + n = s / fifo_sz; + l = (n + 1) * fifo_sz - s; + if (l > residue) + l = residue; + + status = bfa_flash_read_start(pci_bar, offset + off, l, + &buf[off]); + if (status < 0) { + bfa_flash_sem_put(pci_bar); + return BFA_STATUS_FAILED; + } + + n = BFA_FLASH_BLOCKING_OP_MAX; + while (bfa_flash_read_check(pci_bar)) { + if (--n <= 0) { + bfa_flash_sem_put(pci_bar); + return BFA_STATUS_FAILED; + } + } + + bfa_flash_read_end(pci_bar, l, &buf[off]); + + residue -= l; + off += l; + } + bfa_flash_sem_put(pci_bar); + + return BFA_STATUS_OK; +} + +#define BFA_FLASH_PART_FWIMG_ADDR 0x100000 /* fw image address */ + +static enum bfa_status +bfa_nw_ioc_flash_img_get_chnk(struct bfa_ioc *ioc, u32 off, + u32 *fwimg) +{ + return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva, + BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)), + (char *)fwimg, BFI_FLASH_CHUNK_SZ); +} + +static enum bfi_ioc_img_ver_cmp +bfa_ioc_flash_fwver_cmp(struct bfa_ioc *ioc, + struct bfi_ioc_image_hdr *base_fwhdr) +{ + struct bfi_ioc_image_hdr *flash_fwhdr; + enum bfa_status status; + u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS]; + + status = bfa_nw_ioc_flash_img_get_chnk(ioc, 0, fwimg); + if (status != BFA_STATUS_OK) + return BFI_IOC_IMG_VER_INCOMP; + + flash_fwhdr = (struct bfi_ioc_image_hdr *)fwimg; + if (bfa_ioc_flash_fwver_valid(flash_fwhdr)) + return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr); + else + return BFI_IOC_IMG_VER_INCOMP; +} + /** - * Returns TRUE if same. + * Returns TRUE if driver is willing to work with current smem f/w version. */ bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) { struct bfi_ioc_image_hdr *drv_fwhdr; - int i; + enum bfi_ioc_img_ver_cmp smem_flash_cmp, drv_smem_cmp; drv_fwhdr = (struct bfi_ioc_image_hdr *) bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0); - for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) { - if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) - return false; + /* If smem is incompatible or old, driver should not work with it. */ + drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, fwhdr); + if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP || + drv_smem_cmp == BFI_IOC_IMG_VER_OLD) { + return false; } - return true; + /* IF Flash has a better F/W than smem do not work with smem. + * If smem f/w == flash f/w, as smem f/w not old | incmp, work with it. + * If Flash is old or incomp work with smem iff smem f/w == drv f/w. + */ + smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, fwhdr); + + if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER) + return false; + else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME) + return true; + else + return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ? 
+ true : false; } -/** - * Return true if current running version is valid. Firmware signature and +/* Return true if current running version is valid. Firmware signature and * execution context (driver/bios) must match. */ static bool bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env) { - struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr; + struct bfi_ioc_image_hdr fwhdr; bfa_nw_ioc_fwver_get(ioc, &fwhdr); - drv_fwhdr = (struct bfi_ioc_image_hdr *) - bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0); - - if (fwhdr.signature != drv_fwhdr->signature) - return false; - if (swab32(fwhdr.bootenv) != boot_env) return false; return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr); } -/** - * Conditionally flush any pending message from firmware at start. - */ +/* Conditionally flush any pending message from firmware at start. */ static void bfa_ioc_msgflush(struct bfa_ioc *ioc) { @@ -1443,9 +1851,6 @@ bfa_ioc_msgflush(struct bfa_ioc *ioc) writel(1, ioc->ioc_regs.lpu_mbox_cmd); } -/** - * @img ioc_init_logic.jpg - */ static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force) { @@ -1453,7 +1858,7 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force) bool fwvalid; u32 boot_env; - ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); + ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); if (force) ioc_fwstate = BFI_IOC_UNINIT; @@ -1467,8 +1872,10 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force) false : bfa_ioc_fwver_valid(ioc, boot_env); if (!fwvalid) { - bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env); - bfa_ioc_poll_fwinit(ioc); + if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) == + BFA_STATUS_OK) + bfa_ioc_poll_fwinit(ioc); + return; } @@ -1498,8 +1905,9 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force) /** * Initialize the h/w for any other states. */ - bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env); - bfa_ioc_poll_fwinit(ioc); + if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) == + BFA_STATUS_OK) + bfa_ioc_poll_fwinit(ioc); } void @@ -1603,11 +2011,8 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc) del_timer(&ioc->hb_timer); } -/** - * @brief - * Initiate a full firmware download. - */ -static void +/* Initiate a full firmware download. 
*/ +static enum bfa_status bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env) { @@ -1617,18 +2022,47 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type, u32 chunkno = 0; u32 i; u32 asicmode; + u32 fwimg_size; + u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS]; + enum bfa_status status; + + if (boot_env == BFI_FWBOOT_ENV_OS && + boot_type == BFI_FWBOOT_TYPE_FLASH) { + fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32); - fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno); + status = bfa_nw_ioc_flash_img_get_chnk(ioc, + BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf); + if (status != BFA_STATUS_OK) + return status; + + fwimg = fwimg_buf; + } else { + fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); + fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), + BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); + } pgnum = bfa_ioc_smem_pgnum(ioc, loff); writel(pgnum, ioc->ioc_regs.host_page_num_fn); - for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) { + for (i = 0; i < fwimg_size; i++) { if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) { chunkno = BFA_IOC_FLASH_CHUNK_NO(i); - fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), + if (boot_env == BFI_FWBOOT_ENV_OS && + boot_type == BFI_FWBOOT_TYPE_FLASH) { + status = bfa_nw_ioc_flash_img_get_chnk(ioc, + BFA_IOC_FLASH_CHUNK_ADDR(chunkno), + fwimg_buf); + if (status != BFA_STATUS_OK) + return status; + + fwimg = fwimg_buf; + } else { + fwimg = bfa_cb_image_get_chunk( + bfa_ioc_asic_gen(ioc), BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); + } } /** @@ -1656,6 +2090,10 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type, /* * Set boot type, env and device mode at the end. */ + if (boot_env == BFI_FWBOOT_ENV_OS && + boot_type == BFI_FWBOOT_TYPE_FLASH) { + boot_type = BFI_FWBOOT_TYPE_NORMAL; + } asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode, ioc->port0_mode, ioc->port1_mode); writel(asicmode, ((ioc->ioc_regs.smem_page_start) @@ -1664,6 +2102,7 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type, + (BFI_FWBOOT_TYPE_OFF))); writel(boot_env, ((ioc->ioc_regs.smem_page_start) + (BFI_FWBOOT_ENV_OFF))); + return BFA_STATUS_OK; } static void @@ -1672,9 +2111,7 @@ bfa_ioc_reset(struct bfa_ioc *ioc, bool force) bfa_ioc_hwinit(ioc, force); } -/** - * BFA ioc enable reply by firmware - */ +/* BFA ioc enable reply by firmware */ static void bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode, u8 cap_bm) @@ -1686,10 +2123,7 @@ bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode, bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE); } -/** - * @brief - * Update BFA configuration from firmware configuration. - */ +/* Update BFA configuration from firmware configuration. */ static void bfa_ioc_getattr_reply(struct bfa_ioc *ioc) { @@ -1702,9 +2136,7 @@ bfa_ioc_getattr_reply(struct bfa_ioc *ioc) bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); } -/** - * Attach time initialization of mbox logic. - */ +/* Attach time initialization of mbox logic. */ static void bfa_ioc_mbox_attach(struct bfa_ioc *ioc) { @@ -1718,9 +2150,7 @@ bfa_ioc_mbox_attach(struct bfa_ioc *ioc) } } -/** - * Mbox poll timer -- restarts any pending mailbox requests. - */ +/* Mbox poll timer -- restarts any pending mailbox requests. */ static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc) { @@ -1760,9 +2190,7 @@ bfa_ioc_mbox_poll(struct bfa_ioc *ioc) } } -/** - * Cleanup any pending requests. - */ +/* Cleanup any pending requests. 
*/ static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc) { @@ -1774,12 +2202,12 @@ bfa_ioc_mbox_flush(struct bfa_ioc *ioc) } /** - * Read data from SMEM to host through PCI memmap + * bfa_nw_ioc_smem_read - Read data from SMEM to host through PCI memmap * - * @param[in] ioc memory for IOC - * @param[in] tbuf app memory to store data from smem - * @param[in] soff smem offset - * @param[in] sz size of smem in bytes + * @ioc: memory for IOC + * @tbuf: app memory to store data from smem + * @soff: smem offset + * @sz: size of smem in bytes */ static int bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz) @@ -1826,9 +2254,7 @@ bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz) return 0; } -/** - * Retrieve saved firmware trace from a prior IOC failure. - */ +/* Retrieve saved firmware trace from a prior IOC failure. */ int bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen) { @@ -1844,9 +2270,7 @@ bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen) return status; } -/** - * Save firmware trace if configured. - */ +/* Save firmware trace if configured. */ static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc) { @@ -1861,9 +2285,7 @@ bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc) } } -/** - * Retrieve saved firmware trace from a prior IOC failure. - */ +/* Retrieve saved firmware trace from a prior IOC failure. */ int bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen) { @@ -1892,9 +2314,7 @@ bfa_ioc_fail_notify(struct bfa_ioc *ioc) bfa_nw_ioc_debug_save_ftrc(ioc); } -/** - * IOCPF to IOC interface - */ +/* IOCPF to IOC interface */ static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc) { @@ -1928,9 +2348,7 @@ bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc) ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); } -/** - * IOC public - */ +/* IOC public */ static enum bfa_status bfa_ioc_pll_init(struct bfa_ioc *ioc) { @@ -1954,38 +2372,53 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc) return BFA_STATUS_OK; } -/** - * Interface used by diag module to do firmware boot with memory test +/* Interface used by diag module to do firmware boot with memory test * as the entry vector. */ -static void +static enum bfa_status bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type, u32 boot_env) { + struct bfi_ioc_image_hdr *drv_fwhdr; + enum bfa_status status; bfa_ioc_stats(ioc, ioc_boots); if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK) - return; + return BFA_STATUS_FAILED; + if (boot_env == BFI_FWBOOT_ENV_OS && + boot_type == BFI_FWBOOT_TYPE_NORMAL) { + drv_fwhdr = (struct bfi_ioc_image_hdr *) + bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0); + /* Work with Flash iff flash f/w is better than driver f/w. + * Otherwise push drivers firmware. + */ + if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) == + BFI_IOC_IMG_VER_BETTER) + boot_type = BFI_FWBOOT_TYPE_FLASH; + } /** * Initialize IOC state of all functions on a chip reset. 
*/ if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) { - writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate); - writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate); + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST); + bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST); } else { - writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate); - writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate); + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING); + bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING); } bfa_ioc_msgflush(ioc); - bfa_ioc_download_fw(ioc, boot_type, boot_env); - bfa_ioc_lpu_start(ioc); + status = bfa_ioc_download_fw(ioc, boot_type, boot_env); + if (status == BFA_STATUS_OK) + bfa_ioc_lpu_start(ioc); + else + bfa_nw_iocpf_timeout(ioc); + + return status; } -/** - * Enable/disable IOC failure auto recovery. - */ +/* Enable/disable IOC failure auto recovery. */ void bfa_nw_ioc_auto_recover(bool auto_recover) { @@ -2056,10 +2489,10 @@ bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m) } /** - * IOC attach time initialization and setup. + * bfa_nw_ioc_attach - IOC attach time initialization and setup. * - * @param[in] ioc memory for IOC - * @param[in] bfa driver instance structure + * @ioc: memory for IOC + * @bfa: driver instance structure */ void bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn) @@ -2078,9 +2511,7 @@ bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn) bfa_fsm_send_event(ioc, IOC_E_RESET); } -/** - * Driver detach time IOC cleanup. - */ +/* Driver detach time IOC cleanup. */ void bfa_nw_ioc_detach(struct bfa_ioc *ioc) { @@ -2091,9 +2522,9 @@ bfa_nw_ioc_detach(struct bfa_ioc *ioc) } /** - * Setup IOC PCI properties. + * bfa_nw_ioc_pci_init - Setup IOC PCI properties. * - * @param[in] pcidev PCI device information for this IOC + * @pcidev: PCI device information for this IOC */ void bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev, @@ -2160,10 +2591,10 @@ bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev, } /** - * Initialize IOC dma memory + * bfa_nw_ioc_mem_claim - Initialize IOC dma memory * - * @param[in] dm_kva kernel virtual address of IOC dma memory - * @param[in] dm_pa physical address of IOC dma memory + * @dm_kva: kernel virtual address of IOC dma memory + * @dm_pa: physical address of IOC dma memory */ void bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa) @@ -2176,9 +2607,7 @@ bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa) ioc->attr = (struct bfi_ioc_attr *) dm_kva; } -/** - * Return size of dma memory required. - */ +/* Return size of dma memory required. */ u32 bfa_nw_ioc_meminfo(void) { @@ -2201,9 +2630,7 @@ bfa_nw_ioc_disable(struct bfa_ioc *ioc) bfa_fsm_send_event(ioc, IOC_E_DISABLE); } -/** - * Initialize memory for saving firmware trace. - */ +/* Initialize memory for saving firmware trace. */ void bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave) { @@ -2217,9 +2644,7 @@ bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr) return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr); } -/** - * Register mailbox message handler function, to be called by common modules - */ +/* Register mailbox message handler function, to be called by common modules */ void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc, bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg) @@ -2231,11 +2656,12 @@ bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc, } /** - * Queue a mailbox command request to firmware. Waits if mailbox is busy. 
- * Responsibility of caller to serialize + * bfa_nw_ioc_mbox_queue - Queue a mailbox command request to firmware. + * + * @ioc: IOC instance + * @cmd: Mailbox command * - * @param[in] ioc IOC instance - * @param[i] cmd Mailbox command + * Waits if mailbox is busy. Responsibility of caller to serialize */ bool bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd, @@ -2272,9 +2698,7 @@ bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd, return false; } -/** - * Handle mailbox interrupts - */ +/* Handle mailbox interrupts */ void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc) { @@ -2314,9 +2738,7 @@ bfa_nw_ioc_error_isr(struct bfa_ioc *ioc) bfa_fsm_send_event(ioc, IOC_E_HWERROR); } -/** - * return true if IOC is disabled - */ +/* return true if IOC is disabled */ bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc) { @@ -2324,17 +2746,14 @@ bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc) bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); } -/** - * return true if IOC is operational - */ +/* return true if IOC is operational */ bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc) { return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op); } -/** - * Add to IOC heartbeat failure notification queue. To be used by common +/* Add to IOC heartbeat failure notification queue. To be used by common * modules such as cee, port, diag. */ void @@ -2401,7 +2820,6 @@ bfa_ioc_get_type(struct bfa_ioc *ioc) static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num) { - memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN); memcpy(serial_num, (void *)ioc->attr->brcd_serialnum, BFA_ADAPTER_SERIAL_NUM_LEN); @@ -2410,7 +2828,6 @@ bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num) static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver) { - memset(fw_ver, 0, BFA_VERSION_LEN); memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN); } @@ -2432,7 +2849,6 @@ bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev) static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver) { - memset(optrom_ver, 0, BFA_VERSION_LEN); memcpy(optrom_ver, ioc->attr->optrom_version, BFA_VERSION_LEN); } @@ -2440,7 +2856,6 @@ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver) static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer) { - memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN); memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); } @@ -2503,7 +2918,7 @@ bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr) memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr)); ioc_attr->state = bfa_ioc_get_state(ioc); - ioc_attr->port_id = ioc->port_id; + ioc_attr->port_id = bfa_ioc_portid(ioc); ioc_attr->port_mode = ioc->port_mode; ioc_attr->port_mode_cfg = ioc->port_mode_cfg; @@ -2513,14 +2928,13 @@ bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr) bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr); - ioc_attr->pci_attr.device_id = ioc->pcidev.device_id; - ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func; + ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc); + ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc); + ioc_attr->def_fn = bfa_ioc_is_default(ioc); bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); } -/** - * WWN public - */ +/* WWN public */ static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc) { @@ -2533,9 +2947,7 @@ bfa_nw_ioc_get_mac(struct bfa_ioc *ioc) return ioc->attr->mac; } -/** - * Firmware failure detected. Start recovery actions. 
- */ +/* Firmware failure detected. Start recovery actions. */ static void bfa_ioc_recover(struct bfa_ioc *ioc) { @@ -2545,10 +2957,7 @@ bfa_ioc_recover(struct bfa_ioc *ioc) bfa_fsm_send_event(ioc, IOC_E_HBFAIL); } -/** - * @dg hal_iocpf_pvt BFA IOC PF private functions - * @{ - */ +/* BFA IOC PF private functions */ static void bfa_iocpf_enable(struct bfa_ioc *ioc) @@ -2611,7 +3020,7 @@ bfa_nw_iocpf_sem_timeout(void *ioc_arg) static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc) { - u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate); + u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); if (fwstate == BFI_IOC_DISABLED) { bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); @@ -2669,8 +3078,6 @@ bfa_flash_notify(void *cbarg, enum bfa_ioc_event event) /* * Send flash write request. - * - * @param[in] cbarg - callback argument */ static void bfa_flash_write_send(struct bfa_flash *flash) @@ -2699,10 +3106,10 @@ bfa_flash_write_send(struct bfa_flash *flash) flash->offset += len; } -/* - * Send flash read request. +/** + * bfa_flash_read_send - Send flash read request. * - * @param[in] cbarg - callback argument + * @cbarg: callback argument */ static void bfa_flash_read_send(void *cbarg) @@ -2724,11 +3131,11 @@ bfa_flash_read_send(void *cbarg) bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); } -/* - * Process flash response messages upon receiving interrupts. +/** + * bfa_flash_intr - Process flash response messages upon receiving interrupts. * - * @param[in] flasharg - flash structure - * @param[in] msg - message structure + * @flasharg: flash structure + * @msg: message structure */ static void bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg) @@ -2821,12 +3228,12 @@ bfa_nw_flash_meminfo(void) return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); } -/* - * Flash attach API. +/** + * bfa_nw_flash_attach - Flash attach API. * - * @param[in] flash - flash structure - * @param[in] ioc - ioc structure - * @param[in] dev - device structure + * @flash: flash structure + * @ioc: ioc structure + * @dev: device structure */ void bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev) @@ -2842,12 +3249,12 @@ bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev) list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q); } -/* - * Claim memory for flash +/** + * bfa_nw_flash_memclaim - Claim memory for flash * - * @param[in] flash - flash structure - * @param[in] dm_kva - pointer to virtual memory address - * @param[in] dm_pa - physical memory address + * @flash: flash structure + * @dm_kva: pointer to virtual memory address + * @dm_pa: physical memory address */ void bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa) @@ -2859,13 +3266,13 @@ bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa) dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); } -/* - * Get flash attribute. +/** + * bfa_nw_flash_get_attr - Get flash attribute. * - * @param[in] flash - flash structure - * @param[in] attr - flash attribute structure - * @param[in] cbfn - callback function - * @param[in] cbarg - callback argument + * @flash: flash structure + * @attr: flash attribute structure + * @cbfn: callback function + * @cbarg: callback argument * * Return status. */ @@ -2895,17 +3302,17 @@ bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr, return BFA_STATUS_OK; } -/* - * Update flash partition. +/** + * bfa_nw_flash_update_part - Update flash partition. 
* - * @param[in] flash - flash structure - * @param[in] type - flash partition type - * @param[in] instance - flash partition instance - * @param[in] buf - update data buffer - * @param[in] len - data buffer length - * @param[in] offset - offset relative to the partition starting address - * @param[in] cbfn - callback function - * @param[in] cbarg - callback argument + * @flash: flash structure + * @type: flash partition type + * @instance: flash partition instance + * @buf: update data buffer + * @len: data buffer length + * @offset: offset relative to the partition starting address + * @cbfn: callback function + * @cbarg: callback argument * * Return status. */ @@ -2944,17 +3351,17 @@ bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance, return BFA_STATUS_OK; } -/* - * Read flash partition. +/** + * bfa_nw_flash_read_part - Read flash partition. * - * @param[in] flash - flash structure - * @param[in] type - flash partition type - * @param[in] instance - flash partition instance - * @param[in] buf - read data buffer - * @param[in] len - data buffer length - * @param[in] offset - offset relative to the partition starting address - * @param[in] cbfn - callback function - * @param[in] cbarg - callback argument + * @flash: flash structure + * @type: flash partition type + * @instance: flash partition instance + * @buf: read data buffer + * @len: data buffer length + * @offset: offset relative to the partition starting address + * @cbfn: callback function + * @cbarg: callback argument * * Return status. */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h index 3b4460fdc14..20cff7df4b5 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h @@ -30,9 +30,7 @@ #define BNA_DBG_FWTRC_LEN (BFI_IOC_TRC_ENTS * BFI_IOC_TRC_ENT_SZ + \ BFI_IOC_TRC_HDR_SZ) -/** - * PCI device information required by IOC - */ +/* PCI device information required by IOC */ struct bfa_pcidev { int pci_slot; u8 pci_func; @@ -41,8 +39,7 @@ struct bfa_pcidev { void __iomem *pci_bar_kva; }; -/** - * Structure used to remember the DMA-able memory block's KVA and Physical +/* Structure used to remember the DMA-able memory block's KVA and Physical * Address */ struct bfa_dma { @@ -52,15 +49,11 @@ struct bfa_dma { #define BFA_DMA_ALIGN_SZ 256 -/** - * smem size for Crossbow and Catapult - */ +/* smem size for Crossbow and Catapult */ #define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */ #define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */ -/** - * @brief BFA dma address assignment macro. (big endian format) - */ +/* BFA dma address assignment macro. 
(big endian format) */ #define bfa_dma_be_addr_set(dma_addr, pa) \ __bfa_dma_be_addr_set(&dma_addr, (u64)pa) static inline void @@ -108,9 +101,7 @@ struct bfa_ioc_regs { u32 smem_pg0; }; -/** - * IOC Mailbox structures - */ +/* IOC Mailbox structures */ typedef void (*bfa_mbox_cmd_cbfn_t)(void *cbarg); struct bfa_mbox_cmd { struct list_head qe; @@ -119,9 +110,7 @@ struct bfa_mbox_cmd { u32 msg[BFI_IOC_MSGSZ]; }; -/** - * IOC mailbox module - */ +/* IOC mailbox module */ typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg *m); struct bfa_ioc_mbox_mod { struct list_head cmd_q; /*!< pending mbox queue */ @@ -132,9 +121,7 @@ struct bfa_ioc_mbox_mod { } mbhdlr[BFI_MC_MAX]; }; -/** - * IOC callback function interfaces - */ +/* IOC callback function interfaces */ typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status); typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa); typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa); @@ -146,9 +133,7 @@ struct bfa_ioc_cbfn { bfa_ioc_reset_cbfn_t reset_cbfn; }; -/** - * IOC event notification mechanism. - */ +/* IOC event notification mechanism. */ enum bfa_ioc_event { BFA_IOC_E_ENABLED = 1, BFA_IOC_E_DISABLED = 2, @@ -163,9 +148,7 @@ struct bfa_ioc_notify { void *cbarg; }; -/** - * Initialize a IOC event notification structure - */ +/* Initialize a IOC event notification structure */ #define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do { \ (__notify)->cbfn = (__cbfn); \ (__notify)->cbarg = (__cbarg); \ @@ -232,6 +215,13 @@ struct bfa_ioc_hwif { void (*ioc_sync_ack) (struct bfa_ioc *ioc); bool (*ioc_sync_complete) (struct bfa_ioc *ioc); bool (*ioc_lpu_read_stat) (struct bfa_ioc *ioc); + void (*ioc_set_fwstate) (struct bfa_ioc *ioc, + enum bfi_ioc_state fwstate); + enum bfi_ioc_state (*ioc_get_fwstate) (struct bfa_ioc *ioc); + void (*ioc_set_alt_fwstate) (struct bfa_ioc *ioc, + enum bfi_ioc_state fwstate); + enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc *ioc); + }; #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) @@ -239,6 +229,8 @@ struct bfa_ioc_hwif { #define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva) #define bfa_ioc_portid(__ioc) ((__ioc)->port_id) #define bfa_ioc_asic_gen(__ioc) ((__ioc)->asic_gen) +#define bfa_ioc_is_default(__ioc) \ + (bfa_ioc_pcifn(__ioc) == bfa_ioc_portid(__ioc)) #define bfa_ioc_fetch_stats(__ioc, __stats) \ (((__stats)->drv_stats) = (__ioc)->stats) #define bfa_ioc_clr_stats(__ioc) \ @@ -261,9 +253,7 @@ struct bfa_ioc_hwif { #define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) #define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS) -/** - * IOC mailbox interface - */ +/* IOC mailbox interface */ bool bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd, bfa_mbox_cmd_cbfn_t cbfn, void *cbarg); @@ -271,9 +261,7 @@ void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc); void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc, bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg); -/** - * IOC interfaces - */ +/* IOC interfaces */ #define bfa_ioc_pll_init_asic(__ioc) \ ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \ @@ -310,6 +298,7 @@ void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc); bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc); bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc); void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr); +enum bfa_status bfa_nw_ioc_fwsig_invalidate(struct bfa_ioc *ioc); void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc, struct bfa_ioc_notify *notify); bool 
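
The bfa_dma_be_addr_set() macro above stores a 64-bit physical address as two big-endian 32-bit halves in the bfi_addr_u layout the firmware expects. A standalone sketch of that split-and-swap, where the hand-rolled byte swap stands in for htonl() and a little-endian host is assumed.

#include <stdint.h>

union be_addr {                         /* mirrors union bfi_addr_u */
        struct {
                uint32_t addr_lo;
                uint32_t addr_hi;
        } a32;
};

static uint32_t swab32(uint32_t v)      /* htonl() stand-in, LE host assumed */
{
        return (v >> 24) | ((v >> 8) & 0x0000ff00) |
               ((v << 8) & 0x00ff0000) | (v << 24);
}

static void dma_be_addr_set(union be_addr *d, uint64_t pa)
{
        d->a32.addr_lo = swab32((uint32_t)pa);          /* low 32 bits */
        d->a32.addr_hi = swab32((uint32_t)(pa >> 32));  /* high 32 bits */
}
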
bfa_nw_ioc_sem_get(void __iomem *sem_reg); diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c index b6b036a143a..d639558455c 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c @@ -48,6 +48,12 @@ static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc); static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc); static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc); static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc); +static void bfa_ioc_ct_set_cur_ioc_fwstate( + struct bfa_ioc *ioc, enum bfi_ioc_state fwstate); +static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc); +static void bfa_ioc_ct_set_alt_ioc_fwstate( + struct bfa_ioc *ioc, enum bfi_ioc_state fwstate); +static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc); static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode); static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb, @@ -68,6 +74,10 @@ static const struct bfa_ioc_hwif nw_hwif_ct = { .ioc_sync_leave = bfa_ioc_ct_sync_leave, .ioc_sync_ack = bfa_ioc_ct_sync_ack, .ioc_sync_complete = bfa_ioc_ct_sync_complete, + .ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate, + .ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate, + .ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate, + .ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate, }; static const struct bfa_ioc_hwif nw_hwif_ct2 = { @@ -85,11 +95,13 @@ static const struct bfa_ioc_hwif nw_hwif_ct2 = { .ioc_sync_leave = bfa_ioc_ct_sync_leave, .ioc_sync_ack = bfa_ioc_ct_sync_ack, .ioc_sync_complete = bfa_ioc_ct_sync_complete, + .ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate, + .ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate, + .ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate, + .ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate, }; -/** - * Called from bfa_ioc_attach() to map asic specific calls. - */ +/* Called from bfa_ioc_attach() to map asic specific calls. */ void bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc) { @@ -102,9 +114,7 @@ bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc) ioc->ioc_hwif = &nw_hwif_ct2; } -/** - * Return true if firmware of current driver matches the running firmware. - */ +/* Return true if firmware of current driver matches the running firmware. */ static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc) { @@ -182,9 +192,7 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc) bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); } -/** - * Notify other functions on HB failure. - */ +/* Notify other functions on HB failure. 
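
The four new hooks wired into nw_hwif_ct and nw_hwif_ct2 below route firmware-state reads and writes through the per-ASIC ops table instead of a hard-coded register access. A compressed sketch of that indirection with stand-in types:

#include <stdint.h>

enum fwstate { FW_UNINIT, FW_INITING, FW_OP, FW_DISABLED };

struct ioc;

struct ioc_hwif {                       /* subset of struct bfa_ioc_hwif */
        void (*ioc_set_fwstate)(struct ioc *ioc, enum fwstate s);
        enum fwstate (*ioc_get_fwstate)(struct ioc *ioc);
};

struct ioc {
        const struct ioc_hwif *ioc_hwif;
        volatile uint32_t *ioc_fwstate; /* register the CT variants use */
};

static enum fwstate ct_get_fwstate(struct ioc *ioc)
{
        return (enum fwstate)*ioc->ioc_fwstate;         /* readl() stand-in */
}

static void ct_set_fwstate(struct ioc *ioc, enum fwstate s)
{
        *ioc->ioc_fwstate = s;                          /* writel() stand-in */
}

static const struct ioc_hwif hwif_ct = {
        .ioc_set_fwstate = ct_set_fwstate,
        .ioc_get_fwstate = ct_get_fwstate,
};

/* Callers now go through the table, the way bfa_ioc_poll_fwinit() calls
 * bfa_ioc_get_cur_ioc_fwstate() instead of a bare readl() in this patch.
 */
static enum fwstate ioc_get_cur_fwstate(struct ioc *ioc)
{
        return ioc->ioc_hwif->ioc_get_fwstate(ioc);
}
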
*/ static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc) { @@ -195,9 +203,7 @@ bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc) readl(ioc->ioc_regs.alt_ll_halt); } -/** - * Host to LPU mailbox message addresses - */ +/* Host to LPU mailbox message addresses */ static const struct { u32 hfn_mbox; u32 lpu_mbox; @@ -209,9 +215,7 @@ static const struct { { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 } }; -/** - * Host <-> LPU mailbox command/status registers - port 0 - */ +/* Host <-> LPU mailbox command/status registers - port 0 */ static const struct { u32 hfn; u32 lpu; @@ -222,9 +226,7 @@ static const struct { { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT } }; -/** - * Host <-> LPU mailbox command/status registers - port 1 - */ +/* Host <-> LPU mailbox command/status registers - port 1 */ static const struct { u32 hfn; u32 lpu; @@ -368,9 +370,7 @@ bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc) ioc->ioc_regs.err_set = rb + ERR_SET_REG; } -/** - * Initialize IOC to port mapping. - */ +/* Initialize IOC to port mapping. */ #define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8) static void @@ -398,9 +398,7 @@ bfa_ioc_ct2_map_port(struct bfa_ioc *ioc) ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH); } -/** - * Set interrupt mode for a function: INTX or MSIX - */ +/* Set interrupt mode for a function: INTX or MSIX */ static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix) { @@ -443,9 +441,7 @@ bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc) return false; } -/** - * MSI-X resource allocation for 1860 with no asic block - */ +/* MSI-X resource allocation for 1860 with no asic block */ #define HOSTFN_MSIX_DEFAULT 64 #define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138 #define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c @@ -473,9 +469,7 @@ bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc) rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); } -/** - * Cleanup hw semaphore and usecnt registers - */ +/* Cleanup hw semaphore and usecnt registers */ static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc) { @@ -492,9 +486,7 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc) bfa_nw_ioc_hw_sem_release(ioc); } -/** - * Synchronized IOC failure processing routines - */ +/* Synchronized IOC failure processing routines */ static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc) { @@ -518,9 +510,7 @@ bfa_ioc_ct_sync_start(struct bfa_ioc *ioc) return bfa_ioc_ct_sync_complete(ioc); } -/** - * Synchronized IOC failure processing routines - */ +/* Synchronized IOC failure processing routines */ static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc) { @@ -589,6 +579,32 @@ bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc) return false; } +static void +bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc *ioc, + enum bfi_ioc_state fwstate) +{ + writel(fwstate, ioc->ioc_regs.ioc_fwstate); +} + +static enum bfi_ioc_state +bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc) +{ + return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate); +} + +static void +bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc *ioc, + enum bfi_ioc_state fwstate) +{ + writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate); +} + +static enum bfi_ioc_state +bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc) +{ + return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate); +} + static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode) { diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.c b/drivers/net/ethernet/brocade/bna/bfa_msgq.c index dd36427f475..55067d0d25c 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_msgq.c +++ 
b/drivers/net/ethernet/brocade/bna/bfa_msgq.c @@ -16,9 +16,7 @@ * www.brocade.com */ -/** - * @file bfa_msgq.c MSGQ module source file. - */ +/* MSGQ module source file. */ #include "bfi.h" #include "bfa_msgq.h" diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h index 0d9df695397..8c563a77cdf 100644 --- a/drivers/net/ethernet/brocade/bna/bfi.h +++ b/drivers/net/ethernet/brocade/bna/bfi.h @@ -22,15 +22,12 @@ #pragma pack(1) -/** - * BFI FW image type - */ +/* BFI FW image type */ #define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */ #define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32)) +#define BFI_FLASH_IMAGE_SZ 0x100000 -/** - * Msg header common to all msgs - */ +/* Msg header common to all msgs */ struct bfi_mhdr { u8 msg_class; /*!< @ref enum bfi_mclass */ u8 msg_id; /*!< msg opcode with in the class */ @@ -65,17 +62,14 @@ struct bfi_mhdr { #define BFI_I2H_OPCODE_BASE 128 #define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE) -/** - **************************************************************************** +/**************************************************************************** * * Scatter Gather Element and Page definition * **************************************************************************** */ -/** - * DMA addresses - */ +/* DMA addresses */ union bfi_addr_u { struct { u32 addr_lo; @@ -83,9 +77,7 @@ union bfi_addr_u { } a32; }; -/** - * Generic DMA addr-len pair. - */ +/* Generic DMA addr-len pair. */ struct bfi_alen { union bfi_addr_u al_addr; /* DMA addr of buffer */ u32 al_len; /* length of buffer */ @@ -98,26 +90,20 @@ struct bfi_alen { #define BFI_LMSG_PL_WSZ \ ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4) -/** - * Mailbox message structure - */ +/* Mailbox message structure */ #define BFI_MBMSG_SZ 7 struct bfi_mbmsg { struct bfi_mhdr mh; u32 pl[BFI_MBMSG_SZ]; }; -/** - * Supported PCI function class codes (personality) - */ +/* Supported PCI function class codes (personality) */ enum bfi_pcifn_class { BFI_PCIFN_CLASS_FC = 0x0c04, BFI_PCIFN_CLASS_ETH = 0x0200, }; -/** - * Message Classes - */ +/* Message Classes */ enum bfi_mclass { BFI_MC_IOC = 1, /*!< IO Controller (IOC) */ BFI_MC_DIAG = 2, /*!< Diagnostic Msgs */ @@ -159,15 +145,12 @@ enum bfi_mclass { #define BFI_FWBOOT_ENV_OS 0 -/** - *---------------------------------------------------------------------- +/*---------------------------------------------------------------------- * IOC *---------------------------------------------------------------------- */ -/** - * Different asic generations - */ +/* Different asic generations */ enum bfi_asic_gen { BFI_ASIC_GEN_CB = 1, BFI_ASIC_GEN_CT = 2, @@ -196,9 +179,7 @@ enum bfi_ioc_i2h_msgs { BFI_IOC_I2H_HBEAT = BFA_I2HM(4), }; -/** - * BFI_IOC_H2I_GETATTR_REQ message - */ +/* BFI_IOC_H2I_GETATTR_REQ message */ struct bfi_ioc_getattr_req { struct bfi_mhdr mh; union bfi_addr_u attr_addr; @@ -231,37 +212,51 @@ struct bfi_ioc_attr { u32 card_type; /*!< card type */ }; -/** - * BFI_IOC_I2H_GETATTR_REPLY message - */ +/* BFI_IOC_I2H_GETATTR_REPLY message */ struct bfi_ioc_getattr_reply { struct bfi_mhdr mh; /*!< Common msg header */ u8 status; /*!< cfg reply status */ u8 rsvd[3]; }; -/** - * Firmware memory page offsets - */ +/* Firmware memory page offsets */ #define BFI_IOC_SMEM_PG0_CB (0x40) #define BFI_IOC_SMEM_PG0_CT (0x180) -/** - * Firmware statistic offset - */ +/* Firmware statistic offset */ #define BFI_IOC_FWSTATS_OFF (0x6B40) #define BFI_IOC_FWSTATS_SZ (4096) -/** - * Firmware trace offset - */ +/* Firmware 
trace offset */ #define BFI_IOC_TRC_OFF (0x4b00) #define BFI_IOC_TRC_ENTS 256 #define BFI_IOC_TRC_ENT_SZ 16 #define BFI_IOC_TRC_HDR_SZ 32 #define BFI_IOC_FW_SIGNATURE (0xbfadbfad) +#define BFI_IOC_FW_INV_SIGN (0xdeaddead) #define BFI_IOC_MD5SUM_SZ 4 + +struct bfi_ioc_fwver { +#ifdef __BIG_ENDIAN + u8 patch; + u8 maint; + u8 minor; + u8 major; + u8 rsvd[2]; + u8 build; + u8 phase; +#else + u8 major; + u8 minor; + u8 maint; + u8 patch; + u8 phase; + u8 build; + u8 rsvd[2]; +#endif +}; + struct bfi_ioc_image_hdr { u32 signature; /*!< constant signature */ u8 asic_gen; /*!< asic generation */ @@ -270,10 +265,18 @@ struct bfi_ioc_image_hdr { u8 port1_mode; /*!< device mode for port 1 */ u32 exec; /*!< exec vector */ u32 bootenv; /*!< firmware boot env */ - u32 rsvd_b[4]; + u32 rsvd_b[2]; + struct bfi_ioc_fwver fwver; u32 md5sum[BFI_IOC_MD5SUM_SZ]; }; +enum bfi_ioc_img_ver_cmp { + BFI_IOC_IMG_VER_INCOMP, + BFI_IOC_IMG_VER_OLD, + BFI_IOC_IMG_VER_SAME, + BFI_IOC_IMG_VER_BETTER +}; + #define BFI_FWBOOT_DEVMODE_OFF 4 #define BFI_FWBOOT_TYPE_OFF 8 #define BFI_FWBOOT_ENV_OFF 12 @@ -299,9 +302,7 @@ struct bfi_ioc_hbeat { u32 hb_count; /*!< current heart beat count */ }; -/** - * IOC hardware/firmware state - */ +/* IOC hardware/firmware state */ enum bfi_ioc_state { BFI_IOC_UNINIT = 0, /*!< not initialized */ BFI_IOC_INITING = 1, /*!< h/w is being initialized */ @@ -345,9 +346,7 @@ enum { ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \ BFI_ADAPTER_UNSUPP)) -/** - * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages - */ +/* BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages */ struct bfi_ioc_ctrl_req { struct bfi_mhdr mh; u16 clscode; @@ -355,9 +354,7 @@ struct bfi_ioc_ctrl_req { u32 tv_sec; }; -/** - * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages - */ +/* BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages */ struct bfi_ioc_ctrl_reply { struct bfi_mhdr mh; /*!< Common msg header */ u8 status; /*!< enable/disable status */ @@ -367,9 +364,7 @@ struct bfi_ioc_ctrl_reply { }; #define BFI_IOC_MSGSZ 8 -/** - * H2I Messages - */ +/* H2I Messages */ union bfi_ioc_h2i_msg_u { struct bfi_mhdr mh; struct bfi_ioc_ctrl_req enable_req; @@ -378,17 +373,14 @@ union bfi_ioc_h2i_msg_u { u32 mboxmsg[BFI_IOC_MSGSZ]; }; -/** - * I2H Messages - */ +/* I2H Messages */ union bfi_ioc_i2h_msg_u { struct bfi_mhdr mh; struct bfi_ioc_ctrl_reply fw_event; u32 mboxmsg[BFI_IOC_MSGSZ]; }; -/** - *---------------------------------------------------------------------- +/*---------------------------------------------------------------------- * MSGQ *---------------------------------------------------------------------- */ diff --git a/drivers/net/ethernet/brocade/bna/bfi_cna.h b/drivers/net/ethernet/brocade/bna/bfi_cna.h index 4eecabea397..6704a439297 100644 --- a/drivers/net/ethernet/brocade/bna/bfi_cna.h +++ b/drivers/net/ethernet/brocade/bna/bfi_cna.h @@ -37,18 +37,14 @@ enum bfi_port_i2h { BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4), }; -/** - * Generic REQ type - */ +/* Generic REQ type */ struct bfi_port_generic_req { struct bfi_mhdr mh; /*!< msg header */ u32 msgtag; /*!< msgtag for reply */ u32 rsvd; }; -/** - * Generic RSP type - */ +/* Generic RSP type */ struct bfi_port_generic_rsp { struct bfi_mhdr mh; /*!< common msg header */ u8 status; /*!< port enable status */ @@ -56,44 +52,12 @@ struct bfi_port_generic_rsp { u32 msgtag; /*!< msgtag for reply */ }; -/** - * @todo - * BFI_PORT_H2I_ENABLE_REQ - */ - -/** - * @todo - * BFI_PORT_I2H_ENABLE_RSP - */ - -/** - * 
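
The new struct bfi_ioc_fwver and enum bfi_ioc_img_ver_cmp above let the driver rank a flash image against the running firmware rather than demand an exact match. A hedged sketch of one plausible field-by-field comparison; the driver's actual policy, including phase/build handling and the BFI_IOC_FW_INV_SIGN check, lives in bfa_ioc.c and is not shown in this hunk.

#include <stdint.h>

struct fwver {                          /* major..patch as in bfi_ioc_fwver */
        uint8_t major, minor, maint, patch;
};

enum img_ver_cmp { VER_INCOMP, VER_OLD, VER_SAME, VER_BETTER };

static enum img_ver_cmp fwver_cmp(const struct fwver *img,
                                  const struct fwver *run)
{
        if (img->major != run->major)   /* assumed interface break */
                return VER_INCOMP;
        if (img->minor != run->minor)
                return img->minor > run->minor ? VER_BETTER : VER_OLD;
        if (img->maint != run->maint)
                return img->maint > run->maint ? VER_BETTER : VER_OLD;
        if (img->patch != run->patch)
                return img->patch > run->patch ? VER_BETTER : VER_OLD;
        return VER_SAME;
}
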
BFI_PORT_H2I_DISABLE_REQ - */ - -/** - * BFI_PORT_I2H_DISABLE_RSP - */ - -/** - * BFI_PORT_H2I_GET_STATS_REQ - */ +/* BFI_PORT_H2I_GET_STATS_REQ */ struct bfi_port_get_stats_req { struct bfi_mhdr mh; /*!< common msg header */ union bfi_addr_u dma_addr; }; -/** - * BFI_PORT_I2H_GET_STATS_RSP - */ - -/** - * BFI_PORT_H2I_CLEAR_STATS_REQ - */ - -/** - * BFI_PORT_I2H_CLEAR_STATS_RSP - */ - union bfi_port_h2i_msg_u { struct bfi_mhdr mh; struct bfi_port_generic_req enable_req; diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h index a90f1cf46b4..ae072dc5d23 100644 --- a/drivers/net/ethernet/brocade/bna/bfi_enet.h +++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h @@ -16,12 +16,9 @@ * www.brocade.com */ -/** - * @file bfi_enet.h BNA Hardware and Firmware Interface - */ +/* BNA Hardware and Firmware Interface */ -/** - * Skipping statistics collection to avoid clutter. +/* Skipping statistics collection to avoid clutter. * Command is no longer needed: * MTU * TxQ Stop @@ -64,9 +61,7 @@ union bfi_addr_be_u { } a32; }; -/** - * T X Q U E U E D E F I N E S - */ +/* T X Q U E U E D E F I N E S */ /* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */ /* TxQ Entry Opcodes */ #define BFI_ENET_TXQ_WI_SEND (0x402) /* Single Frame Transmission */ @@ -106,10 +101,7 @@ struct bfi_enet_txq_wi_vector { /* Tx Buffer Descriptor */ union bfi_addr_be_u addr; }; -/** - * TxQ Entry Structure - * - */ +/* TxQ Entry Structure */ struct bfi_enet_txq_entry { union { struct bfi_enet_txq_wi_base base; @@ -124,16 +116,12 @@ struct bfi_enet_txq_entry { #define BFI_ENET_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \ (((_hdr_size) << 10) | ((_offset) & 0x3FF)) -/** - * R X Q U E U E D E F I N E S - */ +/* R X Q U E U E D E F I N E S */ struct bfi_enet_rxq_entry { union bfi_addr_be_u rx_buffer; }; -/** - * R X C O M P L E T I O N Q U E U E D E F I N E S - */ +/* R X C O M P L E T I O N Q U E U E D E F I N E S */ /* CQ Entry Flags */ #define BFI_ENET_CQ_EF_MAC_ERROR (1 << 0) #define BFI_ENET_CQ_EF_FCS_ERROR (1 << 1) @@ -174,9 +162,7 @@ struct bfi_enet_cq_entry { u8 rxq_id; }; -/** - * E N E T C O N T R O L P A T H C O M M A N D S - */ +/* E N E T C O N T R O L P A T H C O M M A N D S */ struct bfi_enet_q { union bfi_addr_u pg_tbl; union bfi_addr_u first_entry; @@ -222,9 +208,7 @@ struct bfi_enet_ib { u16 rsvd; }; -/** - * ENET command messages - */ +/* ENET command messages */ enum bfi_enet_h2i_msgs { /* Rx Commands */ BFI_ENET_H2I_RX_CFG_SET_REQ = 1, @@ -350,9 +334,7 @@ enum bfi_enet_i2h_msgs { BFI_ENET_I2H_BW_UPDATE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 4), }; -/** - * The following error codes can be returned by the enet commands - */ +/* The following error codes can be returned by the enet commands */ enum bfi_enet_err { BFI_ENET_CMD_OK = 0, BFI_ENET_CMD_FAIL = 1, @@ -364,8 +346,7 @@ enum bfi_enet_err { BFI_ENET_CMD_PORT_DISABLED = 7, /* !< port in disabled state */ }; -/** - * Generic Request +/* Generic Request * * bfi_enet_req is used by: * BFI_ENET_H2I_RX_CFG_CLR_REQ @@ -375,8 +356,7 @@ struct bfi_enet_req { struct bfi_msgq_mhdr mh; }; -/** - * Enable/Disable Request +/* Enable/Disable Request * * bfi_enet_enable_req is used by: * BFI_ENET_H2I_RSS_ENABLE_REQ (enet_id must be zero) @@ -391,9 +371,7 @@ struct bfi_enet_enable_req { u8 rsvd[3]; }; -/** - * Generic Response - */ +/* Generic Response */ struct bfi_enet_rsp { struct bfi_msgq_mhdr mh; u8 error; /*!< if error see cmd_offset */ @@ -401,20 +379,16 @@ struct bfi_enet_rsp { u16 cmd_offset; /*!< offset to invalid parameter */ }; 
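
The BFI_ENET_TXQ_WI_L4_HDR_N_OFFSET macro in the TxQ section above packs the L4 header size into the top bits and the header offset into the low 10 bits of one 16-bit field. A quick self-checking demonstration; the unpacking helpers are illustrative additions, not part of bfi_enet.h.

#include <assert.h>
#include <stdint.h>

/* From the header: size in the high bits, offset in the low 10 (0..1023). */
#define L4_HDR_N_OFFSET(hdr, off)  (((hdr) << 10) | ((off) & 0x3FF))

/* Inverse helpers, illustrative only. */
#define L4_HDR_SIZE(v)   ((v) >> 10)
#define L4_OFFSET(v)     ((v) & 0x3FF)

int main(void)
{
        uint16_t v = L4_HDR_N_OFFSET(20, 54); /* e.g. TCP hdr after 54 bytes */

        assert(L4_HDR_SIZE(v) == 20);
        assert(L4_OFFSET(v) == 54);
        return 0;
}
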
-/** - * GLOBAL CONFIGURATION - */ +/* GLOBAL CONFIGURATION */ -/** - * bfi_enet_attr_req is used by: +/* bfi_enet_attr_req is used by: * BFI_ENET_H2I_GET_ATTR_REQ */ struct bfi_enet_attr_req { struct bfi_msgq_mhdr mh; }; -/** - * bfi_enet_attr_rsp is used by: +/* bfi_enet_attr_rsp is used by: * BFI_ENET_I2H_GET_ATTR_RSP */ struct bfi_enet_attr_rsp { @@ -427,8 +401,7 @@ struct bfi_enet_attr_rsp { u32 rit_size; }; -/** - * Tx Configuration +/* Tx Configuration * * bfi_enet_tx_cfg is used by: * BFI_ENET_H2I_TX_CFG_SET_REQ @@ -477,8 +450,7 @@ struct bfi_enet_tx_cfg_rsp { } q_handles[BFI_ENET_TXQ_PRIO_MAX]; }; -/** - * Rx Configuration +/* Rx Configuration * * bfi_enet_rx_cfg is used by: * BFI_ENET_H2I_RX_CFG_SET_REQ @@ -500,7 +472,8 @@ enum bfi_enet_hds_type { struct bfi_enet_rx_cfg { u8 rxq_type; - u8 rsvd[3]; + u8 rsvd[1]; + u16 frame_size; struct { u8 max_header_size; @@ -553,8 +526,7 @@ struct bfi_enet_rx_cfg_rsp { } q_handles[BFI_ENET_RX_QSET_MAX]; }; -/** - * RIT +/* RIT * * bfi_enet_rit_req is used by: * BFI_ENET_H2I_RIT_CFG_REQ @@ -566,8 +538,7 @@ struct bfi_enet_rit_req { u8 table[BFI_ENET_RSS_RIT_MAX]; }; -/** - * RSS +/* RSS * * bfi_enet_rss_cfg_req is used by: * BFI_ENET_H2I_RSS_CFG_REQ @@ -591,8 +562,7 @@ struct bfi_enet_rss_cfg_req { struct bfi_enet_rss_cfg cfg; }; -/** - * MAC Unicast +/* MAC Unicast * * bfi_enet_rx_vlan_req is used by: * BFI_ENET_H2I_MAC_UCAST_SET_REQ @@ -606,17 +576,14 @@ struct bfi_enet_ucast_req { u8 rsvd[2]; }; -/** - * MAC Unicast + VLAN - */ +/* MAC Unicast + VLAN */ struct bfi_enet_mac_n_vlan_req { struct bfi_msgq_mhdr mh; u16 vlan_id; mac_t mac_addr; }; -/** - * MAC Multicast +/* MAC Multicast * * bfi_enet_mac_mfilter_add_req is used by: * BFI_ENET_H2I_MAC_MCAST_ADD_REQ @@ -627,8 +594,7 @@ struct bfi_enet_mcast_add_req { u8 rsvd[2]; }; -/** - * bfi_enet_mac_mfilter_add_rsp is used by: +/* bfi_enet_mac_mfilter_add_rsp is used by: * BFI_ENET_I2H_MAC_MCAST_ADD_RSP */ struct bfi_enet_mcast_add_rsp { @@ -640,8 +606,7 @@ struct bfi_enet_mcast_add_rsp { u8 rsvd1[2]; }; -/** - * bfi_enet_mac_mfilter_del_req is used by: +/* bfi_enet_mac_mfilter_del_req is used by: * BFI_ENET_H2I_MAC_MCAST_DEL_REQ */ struct bfi_enet_mcast_del_req { @@ -650,8 +615,7 @@ struct bfi_enet_mcast_del_req { u8 rsvd[2]; }; -/** - * VLAN +/* VLAN * * bfi_enet_rx_vlan_req is used by: * BFI_ENET_H2I_RX_VLAN_SET_REQ @@ -663,8 +627,7 @@ struct bfi_enet_rx_vlan_req { u32 bit_mask[BFI_ENET_VLAN_WORDS_MAX]; }; -/** - * PAUSE +/* PAUSE * * bfi_enet_set_pause_req is used by: * BFI_ENET_H2I_SET_PAUSE_REQ @@ -676,8 +639,7 @@ struct bfi_enet_set_pause_req { u8 rx_pause; /* 1 = enable; 0 = disable */ }; -/** - * DIAGNOSTICS +/* DIAGNOSTICS * * bfi_enet_diag_lb_req is used by: * BFI_ENET_H2I_DIAG_LOOPBACK @@ -689,16 +651,13 @@ struct bfi_enet_diag_lb_req { u8 enable; /* 1 = enable; 0 = disable */ }; -/** - * enum for Loopback opmodes - */ +/* enum for Loopback opmodes */ enum { BFI_ENET_DIAG_LB_OPMODE_EXT = 0, BFI_ENET_DIAG_LB_OPMODE_CBL = 1, }; -/** - * STATISTICS +/* STATISTICS * * bfi_enet_stats_req is used by: * BFI_ENET_H2I_STATS_GET_REQ @@ -713,9 +672,7 @@ struct bfi_enet_stats_req { union bfi_addr_u host_buffer; }; -/** - * defines for "stats_mask" above. - */ +/* defines for "stats_mask" above. 
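
The VLAN filter request above carries the filter as a bitmap of VLAN ids packed into 32-bit words. A sketch of the word/bit arithmetic; the 4096-id space follows from 802.1Q's 12-bit VLAN field, but BFI_ENET_VLAN_WORDS_MAX itself is not shown here, and the request's block_idx suggests the table may be sent to firmware in windows rather than whole.

#include <stdbool.h>
#include <stdint.h>

#define VLAN_ID_MAX 4096                /* 12-bit 802.1Q id space */
#define VLAN_WORDS  (VLAN_ID_MAX / 32)

static uint32_t vlan_bit_mask[VLAN_WORDS];

static void vlan_add(unsigned int vlan_id)
{
        vlan_bit_mask[vlan_id / 32] |= 1u << (vlan_id % 32);
}

static bool vlan_test(unsigned int vlan_id)
{
        return vlan_bit_mask[vlan_id / 32] & (1u << (vlan_id % 32));
}
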
*/ #define BFI_ENET_STATS_MAC (1 << 0) /* !< MAC Statistics */ #define BFI_ENET_STATS_BPC (1 << 1) /* !< Pause Stats from BPC */ #define BFI_ENET_STATS_RAD (1 << 2) /* !< Rx Admission Statistics */ @@ -831,6 +788,7 @@ struct bfi_enet_stats_bpc { /* MAC Rx Statistics */ struct bfi_enet_stats_mac { + u64 stats_clr_cnt; /* times this stats cleared */ u64 frame_64; /* both rx and tx counter */ u64 frame_65_127; /* both rx and tx counter */ u64 frame_128_255; /* both rx and tx counter */ @@ -881,8 +839,7 @@ struct bfi_enet_stats_mac { u64 tx_fragments; }; -/** - * Complete statistics, DMAed from fw to host followed by +/* Complete statistics, DMAed from fw to host followed by * BFI_ENET_I2H_STATS_GET_RSP */ struct bfi_enet_stats { diff --git a/drivers/net/ethernet/brocade/bna/bfi_reg.h b/drivers/net/ethernet/brocade/bna/bfi_reg.h index 0e094fe46df..c49fa312ddb 100644 --- a/drivers/net/ethernet/brocade/bna/bfi_reg.h +++ b/drivers/net/ethernet/brocade/bna/bfi_reg.h @@ -221,9 +221,7 @@ enum { #define __PMM_1T_RESET_P 0x00000001 #define PMM_1T_RESET_REG_P1 0x00023c1c -/** - * Brocade 1860 Adapter specific defines - */ +/* Brocade 1860 Adapter specific defines */ #define CT2_PCI_CPQ_BASE 0x00030000 #define CT2_PCI_APP_BASE 0x00030100 #define CT2_PCI_ETH_BASE 0x00030400 diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h index 4d7a5de08e1..1f512190d69 100644 --- a/drivers/net/ethernet/brocade/bna/bna.h +++ b/drivers/net/ethernet/brocade/bna/bna.h @@ -25,11 +25,7 @@ extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX]; -/** - * - * Macros and constants - * - */ +/* Macros and constants */ #define BNA_IOC_TIMER_FREQ 200 @@ -142,6 +138,8 @@ do { \ #define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth) \ ((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1)) +#define BNA_QE_INDX_INC(_idx, _q_depth) BNA_QE_INDX_ADD(_idx, 1, _q_depth) + #define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth) \ (((_updated_idx) - (_old_idx)) & ((_q_depth) - 1)) @@ -356,11 +354,15 @@ do { \ } \ } while (0) -/** - * - * Inline functions - * - */ +#define bna_mcam_mod_free_q(_bna) (&(_bna)->mcam_mod.free_q) + +#define bna_mcam_mod_del_q(_bna) (&(_bna)->mcam_mod.del_q) + +#define bna_ucam_mod_free_q(_bna) (&(_bna)->ucam_mod.free_q) + +#define bna_ucam_mod_del_q(_bna) (&(_bna)->ucam_mod.del_q) + +/* Inline functions */ static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr) { @@ -377,15 +379,9 @@ static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr) #define bna_attr(_bna) (&(_bna)->ioceth.attr) -/** - * - * Function prototypes - * - */ +/* Function prototypes */ -/** - * BNA - */ +/* BNA */ /* FW response handlers */ void bna_bfi_stats_clr_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr); @@ -403,34 +399,25 @@ int bna_num_rxp_set(struct bna *bna, int num_rxp); void bna_hw_stats_get(struct bna *bna); /* APIs for RxF */ -struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod); -void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, - struct bna_mac *mac); -struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod); -void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, - struct bna_mac *mac); +struct bna_mac *bna_cam_mod_mac_get(struct list_head *head); +void bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac); struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod); void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod, struct bna_mcam_handle *handle); -/** - * MBOX - */ +/* MBOX */ 
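
The queue-index macros above (BNA_QE_INDX_ADD, the new single-step BNA_QE_INDX_INC, and BNA_Q_INDEX_CHANGE) rely on the queue depth being a power of two so that wrap-around is a simple mask with depth - 1. A small self-checking demonstration of that arithmetic:

#include <assert.h>

#define QE_INDX_ADD(idx, num, depth)  ((idx) = ((idx) + (num)) & ((depth) - 1))
#define QE_INDX_INC(idx, depth)       QE_INDX_ADD(idx, 1, depth)
#define Q_INDEX_CHANGE(old, new, depth) (((new) - (old)) & ((depth) - 1))

int main(void)
{
        unsigned int prod = 62, depth = 64;     /* depth must be 2^n */

        QE_INDX_ADD(prod, 4, depth);            /* 62 + 4 wraps to 2 */
        assert(prod == 2);
        assert(Q_INDEX_CHANGE(62, prod, depth) == 4);  /* distance survives wrap */
        QE_INDX_INC(prod, depth);
        assert(prod == 3);
        return 0;
}
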
/* API for BNAD */ void bna_mbox_handler(struct bna *bna, u32 intr_status); -/** - * ETHPORT - */ +/* ETHPORT */ /* Callbacks for RX */ void bna_ethport_cb_rx_started(struct bna_ethport *ethport); void bna_ethport_cb_rx_stopped(struct bna_ethport *ethport); -/** - * TX MODULE AND TX - */ +/* TX MODULE AND TX */ + /* FW response handelrs */ void bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr); @@ -462,9 +449,7 @@ void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type, void bna_tx_cleanup_complete(struct bna_tx *tx); void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo); -/** - * RX MODULE, RX, RXF - */ +/* RX MODULE, RX, RXF */ /* FW response handlers */ void bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, @@ -474,6 +459,8 @@ void bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, void bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr); void bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr); +void bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf, + struct bfi_msgq_mhdr *msghdr); /* APIs for BNA */ void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna, @@ -510,11 +497,17 @@ enum bna_cb_status bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac, void (*cbfn)(struct bnad *, struct bna_rx *)); enum bna_cb_status +bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist, + void (*cbfn)(struct bnad *, struct bna_rx *)); +enum bna_cb_status bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac, void (*cbfn)(struct bnad *, struct bna_rx *)); enum bna_cb_status bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac, void (*cbfn)(struct bnad *, struct bna_rx *)); +void +bna_rx_mcast_delall(struct bna_rx *rx, + void (*cbfn)(struct bnad *, struct bna_rx *)); enum bna_cb_status bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode, enum bna_rxmode bitmask, @@ -522,9 +515,9 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode, void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id); void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id); void bna_rx_vlanfilter_enable(struct bna_rx *rx); -/** - * ENET - */ +void bna_rx_vlan_strip_enable(struct bna_rx *rx); +void bna_rx_vlan_strip_disable(struct bna_rx *rx); +/* ENET */ /* API for RX */ int bna_enet_mtu_get(struct bna_enet *enet); @@ -544,18 +537,14 @@ void bna_enet_mtu_set(struct bna_enet *enet, int mtu, void (*cbfn)(struct bnad *)); void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac); -/** - * IOCETH - */ +/* IOCETH */ /* APIs for BNAD */ void bna_ioceth_enable(struct bna_ioceth *ioceth); void bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type); -/** - * BNAD - */ +/* BNAD */ /* Callbacks for ENET */ void bnad_cb_ethport_link_status(struct bnad *bnad, diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c index 9ccc586e376..13f9636cdba 100644 --- a/drivers/net/ethernet/brocade/bna/bna_enet.c +++ b/drivers/net/ethernet/brocade/bna/bna_enet.c @@ -298,7 +298,6 @@ bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr) case BFI_ENET_I2H_RSS_ENABLE_RSP: case BFI_ENET_I2H_RX_PROMISCUOUS_RSP: case BFI_ENET_I2H_RX_DEFAULT_RSP: - case BFI_ENET_I2H_MAC_UCAST_SET_RSP: case BFI_ENET_I2H_MAC_UCAST_CLR_RSP: case BFI_ENET_I2H_MAC_UCAST_ADD_RSP: case BFI_ENET_I2H_MAC_UCAST_DEL_RSP: @@ -311,6 +310,12 @@ bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr) bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr); break; + case BFI_ENET_I2H_MAC_UCAST_SET_RSP: + bna_rx_from_rid(bna, 
msghdr->enet_id, rx); + if (rx) + bna_bfi_rxf_ucast_set_rsp(&rx->rxf, msghdr); + break; + case BFI_ENET_I2H_MAC_MCAST_ADD_RSP: bna_rx_from_rid(bna, msghdr->enet_id, rx); if (rx) @@ -378,9 +383,8 @@ bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr) } } -/** - * ETHPORT - */ +/* ETHPORT */ + #define call_ethport_stop_cbfn(_ethport) \ do { \ if ((_ethport)->stop_cbfn) { \ @@ -804,9 +808,8 @@ bna_ethport_cb_rx_stopped(struct bna_ethport *ethport) } } -/** - * ENET - */ +/* ENET */ + #define bna_enet_chld_start(enet) \ do { \ enum bna_tx_type tx_type = \ @@ -1328,9 +1331,8 @@ bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac) *mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc); } -/** - * IOCETH - */ +/* IOCETH */ + #define enable_mbox_intr(_ioceth) \ do { \ u32 intr_status; \ @@ -1809,6 +1811,13 @@ bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna, list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q); } + /* A separate queue to allow synchronous setting of a list of MACs */ + INIT_LIST_HEAD(&ucam_mod->del_q); + for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++) { + bfa_q_qe_init(&ucam_mod->ucmac[i].qe); + list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q); + } + ucam_mod->bna = bna; } @@ -1816,11 +1825,16 @@ static void bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod) { struct list_head *qe; - int i = 0; + int i; + i = 0; list_for_each(qe, &ucam_mod->free_q) i++; + i = 0; + list_for_each(qe, &ucam_mod->del_q) + i++; + ucam_mod->bna = NULL; } @@ -1849,6 +1863,13 @@ bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna, &mcam_mod->free_handle_q); } + /* A separate queue to allow synchronous setting of a list of MACs */ + INIT_LIST_HEAD(&mcam_mod->del_q); + for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++) { + bfa_q_qe_init(&mcam_mod->mcmac[i].qe); + list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q); + } + mcam_mod->bna = bna; } @@ -1862,6 +1883,9 @@ bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod) list_for_each(qe, &mcam_mod->free_q) i++; i = 0; + list_for_each(qe, &mcam_mod->del_q) i++; + + i = 0; list_for_each(qe, &mcam_mod->free_handle_q) i++; mcam_mod->bna = NULL; @@ -1974,7 +1998,7 @@ bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info) BNA_MEM_T_KVA; res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1; res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len = - attr->num_ucmac * sizeof(struct bna_mac); + (attr->num_ucmac * 2) * sizeof(struct bna_mac); /* Virtual memory for Multicast MAC address - stored by mcam module */ res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM; @@ -1982,7 +2006,7 @@ bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info) BNA_MEM_T_KVA; res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1; res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len = - attr->num_mcmac * sizeof(struct bna_mac); + (attr->num_mcmac * 2) * sizeof(struct bna_mac); /* Virtual memory for Multicast handle - stored by mcam module */ res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM; @@ -2078,41 +2102,21 @@ bna_num_rxp_set(struct bna *bna, int num_rxp) } struct bna_mac * -bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod) -{ - struct list_head *qe; - - if (list_empty(&ucam_mod->free_q)) - return NULL; - - bfa_q_deq(&ucam_mod->free_q, &qe); - - return (struct bna_mac *)qe; -} - -void -bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac) -{ - list_add_tail(&mac->qe, &ucam_mod->free_q); -} - -struct bna_mac * 
-bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod) +bna_cam_mod_mac_get(struct list_head *head) { struct list_head *qe; - if (list_empty(&mcam_mod->free_q)) + if (list_empty(head)) return NULL; - bfa_q_deq(&mcam_mod->free_q, &qe); - + bfa_q_deq(head, &qe); return (struct bna_mac *)qe; } void -bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac) +bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac) { - list_add_tail(&mac->qe, &mcam_mod->free_q); + list_add_tail(&mac->qe, tail); } struct bna_mcam_handle * diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h index 4c6aab2a953..2702d02e98d 100644 --- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h +++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h @@ -16,20 +16,15 @@ * www.brocade.com */ -/** - * File for interrupt macros and functions - */ +/* File for interrupt macros and functions */ #ifndef __BNA_HW_DEFS_H__ #define __BNA_HW_DEFS_H__ #include "bfi_reg.h" -/** - * - * SW imposed limits - * - */ +/* SW imposed limits */ + #define BFI_ENET_DEF_TXQ 1 #define BFI_ENET_DEF_RXP 1 #define BFI_ENET_DEF_UCAM 1 @@ -51,7 +46,8 @@ #define BFI_MAX_INTERPKT_COUNT 0xFF #define BFI_MAX_INTERPKT_TIMEO 0xF /* in 0.5us units */ #define BFI_TX_COALESCING_TIMEO 20 /* 20 * 5 = 100us */ -#define BFI_TX_INTERPKT_COUNT 32 +#define BFI_TX_INTERPKT_COUNT 12 /* Pkt Cnt = 12 */ +#define BFI_TX_INTERPKT_TIMEO 15 /* 15 * 0.5 = 7.5us */ #define BFI_RX_COALESCING_TIMEO 12 /* 12 * 5 = 60us */ #define BFI_RX_INTERPKT_COUNT 6 /* Pkt Cnt = 6 */ #define BFI_RX_INTERPKT_TIMEO 3 /* 3 * 0.5 = 1.5us */ @@ -141,11 +137,8 @@ } #define bna_port_id_get(_bna) ((_bna)->ioceth.ioc.port_id) -/** - * - * Interrupt related bits, flags and macros - * - */ + +/* Interrupt related bits, flags and macros */ #define IB_STATUS_BITS 0x0000ffff @@ -280,11 +273,7 @@ do { \ (writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \ (_rcb)->q_dbell)); -/** - * - * TxQ, RxQ, CQ related bits, offsets, macros - * - */ +/* TxQ, RxQ, CQ related bits, offsets, macros */ /* TxQ Entry Opcodes */ #define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */ @@ -333,12 +322,12 @@ do { \ #define BNA_CQ_EF_REMOTE (1 << 19) #define BNA_CQ_EF_LOCAL (1 << 20) - -/** - * - * Data structures - * +/* CAT2 ASIC does not use bit 21 as per the SPEC. + * Bit 31 is set in every end of frame completion */ +#define BNA_CQ_EF_EOP (1 << 31) + +/* Data structures */ struct bna_reg_offset { u32 fn_int_status; @@ -371,8 +360,7 @@ struct bna_txq_wi_vector { struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */ }; -/** - * TxQ Entry Structure +/* TxQ Entry Structure * * BEWARE: Load values into this structure with correct endianess. 
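
The hunk above collapses the separate ucam/mcam MAC get/put pairs into one bna_cam_mod_mac_get()/bna_cam_mod_mac_put() pair keyed by a list head, so call sites pick the pool through the new bna_ucam_mod_free_q()-style macros. A toy version with a singly linked list standing in for the kernel's list_head:

#include <stddef.h>

struct mac_node {
        struct mac_node *next;
        unsigned char addr[6];
};

struct mac_pool {                       /* plays the role of free_q / del_q */
        struct mac_node *head;
};

static struct mac_node *cam_mod_mac_get(struct mac_pool *q)
{
        struct mac_node *m = q->head;

        if (m)
                q->head = m->next;
        return m;                       /* NULL when the pool is exhausted */
}

static void cam_mod_mac_put(struct mac_pool *q, struct mac_node *m)
{
        m->next = q->head;
        q->head = m;
}

/* Usage mirroring the new call sites, e.g.
 *   mac = bna_cam_mod_mac_get(bna_ucam_mod_free_q(bna));
 */
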
*/ diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c index 276fcb589f4..85e63546abe 100644 --- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c +++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c @@ -18,9 +18,7 @@ #include "bna.h" #include "bfi.h" -/** - * IB - */ +/* IB */ static void bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo) { @@ -29,9 +27,7 @@ bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo) (u32)ib->coalescing_timeo, 0); } -/** - * RXF - */ +/* RXF */ #define bna_rxf_vlan_cfg_soft_reset(rxf) \ do { \ @@ -533,13 +529,13 @@ bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf) struct list_head *qe; int ret; - /* Delete multicast entries previousely added */ + /* First delete multicast entries to maintain the count */ while (!list_empty(&rxf->mcast_pending_del_q)) { bfa_q_deq(&rxf->mcast_pending_del_q, &qe); bfa_q_qe_init(qe); mac = (struct bna_mac *)qe; ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP); - bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); + bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac); if (ret) return ret; } @@ -590,7 +586,7 @@ bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup) bfa_q_qe_init(qe); mac = (struct bna_mac *)qe; ret = bna_rxf_mcast_del(rxf, mac, cleanup); - bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); + bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac); if (ret) return ret; } @@ -715,6 +711,21 @@ bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr) } void +bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf, + struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_rsp *rsp = + (struct bfi_enet_rsp *)msghdr; + + if (rsp->error) { + /* Clear ucast from cache */ + rxf->ucast_active_set = 0; + } + + bfa_fsm_send_event(rxf, RXF_E_FW_RESP); +} + +void bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr) { @@ -785,20 +796,20 @@ bna_rxf_uninit(struct bna_rxf *rxf) while (!list_empty(&rxf->ucast_pending_add_q)) { bfa_q_deq(&rxf->ucast_pending_add_q, &mac); bfa_q_qe_init(&mac->qe); - bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac); + bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac); } if (rxf->ucast_pending_mac) { bfa_q_qe_init(&rxf->ucast_pending_mac->qe); - bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, - rxf->ucast_pending_mac); + bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), + rxf->ucast_pending_mac); rxf->ucast_pending_mac = NULL; } while (!list_empty(&rxf->mcast_pending_add_q)) { bfa_q_deq(&rxf->mcast_pending_add_q, &mac); bfa_q_qe_init(&mac->qe); - bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); + bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac); } rxf->rxmode_pending = 0; @@ -858,7 +869,7 @@ bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac, if (rxf->ucast_pending_mac == NULL) { rxf->ucast_pending_mac = - bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod); + bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna)); if (rxf->ucast_pending_mac == NULL) return BNA_CB_UCAST_CAM_FULL; bfa_q_qe_init(&rxf->ucast_pending_mac->qe); @@ -889,7 +900,7 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr, return BNA_CB_SUCCESS; } - mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod); + mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna)); if (mac == NULL) return BNA_CB_MCAST_LIST_FULL; bfa_q_qe_init(&mac->qe); @@ -905,35 +916,92 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr, } enum bna_cb_status -bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist, 
+bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist, void (*cbfn)(struct bnad *, struct bna_rx *)) { + struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod; struct bna_rxf *rxf = &rx->rxf; struct list_head list_head; struct list_head *qe; u8 *mcaddr; - struct bna_mac *mac; + struct bna_mac *mac, *del_mac; int i; + /* Purge the pending_add_q */ + while (!list_empty(&rxf->ucast_pending_add_q)) { + bfa_q_deq(&rxf->ucast_pending_add_q, &qe); + bfa_q_qe_init(qe); + mac = (struct bna_mac *)qe; + bna_cam_mod_mac_put(&ucam_mod->free_q, mac); + } + + /* Schedule active_q entries for deletion */ + while (!list_empty(&rxf->ucast_active_q)) { + bfa_q_deq(&rxf->ucast_active_q, &qe); + mac = (struct bna_mac *)qe; + bfa_q_qe_init(&mac->qe); + + del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q); + memcpy(del_mac, mac, sizeof(*del_mac)); + list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q); + bna_cam_mod_mac_put(&ucam_mod->free_q, mac); + } + /* Allocate nodes */ INIT_LIST_HEAD(&list_head); - for (i = 0, mcaddr = mclist; i < count; i++) { - mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod); + for (i = 0, mcaddr = uclist; i < count; i++) { + mac = bna_cam_mod_mac_get(&ucam_mod->free_q); if (mac == NULL) goto err_return; bfa_q_qe_init(&mac->qe); memcpy(mac->addr, mcaddr, ETH_ALEN); list_add_tail(&mac->qe, &list_head); - mcaddr += ETH_ALEN; } + /* Add the new entries */ + while (!list_empty(&list_head)) { + bfa_q_deq(&list_head, &qe); + mac = (struct bna_mac *)qe; + bfa_q_qe_init(&mac->qe); + list_add_tail(&mac->qe, &rxf->ucast_pending_add_q); + } + + rxf->cam_fltr_cbfn = cbfn; + rxf->cam_fltr_cbarg = rx->bna->bnad; + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + + return BNA_CB_SUCCESS; + +err_return: + while (!list_empty(&list_head)) { + bfa_q_deq(&list_head, &qe); + mac = (struct bna_mac *)qe; + bfa_q_qe_init(&mac->qe); + bna_cam_mod_mac_put(&ucam_mod->free_q, mac); + } + + return BNA_CB_UCAST_CAM_FULL; +} + +enum bna_cb_status +bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist, + void (*cbfn)(struct bnad *, struct bna_rx *)) +{ + struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod; + struct bna_rxf *rxf = &rx->rxf; + struct list_head list_head; + struct list_head *qe; + u8 *mcaddr; + struct bna_mac *mac, *del_mac; + int i; + /* Purge the pending_add_q */ while (!list_empty(&rxf->mcast_pending_add_q)) { bfa_q_deq(&rxf->mcast_pending_add_q, &qe); bfa_q_qe_init(qe); mac = (struct bna_mac *)qe; - bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); + bna_cam_mod_mac_put(&mcam_mod->free_q, mac); } /* Schedule active_q entries for deletion */ @@ -941,7 +1009,26 @@ bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist, bfa_q_deq(&rxf->mcast_active_q, &qe); mac = (struct bna_mac *)qe; bfa_q_qe_init(&mac->qe); - list_add_tail(&mac->qe, &rxf->mcast_pending_del_q); + + del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q); + + memcpy(del_mac, mac, sizeof(*del_mac)); + list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q); + mac->handle = NULL; + bna_cam_mod_mac_put(&mcam_mod->free_q, mac); + } + + /* Allocate nodes */ + INIT_LIST_HEAD(&list_head); + for (i = 0, mcaddr = mclist; i < count; i++) { + mac = bna_cam_mod_mac_get(&mcam_mod->free_q); + if (mac == NULL) + goto err_return; + bfa_q_qe_init(&mac->qe); + memcpy(mac->addr, mcaddr, ETH_ALEN); + list_add_tail(&mac->qe, &list_head); + + mcaddr += ETH_ALEN; } /* Add the new entries */ @@ -963,13 +1050,56 @@ err_return: bfa_q_deq(&list_head, &qe); mac = (struct bna_mac *)qe; bfa_q_qe_init(&mac->qe); - bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, 
mac); + bna_cam_mod_mac_put(&mcam_mod->free_q, mac); } return BNA_CB_MCAST_LIST_FULL; } void +bna_rx_mcast_delall(struct bna_rx *rx, + void (*cbfn)(struct bnad *, struct bna_rx *)) +{ + struct bna_rxf *rxf = &rx->rxf; + struct list_head *qe; + struct bna_mac *mac, *del_mac; + int need_hw_config = 0; + + /* Purge all entries from pending_add_q */ + while (!list_empty(&rxf->mcast_pending_add_q)) { + bfa_q_deq(&rxf->mcast_pending_add_q, &qe); + mac = (struct bna_mac *)qe; + bfa_q_qe_init(&mac->qe); + bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac); + } + + /* Schedule all entries in active_q for deletion */ + while (!list_empty(&rxf->mcast_active_q)) { + bfa_q_deq(&rxf->mcast_active_q, &qe); + mac = (struct bna_mac *)qe; + bfa_q_qe_init(&mac->qe); + + del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna)); + + memcpy(del_mac, mac, sizeof(*del_mac)); + list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q); + mac->handle = NULL; + bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac); + need_hw_config = 1; + } + + if (need_hw_config) { + rxf->cam_fltr_cbfn = cbfn; + rxf->cam_fltr_cbarg = rx->bna->bnad; + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + return; + } + + if (cbfn) + (*cbfn)(rx->bna->bnad, rx); +} + +void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id) { struct bna_rxf *rxf = &rx->rxf; @@ -1011,7 +1141,7 @@ bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf) bfa_q_qe_init(qe); mac = (struct bna_mac *)qe; bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ); - bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac); + bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac); return 1; } @@ -1051,11 +1181,13 @@ bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup) bfa_q_qe_init(qe); mac = (struct bna_mac *)qe; if (cleanup == BNA_SOFT_CLEANUP) - bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac); + bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), + mac); else { bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ); - bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac); + bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), + mac); return 1; } } @@ -1312,9 +1444,7 @@ bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf) return 0; } -/** - * RX - */ +/* RX */ #define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? 
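
The listset and delall paths above all follow the same del_q pattern: an active entry is copied into a node drawn from the deletion pool and queued on pending_del_q, while the original node goes straight back to the free pool. The free pool therefore never shrinks while firmware deletes are in flight, which is why the ucmac/mcmac arrays are now sized num * 2. A self-contained sketch of that pattern, reusing the pool shape from the earlier sketch:

struct mac_node2 {
        struct mac_node2 *next;
        unsigned char addr[6];
};

struct mac_pool2 {
        struct mac_node2 *head;
};

static struct mac_node2 *pool_get(struct mac_pool2 *q)
{
        struct mac_node2 *m = q->head;

        if (m)
                q->head = m->next;
        return m;
}

static void pool_put(struct mac_pool2 *q, struct mac_node2 *m)
{
        m->next = q->head;
        q->head = m;
}

static void schedule_deletes(struct mac_pool2 *active, struct mac_pool2 *free_q,
                             struct mac_pool2 *del_q, struct mac_pool2 *pending_del)
{
        struct mac_node2 *mac, *del_mac;

        while ((mac = pool_get(active)) != NULL) {
                del_mac = pool_get(del_q);      /* sized so it never runs dry */
                *del_mac = *mac;                /* copy the address over */
                pool_put(pending_del, del_mac);
                pool_put(free_q, mac);          /* node reusable immediately */
        }
}
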
\ (qcfg)->num_paths : ((qcfg)->num_paths * 2)) @@ -1361,6 +1491,8 @@ bfa_fsm_state_decl(bna_rx, stopped, struct bna_rx, enum bna_rx_event); bfa_fsm_state_decl(bna_rx, start_wait, struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, start_stop_wait, + struct bna_rx, enum bna_rx_event); bfa_fsm_state_decl(bna_rx, rxf_start_wait, struct bna_rx, enum bna_rx_event); bfa_fsm_state_decl(bna_rx, started, @@ -1408,7 +1540,7 @@ static void bna_rx_sm_start_wait_entry(struct bna_rx *rx) bna_bfi_rx_enet_start(rx); } -void +static void bna_rx_sm_stop_wait_entry(struct bna_rx *rx) { } @@ -1438,7 +1570,7 @@ static void bna_rx_sm_start_wait(struct bna_rx *rx, { switch (event) { case RX_E_STOP: - bfa_fsm_set_state(rx, bna_rx_sm_stop_wait); + bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait); break; case RX_E_FAIL: @@ -1461,7 +1593,7 @@ static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx) bna_rxf_start(&rx->rxf); } -void +static void bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx) { } @@ -1494,7 +1626,30 @@ bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event) } -void +static void +bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx) +{ +} + +static void +bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event) +{ + switch (event) { + case RX_E_FAIL: + case RX_E_STOPPED: + bfa_fsm_set_state(rx, bna_rx_sm_stopped); + break; + + case RX_E_STARTED: + bna_rx_enet_stop(rx); + break; + + default: + bfa_sm_fault(event); + } +} + +static void bna_rx_sm_started_entry(struct bna_rx *rx) { struct bna_rxp *rxp; @@ -1559,12 +1714,12 @@ static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx, } } -void +static void bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx) { } -void +static void bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event) { switch (event) { @@ -1656,6 +1811,7 @@ bna_bfi_rx_enet_start(struct bna_rx *rx) cfg_req->mh.num_entries = htons( bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req))); + cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet); cfg_req->num_queue_sets = rx->num_paths; for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q); i < rx->num_paths; @@ -1677,8 +1833,17 @@ bna_bfi_rx_enet_start(struct bna_rx *rx) /* Large/Single RxQ */ bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q, &q0->qpt); - q0->buffer_size = - bna_enet_mtu_get(&rx->bna->enet); + if (q0->multi_buffer) + /* multi-buffer is enabled by allocating + * a new rx with new set of resources. + * q0->buffer_size should be initialized to + * fragment size. 
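
The new start_stop_wait state below closes a race: a STOP request arrives while the enet-start command is still in flight, so the Rx FSM must wait for the STARTED response and only then issue the stop. A compressed sketch of that one state, with plain enums standing in for the bfa_fsm machinery:

enum rx_event { RX_E_STARTED, RX_E_STOPPED, RX_E_FAIL };
enum rx_state { RX_STOPPED_ST, RX_START_STOP_WAIT_ST };

static enum rx_state rx_sm_start_stop_wait(enum rx_event event)
{
        switch (event) {
        case RX_E_STARTED:
                /* start won the race; undo it: bna_rx_enet_stop(rx) here */
                return RX_START_STOP_WAIT_ST;   /* stay until STOPPED */
        case RX_E_STOPPED:
        case RX_E_FAIL:
                return RX_STOPPED_ST;
        }
        return RX_START_STOP_WAIT_ST;
}
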
+ */ + cfg_req->rx_cfg.multi_buffer = + BNA_STATUS_T_ENABLED; + else + q0->buffer_size = + bna_enet_mtu_get(&rx->bna->enet); cfg_req->q_cfg[i].ql.rx_buffer_size = htons((u16)q0->buffer_size); break; @@ -1914,6 +2079,9 @@ bna_rxq_qpt_setup(struct bna_rxq *rxq, struct bna_mem_descr *swqpt_mem, struct bna_mem_descr *page_mem) { + u8 *kva; + u64 dma; + struct bna_dma_addr bna_dma; int i; rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; @@ -1923,13 +2091,21 @@ bna_rxq_qpt_setup(struct bna_rxq *rxq, rxq->qpt.page_size = page_size; rxq->rcb->sw_qpt = (void **) swqpt_mem->kva; + rxq->rcb->sw_q = page_mem->kva; + + kva = page_mem->kva; + BNA_GET_DMA_ADDR(&page_mem->dma, dma); for (i = 0; i < rxq->qpt.page_count; i++) { - rxq->rcb->sw_qpt[i] = page_mem[i].kva; + rxq->rcb->sw_qpt[i] = kva; + kva += PAGE_SIZE; + + BNA_SET_DMA_ADDR(dma, &bna_dma); ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb = - page_mem[i].dma.lsb; + bna_dma.lsb; ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb = - page_mem[i].dma.msb; + bna_dma.msb; + dma += PAGE_SIZE; } } @@ -1941,6 +2117,9 @@ bna_rxp_cqpt_setup(struct bna_rxp *rxp, struct bna_mem_descr *swqpt_mem, struct bna_mem_descr *page_mem) { + u8 *kva; + u64 dma; + struct bna_dma_addr bna_dma; int i; rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; @@ -1950,14 +2129,21 @@ bna_rxp_cqpt_setup(struct bna_rxp *rxp, rxp->cq.qpt.page_size = page_size; rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva; + rxp->cq.ccb->sw_q = page_mem->kva; + + kva = page_mem->kva; + BNA_GET_DMA_ADDR(&page_mem->dma, dma); for (i = 0; i < rxp->cq.qpt.page_count; i++) { - rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva; + rxp->cq.ccb->sw_qpt[i] = kva; + kva += PAGE_SIZE; + BNA_SET_DMA_ADDR(dma, &bna_dma); ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb = - page_mem[i].dma.lsb; + bna_dma.lsb; ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb = - page_mem[i].dma.msb; + bna_dma.msb; + dma += PAGE_SIZE; } } @@ -2207,8 +2393,8 @@ bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info) u32 hq_depth; u32 dq_depth; - dq_depth = q_cfg->q_depth; - hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth); + dq_depth = q_cfg->q0_depth; + hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 
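
The qpt_setup changes above switch from one DMA allocation per queue page to a single contiguous allocation per queue; the page table is then filled by walking the block in PAGE_SIZE steps, advancing the kernel virtual address and the bus address together. A sketch of that loop; the driver additionally byte-swaps the halves via BNA_SET_DMA_ADDR, which is elided here.

#include <stdint.h>

#define PAGE_SZ 4096u                   /* PAGE_SIZE stand-in */

struct dma_addr32 {                     /* split halves as the device wants */
        uint32_t msb;
        uint32_t lsb;
};

static void qpt_fill(struct dma_addr32 *qpt, void **sw_qpt,
                     uint8_t *kva, uint64_t dma, int page_count)
{
        for (int i = 0; i < page_count; i++) {
                sw_qpt[i] = kva;                /* host-side page pointer */
                qpt[i].lsb = (uint32_t)dma;     /* device-side page address */
                qpt[i].msb = (uint32_t)(dma >> 32);
                kva += PAGE_SZ;
                dma += PAGE_SZ;
        }
}
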
0 : q_cfg->q1_depth); cq_depth = dq_depth + hq_depth; BNA_TO_POWER_OF_2_HIGH(cq_depth); @@ -2256,8 +2442,8 @@ bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info) res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM; mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info; mem_info->mem_type = BNA_MEM_T_DMA; - mem_info->len = PAGE_SIZE; - mem_info->num = cpage_count * q_cfg->num_paths; + mem_info->len = PAGE_SIZE * cpage_count; + mem_info->num = q_cfg->num_paths; res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM; mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info; @@ -2274,8 +2460,8 @@ bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info) res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM; mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info; mem_info->mem_type = BNA_MEM_T_DMA; - mem_info->len = PAGE_SIZE; - mem_info->num = dpage_count * q_cfg->num_paths; + mem_info->len = PAGE_SIZE * dpage_count; + mem_info->num = q_cfg->num_paths; res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM; mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info; @@ -2292,8 +2478,8 @@ bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info) res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM; mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info; mem_info->mem_type = BNA_MEM_T_DMA; - mem_info->len = (hpage_count ? PAGE_SIZE : 0); - mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0); + mem_info->len = PAGE_SIZE * hpage_count; + mem_info->num = (hpage_count ? q_cfg->num_paths : 0); res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM; mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info; @@ -2325,10 +2511,10 @@ bna_rx_create(struct bna *bna, struct bnad *bnad, struct bna_rxq *q0; struct bna_rxq *q1; struct bna_intr_info *intr_info; - u32 page_count; + struct bna_mem_descr *hqunmap_mem; + struct bna_mem_descr *dqunmap_mem; struct bna_mem_descr *ccb_mem; struct bna_mem_descr *rcb_mem; - struct bna_mem_descr *unmapq_mem; struct bna_mem_descr *cqpt_mem; struct bna_mem_descr *cswqpt_mem; struct bna_mem_descr *cpage_mem; @@ -2338,8 +2524,10 @@ bna_rx_create(struct bna *bna, struct bnad *bnad, struct bna_mem_descr *dsqpt_mem; struct bna_mem_descr *hpage_mem; struct bna_mem_descr *dpage_mem; - int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0; - int dpage_count, hpage_count, rcb_idx; + u32 dpage_count, hpage_count; + u32 hq_idx, dq_idx, rcb_idx; + u32 cq_depth, i; + u32 page_count; if (!bna_rx_res_check(rx_mod, rx_cfg)) return NULL; @@ -2347,7 +2535,8 @@ bna_rx_create(struct bna *bna, struct bnad *bnad, intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info; ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0]; rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0]; - unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0]; + dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0]; + hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0]; cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0]; cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0]; cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0]; @@ -2358,14 +2547,14 @@ bna_rx_create(struct bna *bna, struct bnad *bnad, hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0]; dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0]; - page_count = 
res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num / - rx_cfg->num_paths; + page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len / + PAGE_SIZE; - dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num / - rx_cfg->num_paths; + dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len / + PAGE_SIZE; - hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num / - rx_cfg->num_paths; + hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len / + PAGE_SIZE; rx = bna_rx_get(rx_mod, rx_cfg->rx_type); rx->bna = bna; @@ -2399,7 +2588,8 @@ bna_rx_create(struct bna *bna, struct bnad *bnad, } rx->num_paths = rx_cfg->num_paths; - for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) { + for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0; + i < rx->num_paths; i++) { rxp = bna_rxp_get(rx_mod); list_add_tail(&rxp->qe, &rx->rxp_q); rxp->type = rx_cfg->rxp_type; @@ -2442,9 +2632,13 @@ bna_rx_create(struct bna *bna, struct bnad *bnad, q0->rxp = rxp; q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; - q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva; - rcb_idx++; - q0->rcb->q_depth = rx_cfg->q_depth; + q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva; + rcb_idx++; dq_idx++; + q0->rcb->q_depth = rx_cfg->q0_depth; + q0->q_depth = rx_cfg->q0_depth; + q0->multi_buffer = rx_cfg->q0_multi_buf; + q0->buffer_size = rx_cfg->q0_buf_size; + q0->num_vecs = rx_cfg->q0_num_vecs; q0->rcb->rxq = q0; q0->rcb->bnad = bna->bnad; q0->rcb->id = 0; @@ -2452,10 +2646,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad, q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0; bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE, - &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]); - q0->rcb->page_idx = dpage_idx; - q0->rcb->page_count = dpage_count; - dpage_idx += dpage_count; + &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]); if (rx->rcb_setup_cbfn) rx->rcb_setup_cbfn(bnad, q0->rcb); @@ -2467,24 +2658,24 @@ bna_rx_create(struct bna *bna, struct bnad *bnad, q1->rxp = rxp; q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; - q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva; - rcb_idx++; - q1->rcb->q_depth = rx_cfg->q_depth; + q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva; + rcb_idx++; hq_idx++; + q1->rcb->q_depth = rx_cfg->q1_depth; + q1->q_depth = rx_cfg->q1_depth; + q1->multi_buffer = BNA_STATUS_T_DISABLED; + q1->num_vecs = 1; q1->rcb->rxq = q1; q1->rcb->bnad = bna->bnad; q1->rcb->id = 1; q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ? rx_cfg->hds_config.forced_offset - : rx_cfg->small_buff_size; + : rx_cfg->q1_buf_size; q1->rx_packets = q1->rx_bytes = 0; q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0; bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE, &hqpt_mem[i], &hsqpt_mem[i], - &hpage_mem[hpage_idx]); - q1->rcb->page_idx = hpage_idx; - q1->rcb->page_count = hpage_count; - hpage_idx += hpage_count; + &hpage_mem[i]); if (rx->rcb_setup_cbfn) rx->rcb_setup_cbfn(bnad, q1->rcb); @@ -2493,9 +2684,14 @@ bna_rx_create(struct bna *bna, struct bnad *bnad, /* Setup CQ */ rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva; - rxp->cq.ccb->q_depth = rx_cfg->q_depth + - ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ? - 0 : rx_cfg->q_depth); + cq_depth = rx_cfg->q0_depth + + ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ? 
+ 0 : rx_cfg->q1_depth); + /* if multi-buffer is enabled sum of q0_depth + * and q1_depth need not be a power of 2 + */ + BNA_TO_POWER_OF_2_HIGH(cq_depth); + rxp->cq.ccb->q_depth = cq_depth; rxp->cq.ccb->cq = &rxp->cq; rxp->cq.ccb->rcb[0] = q0->rcb; q0->rcb->ccb = rxp->cq.ccb; @@ -2516,10 +2712,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad, rxp->cq.ccb->id = i; bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE, - &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]); - rxp->cq.ccb->page_idx = cpage_idx; - rxp->cq.ccb->page_count = page_count; - cpage_idx += page_count; + &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]); if (rx->ccb_setup_cbfn) rx->ccb_setup_cbfn(bnad, rxp->cq.ccb); @@ -2624,6 +2817,30 @@ bna_rx_cleanup_complete(struct bna_rx *rx) bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE); } +void +bna_rx_vlan_strip_enable(struct bna_rx *rx) +{ + struct bna_rxf *rxf = &rx->rxf; + + if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) { + rxf->vlan_strip_status = BNA_STATUS_T_ENABLED; + rxf->vlan_strip_pending = true; + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + } +} + +void +bna_rx_vlan_strip_disable(struct bna_rx *rx) +{ + struct bna_rxf *rxf = &rx->rxf; + + if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) { + rxf->vlan_strip_status = BNA_STATUS_T_DISABLED; + rxf->vlan_strip_pending = true; + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + } +} + enum bna_cb_status bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode, enum bna_rxmode bitmask, @@ -2791,9 +3008,8 @@ const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = { {1, 2}, }; -/** - * TX - */ +/* TX */ + #define call_tx_stop_cbfn(tx) \ do { \ if ((tx)->stop_cbfn) { \ @@ -3237,6 +3453,9 @@ bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size, struct bna_mem_descr *swqpt_mem, struct bna_mem_descr *page_mem) { + u8 *kva; + u64 dma; + struct bna_dma_addr bna_dma; int i; txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; @@ -3246,14 +3465,21 @@ bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size, txq->qpt.page_size = page_size; txq->tcb->sw_qpt = (void **) swqpt_mem->kva; + txq->tcb->sw_q = page_mem->kva; + + kva = page_mem->kva; + BNA_GET_DMA_ADDR(&page_mem->dma, dma); for (i = 0; i < page_count; i++) { - txq->tcb->sw_qpt[i] = page_mem[i].kva; + txq->tcb->sw_qpt[i] = kva; + kva += PAGE_SIZE; + BNA_SET_DMA_ADDR(dma, &bna_dma); ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb = - page_mem[i].dma.lsb; + bna_dma.lsb; ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb = - page_mem[i].dma.msb; + bna_dma.msb; + dma += PAGE_SIZE; } } @@ -3437,8 +3663,8 @@ bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info) res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM; mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info; mem_info->mem_type = BNA_MEM_T_DMA; - mem_info->len = PAGE_SIZE; - mem_info->num = num_txq * page_count; + mem_info->len = PAGE_SIZE * page_count; + mem_info->num = num_txq; res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM; mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info; @@ -3464,14 +3690,11 @@ bna_tx_create(struct bna *bna, struct bnad *bnad, struct bna_txq *txq; struct list_head *qe; int page_count; - int page_size; - int page_idx; int i; intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info; - page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) / - tx_cfg->num_txq; - page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len; + page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) / + PAGE_SIZE; /** * 
Get resources @@ -3536,7 +3759,6 @@ bna_tx_create(struct bna *bna, struct bnad *bnad, /* TxQ */ i = 0; - page_idx = 0; list_for_each(qe, &tx->txq_q) { txq = (struct bna_txq *)qe; txq->tcb = (struct bna_tcb *) @@ -3558,7 +3780,7 @@ bna_tx_create(struct bna *bna, struct bnad *bnad, if (intr_info->intr_type == BNA_INTR_T_INTX) txq->ib.intr_vector = (1 << txq->ib.intr_vector); txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo; - txq->ib.interpkt_timeo = 0; /* Not used */ + txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO; txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT; /* TCB */ @@ -3576,14 +3798,11 @@ bna_tx_create(struct bna *bna, struct bnad *bnad, txq->tcb->id = i; /* QPT, SWQPT, Pages */ - bna_txq_qpt_setup(txq, page_count, page_size, + bna_txq_qpt_setup(txq, page_count, PAGE_SIZE, &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i], &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i], &res_info[BNA_TX_RES_MEM_T_PAGE]. - res_u.mem_info.mdl[page_idx]); - txq->tcb->page_idx = page_idx; - txq->tcb->page_count = page_count; - page_idx += page_count; + res_u.mem_info.mdl[i]); /* Callback to bnad for setting up TCB */ if (tx->tcb_setup_cbfn) diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h index e8d3ab7ea6c..621547cd350 100644 --- a/drivers/net/ethernet/brocade/bna/bna_types.h +++ b/drivers/net/ethernet/brocade/bna/bna_types.h @@ -23,11 +23,7 @@ #include "bfa_cee.h" #include "bfa_msgq.h" -/** - * - * Forward declarations - * - */ +/* Forward declarations */ struct bna_mcam_handle; struct bna_txq; @@ -40,11 +36,7 @@ struct bna_enet; struct bna; struct bnad; -/** - * - * Enums, primitive data types - * - */ +/* Enums, primitive data types */ enum bna_status { BNA_STATUS_T_DISABLED = 0, @@ -117,20 +109,21 @@ enum bna_tx_res_req_type { enum bna_rx_mem_type { BNA_RX_RES_MEM_T_CCB = 0, /* CQ context */ BNA_RX_RES_MEM_T_RCB = 1, /* CQ context */ - BNA_RX_RES_MEM_T_UNMAPQ = 2, /* UnmapQ for RxQs */ - BNA_RX_RES_MEM_T_CQPT = 3, /* CQ QPT */ - BNA_RX_RES_MEM_T_CSWQPT = 4, /* S/W QPT */ - BNA_RX_RES_MEM_T_CQPT_PAGE = 5, /* CQPT page */ - BNA_RX_RES_MEM_T_HQPT = 6, /* RX QPT */ - BNA_RX_RES_MEM_T_DQPT = 7, /* RX QPT */ - BNA_RX_RES_MEM_T_HSWQPT = 8, /* RX s/w QPT */ - BNA_RX_RES_MEM_T_DSWQPT = 9, /* RX s/w QPT */ - BNA_RX_RES_MEM_T_DPAGE = 10, /* RX s/w QPT */ - BNA_RX_RES_MEM_T_HPAGE = 11, /* RX s/w QPT */ - BNA_RX_RES_MEM_T_IBIDX = 12, - BNA_RX_RES_MEM_T_RIT = 13, - BNA_RX_RES_T_INTR = 14, /* Rx interrupts */ - BNA_RX_RES_T_MAX = 15 + BNA_RX_RES_MEM_T_UNMAPHQ = 2, + BNA_RX_RES_MEM_T_UNMAPDQ = 3, + BNA_RX_RES_MEM_T_CQPT = 4, + BNA_RX_RES_MEM_T_CSWQPT = 5, + BNA_RX_RES_MEM_T_CQPT_PAGE = 6, + BNA_RX_RES_MEM_T_HQPT = 7, + BNA_RX_RES_MEM_T_DQPT = 8, + BNA_RX_RES_MEM_T_HSWQPT = 9, + BNA_RX_RES_MEM_T_DSWQPT = 10, + BNA_RX_RES_MEM_T_DPAGE = 11, + BNA_RX_RES_MEM_T_HPAGE = 12, + BNA_RX_RES_MEM_T_IBIDX = 13, + BNA_RX_RES_MEM_T_RIT = 14, + BNA_RX_RES_T_INTR = 15, + BNA_RX_RES_T_MAX = 16 }; enum bna_tx_type { @@ -331,11 +324,7 @@ struct bna_attr { int max_rit_size; }; -/** - * - * IOCEth - * - */ +/* IOCEth */ struct bna_ioceth { bfa_fsm_t fsm; @@ -351,11 +340,7 @@ struct bna_ioceth { struct bna *bna; }; -/** - * - * Enet - * - */ +/* Enet */ /* Pause configuration */ struct bna_pause_config { @@ -390,11 +375,7 @@ struct bna_enet { struct bna *bna; }; -/** - * - * Ethport - * - */ +/* Ethport */ struct bna_ethport { bfa_fsm_t fsm; @@ -419,11 +400,7 @@ struct bna_ethport { struct bna *bna; }; -/** - * - * Interrupt Block - * - */ +/* Interrupt Block */ /* 
Doorbell structure */ struct bna_ib_dbell { @@ -447,17 +424,14 @@ struct bna_ib { int interpkt_timeo; }; -/** - * - * Tx object - * - */ +/* Tx object */ /* Tx datapath control structure */ #define BNA_Q_NAME_SIZE 16 struct bna_tcb { /* Fast path */ void **sw_qpt; + void *sw_q; void *unmap_q; u32 producer_index; u32 consumer_index; @@ -465,8 +439,6 @@ struct bna_tcb { u32 q_depth; void __iomem *q_dbell; struct bna_ib_dbell *i_dbell; - int page_idx; - int page_count; /* Control path */ struct bna_txq *txq; struct bnad *bnad; @@ -585,23 +557,18 @@ struct bna_tx_mod { struct bna *bna; }; -/** - * - * Rx object - * - */ +/* Rx object */ /* Rx datapath control structure */ struct bna_rcb { /* Fast path */ void **sw_qpt; + void *sw_q; void *unmap_q; u32 producer_index; u32 consumer_index; u32 q_depth; void __iomem *q_dbell; - int page_idx; - int page_count; /* Control path */ struct bna_rxq *rxq; struct bna_ccb *ccb; @@ -617,6 +584,8 @@ struct bna_rxq { int buffer_size; int q_depth; + u32 num_vecs; + enum bna_status multi_buffer; struct bna_qpt qpt; struct bna_rcb *rcb; @@ -658,6 +627,7 @@ struct bna_pkt_rate { struct bna_ccb { /* Fast path */ void **sw_qpt; + void *sw_q; u32 producer_index; volatile u32 *hw_producer_index; u32 q_depth; @@ -665,8 +635,8 @@ struct bna_ccb { struct bna_rcb *rcb[2]; void *ctrl; /* For bnad */ struct bna_pkt_rate pkt_rate; - int page_idx; - int page_count; + u32 pkts_una; + u32 bytes_per_intr; /* Control path */ struct bna_cq *cq; @@ -706,14 +676,22 @@ struct bna_rx_config { int num_paths; enum bna_rxp_type rxp_type; int paused; - int q_depth; int coalescing_timeo; /* * Small/Large (or Header/Data) buffer size to be configured - * for SLR and HDS queue type. Large buffer size comes from - * enet->mtu. + * for SLR and HDS queue type. 
*/ - int small_buff_size; + u32 frame_size; + + /* header or small queue */ + u32 q1_depth; + u32 q1_buf_size; + + /* data or large queue */ + u32 q0_depth; + u32 q0_buf_size; + u32 q0_num_vecs; + enum bna_status q0_multi_buf; enum bna_status rss_status; struct bna_rss_config rss_config; @@ -898,15 +876,12 @@ struct bna_rx_mod { u32 rid_mask; }; -/** - * - * CAM - * - */ +/* CAM */ struct bna_ucam_mod { - struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */ + struct bna_mac *ucmac; /* num_ucmac * 2 entries */ struct list_head free_q; + struct list_head del_q; struct bna *bna; }; @@ -919,19 +894,16 @@ struct bna_mcam_handle { }; struct bna_mcam_mod { - struct bna_mac *mcmac; /* BFI_MAX_MCMAC entries */ - struct bna_mcam_handle *mchandle; /* BFI_MAX_MCMAC entries */ + struct bna_mac *mcmac; /* num_mcmac * 2 entries */ + struct bna_mcam_handle *mchandle; /* num_mcmac entries */ struct list_head free_q; + struct list_head del_q; struct list_head free_handle_q; struct bna *bna; }; -/** - * - * Statistics - * - */ +/* Statistics */ struct bna_stats { struct bna_dma_addr hw_stats_dma; @@ -949,11 +921,7 @@ struct bna_stats_mod { struct bfi_enet_stats_req stats_clr; }; -/** - * - * BNA - * - */ +/* BNA */ struct bna { struct bna_ident ident; diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 67cd2ed0306..3a77f9ead00 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -52,7 +52,7 @@ MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1," /* * Global variables */ -u32 bnad_rxqs_per_cq = 2; +static u32 bnad_rxqs_per_cq = 2; static u32 bna_id; static struct mutex bnad_list_mutex; static LIST_HEAD(bnad_list); @@ -61,23 +61,17 @@ static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; /* * Local MACROS */ -#define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2) - -#define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth) - #define BNAD_GET_MBOX_IRQ(_bnad) \ (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? 
\ ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \ ((_bnad)->pcidev->irq)) -#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth) \ +#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size) \ do { \ (_res_info)->res_type = BNA_RES_T_MEM; \ (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \ (_res_info)->res_u.mem_info.num = (_num); \ - (_res_info)->res_u.mem_info.len = \ - sizeof(struct bnad_unmap_q) + \ - (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \ + (_res_info)->res_u.mem_info.len = (_size); \ } while (0) static void @@ -103,48 +97,59 @@ bnad_remove_from_list(struct bnad *bnad) static void bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb) { - struct bna_cq_entry *cmpl, *next_cmpl; - unsigned int wi_range, wis = 0, ccb_prod = 0; + struct bna_cq_entry *cmpl; int i; - BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl, - wi_range); - for (i = 0; i < ccb->q_depth; i++) { - wis++; - if (likely(--wi_range)) - next_cmpl = cmpl + 1; - else { - BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth); - wis = 0; - BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, - next_cmpl, wi_range); - } + cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i]; cmpl->valid = 0; - cmpl = next_cmpl; } } +/* Tx Datapath functions */ + + +/* Caller should ensure that the entry at unmap_q[index] is valid */ static u32 -bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array, - u32 index, u32 depth, struct sk_buff *skb, u32 frag) +bnad_tx_buff_unmap(struct bnad *bnad, + struct bnad_tx_unmap *unmap_q, + u32 q_depth, u32 index) { - int j; - array[index].skb = NULL; - - dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr), - skb_headlen(skb), DMA_TO_DEVICE); - dma_unmap_addr_set(&array[index], dma_addr, 0); - BNA_QE_INDX_ADD(index, 1, depth); + struct bnad_tx_unmap *unmap; + struct sk_buff *skb; + int vector, nvecs; + + unmap = &unmap_q[index]; + nvecs = unmap->nvecs; + + skb = unmap->skb; + unmap->skb = NULL; + unmap->nvecs = 0; + dma_unmap_single(&bnad->pcidev->dev, + dma_unmap_addr(&unmap->vectors[0], dma_addr), + skb_headlen(skb), DMA_TO_DEVICE); + dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0); + nvecs--; + + vector = 0; + while (nvecs) { + vector++; + if (vector == BFI_TX_MAX_VECTORS_PER_WI) { + vector = 0; + BNA_QE_INDX_INC(index, q_depth); + unmap = &unmap_q[index]; + } - for (j = 0; j < frag; j++) { - dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr), - skb_frag_size(&skb_shinfo(skb)->frags[j]), - DMA_TO_DEVICE); - dma_unmap_addr_set(&array[index], dma_addr, 0); - BNA_QE_INDX_ADD(index, 1, depth); + dma_unmap_page(&bnad->pcidev->dev, + dma_unmap_addr(&unmap->vectors[vector], dma_addr), + dma_unmap_len(&unmap->vectors[vector], dma_len), + DMA_TO_DEVICE); + dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0); + nvecs--; } + BNA_QE_INDX_INC(index, q_depth); + return index; } @@ -154,80 +159,64 @@ bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array, * so DMA unmap & freeing is fine. 
*/ static void -bnad_txq_cleanup(struct bnad *bnad, - struct bna_tcb *tcb) +bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb) { - u32 unmap_cons; - struct bnad_unmap_q *unmap_q = tcb->unmap_q; - struct bnad_skb_unmap *unmap_array; - struct sk_buff *skb = NULL; - int q; - - unmap_array = unmap_q->unmap_array; + struct bnad_tx_unmap *unmap_q = tcb->unmap_q; + struct sk_buff *skb; + int i; - for (q = 0; q < unmap_q->q_depth; q++) { - skb = unmap_array[q].skb; + for (i = 0; i < tcb->q_depth; i++) { + skb = unmap_q[i].skb; if (!skb) continue; - - unmap_cons = q; - unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array, - unmap_cons, unmap_q->q_depth, skb, - skb_shinfo(skb)->nr_frags); + bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i); dev_kfree_skb_any(skb); } } -/* Data Path Handlers */ - /* * bnad_txcmpl_process : Frees the Tx bufs on Tx completion * Can be called in a) Interrupt context * b) Sending context */ static u32 -bnad_txcmpl_process(struct bnad *bnad, - struct bna_tcb *tcb) +bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb) { - u32 unmap_cons, sent_packets = 0, sent_bytes = 0; - u16 wis, updated_hw_cons; - struct bnad_unmap_q *unmap_q = tcb->unmap_q; - struct bnad_skb_unmap *unmap_array; - struct sk_buff *skb; + u32 sent_packets = 0, sent_bytes = 0; + u32 wis, unmap_wis, hw_cons, cons, q_depth; + struct bnad_tx_unmap *unmap_q = tcb->unmap_q; + struct bnad_tx_unmap *unmap; + struct sk_buff *skb; /* Just return if TX is stopped */ if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) return 0; - updated_hw_cons = *(tcb->hw_consumer_index); - - wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index, - updated_hw_cons, tcb->q_depth); + hw_cons = *(tcb->hw_consumer_index); + cons = tcb->consumer_index; + q_depth = tcb->q_depth; + wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth); BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth))); - unmap_array = unmap_q->unmap_array; - unmap_cons = unmap_q->consumer_index; - - prefetch(&unmap_array[unmap_cons + 1]); while (wis) { - skb = unmap_array[unmap_cons].skb; + unmap = &unmap_q[cons]; + + skb = unmap->skb; sent_packets++; sent_bytes += skb->len; - wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags); - unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array, - unmap_cons, unmap_q->q_depth, skb, - skb_shinfo(skb)->nr_frags); + unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs); + wis -= unmap_wis; + cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons); dev_kfree_skb_any(skb); } /* Update consumer pointers. */ - tcb->consumer_index = updated_hw_cons; - unmap_q->consumer_index = unmap_cons; + tcb->consumer_index = hw_cons; tcb->txq->tx_packets += sent_packets; tcb->txq->tx_bytes += sent_bytes; @@ -260,7 +249,7 @@ bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb) if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) bna_ib_ack(tcb->i_dbell, sent); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); return sent; @@ -278,133 +267,353 @@ bnad_msix_tx(int irq, void *data) return IRQ_HANDLED; } -static void -bnad_rcb_cleanup(struct bnad *bnad, struct bna_rcb *rcb) +static inline void +bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb) +{ + struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; + + unmap_q->reuse_pi = -1; + unmap_q->alloc_order = -1; + unmap_q->map_size = 0; + unmap_q->type = BNAD_RXBUF_NONE; +} + +/* Default is page-based allocation. 
Multi-buffer support - TBD */ +static int +bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb) +{ + struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; + int order; + + bnad_rxq_alloc_uninit(bnad, rcb); + + order = get_order(rcb->rxq->buffer_size); + + unmap_q->type = BNAD_RXBUF_PAGE; + + if (bna_is_small_rxq(rcb->id)) { + unmap_q->alloc_order = 0; + unmap_q->map_size = rcb->rxq->buffer_size; + } else { + if (rcb->rxq->multi_buffer) { + unmap_q->alloc_order = 0; + unmap_q->map_size = rcb->rxq->buffer_size; + unmap_q->type = BNAD_RXBUF_MULTI_BUFF; + } else { + unmap_q->alloc_order = order; + unmap_q->map_size = + (rcb->rxq->buffer_size > 2048) ? + PAGE_SIZE << order : 2048; + } + } + + BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size)); + + return 0; +} + +static inline void +bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap) { - struct bnad_unmap_q *unmap_q = rcb->unmap_q; + if (!unmap->page) + return; - rcb->producer_index = 0; - rcb->consumer_index = 0; + dma_unmap_page(&bnad->pcidev->dev, + dma_unmap_addr(&unmap->vector, dma_addr), + unmap->vector.len, DMA_FROM_DEVICE); + put_page(unmap->page); + unmap->page = NULL; + dma_unmap_addr_set(&unmap->vector, dma_addr, 0); + unmap->vector.len = 0; +} + +static inline void +bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap) +{ + if (!unmap->skb) + return; - unmap_q->producer_index = 0; - unmap_q->consumer_index = 0; + dma_unmap_single(&bnad->pcidev->dev, + dma_unmap_addr(&unmap->vector, dma_addr), + unmap->vector.len, DMA_FROM_DEVICE); + dev_kfree_skb_any(unmap->skb); + unmap->skb = NULL; + dma_unmap_addr_set(&unmap->vector, dma_addr, 0); + unmap->vector.len = 0; } static void bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb) { - struct bnad_unmap_q *unmap_q; - struct bnad_skb_unmap *unmap_array; - struct sk_buff *skb; - int unmap_cons; + struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; + int i; - unmap_q = rcb->unmap_q; - unmap_array = unmap_q->unmap_array; - for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) { - skb = unmap_array[unmap_cons].skb; - if (!skb) - continue; - unmap_array[unmap_cons].skb = NULL; - dma_unmap_single(&bnad->pcidev->dev, - dma_unmap_addr(&unmap_array[unmap_cons], - dma_addr), - rcb->rxq->buffer_size, - DMA_FROM_DEVICE); - dev_kfree_skb(skb); + for (i = 0; i < rcb->q_depth; i++) { + struct bnad_rx_unmap *unmap = &unmap_q->unmap[i]; + + if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) + bnad_rxq_cleanup_skb(bnad, unmap); + else + bnad_rxq_cleanup_page(bnad, unmap); } - bnad_rcb_cleanup(bnad, rcb); + bnad_rxq_alloc_uninit(bnad, rcb); } -static void -bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb) +static u32 +bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) { - u16 to_alloc, alloced, unmap_prod, wi_range; - struct bnad_unmap_q *unmap_q = rcb->unmap_q; - struct bnad_skb_unmap *unmap_array; + u32 alloced, prod, q_depth; + struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; + struct bnad_rx_unmap *unmap, *prev; struct bna_rxq_entry *rxent; - struct sk_buff *skb; + struct page *page; + u32 page_offset, alloc_size; dma_addr_t dma_addr; + prod = rcb->producer_index; + q_depth = rcb->q_depth; + + alloc_size = PAGE_SIZE << unmap_q->alloc_order; alloced = 0; - to_alloc = - BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth); - unmap_array = unmap_q->unmap_array; - unmap_prod = unmap_q->producer_index; + while (nalloc--) { + unmap = &unmap_q->unmap[prod]; + + if (unmap_q->reuse_pi < 0) { + page = alloc_pages(GFP_ATOMIC | __GFP_COMP, + unmap_q->alloc_order); + 
page_offset = 0; + } else { + prev = &unmap_q->unmap[unmap_q->reuse_pi]; + page = prev->page; + page_offset = prev->page_offset + unmap_q->map_size; + get_page(page); + } + + if (unlikely(!page)) { + BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed); + rcb->rxq->rxbuf_alloc_failed++; + goto finishing; + } + + dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset, + unmap_q->map_size, DMA_FROM_DEVICE); + + unmap->page = page; + unmap->page_offset = page_offset; + dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); + unmap->vector.len = unmap_q->map_size; + page_offset += unmap_q->map_size; + + if (page_offset < alloc_size) + unmap_q->reuse_pi = prod; + else + unmap_q->reuse_pi = -1; + + rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod]; + BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr); + BNA_QE_INDX_INC(prod, q_depth); + alloced++; + } + +finishing: + if (likely(alloced)) { + rcb->producer_index = prod; + smp_mb(); + if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags))) + bna_rxq_prod_indx_doorbell(rcb); + } + + return alloced; +} + +static u32 +bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) +{ + u32 alloced, prod, q_depth, buff_sz; + struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; + struct bnad_rx_unmap *unmap; + struct bna_rxq_entry *rxent; + struct sk_buff *skb; + dma_addr_t dma_addr; - BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range); + buff_sz = rcb->rxq->buffer_size; + prod = rcb->producer_index; + q_depth = rcb->q_depth; + + alloced = 0; + while (nalloc--) { + unmap = &unmap_q->unmap[prod]; + + skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz); - while (to_alloc--) { - if (!wi_range) - BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, - wi_range); - skb = netdev_alloc_skb_ip_align(bnad->netdev, - rcb->rxq->buffer_size); if (unlikely(!skb)) { BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed); rcb->rxq->rxbuf_alloc_failed++; goto finishing; } - unmap_array[unmap_prod].skb = skb; dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, - rcb->rxq->buffer_size, - DMA_FROM_DEVICE); - dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr, - dma_addr); - BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr); - BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth); + buff_sz, DMA_FROM_DEVICE); + + unmap->skb = skb; + dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); + unmap->vector.len = buff_sz; - rxent++; - wi_range--; + rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod]; + BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr); + BNA_QE_INDX_INC(prod, q_depth); alloced++; } finishing: if (likely(alloced)) { - unmap_q->producer_index = unmap_prod; - rcb->producer_index = unmap_prod; + rcb->producer_index = prod; smp_mb(); if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags))) bna_rxq_prod_indx_doorbell(rcb); } + + return alloced; } static inline void -bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb) +bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb) { - struct bnad_unmap_q *unmap_q = rcb->unmap_q; + struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; + u32 to_alloc; - if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) { - if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth) - >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT) - bnad_rxq_post(bnad, rcb); - smp_mb__before_clear_bit(); - clear_bit(BNAD_RXQ_REFILL, &rcb->flags); + to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth); + if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)) + return; + + if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) + bnad_rxq_refill_skb(bnad, rcb, to_alloc); + else + bnad_rxq_refill_page(bnad, rcb, to_alloc); +} 
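
[Editor's note] The refill path above carves each compound page (PAGE_SIZE << alloc_order) into map_size slices and remembers, in reuse_pi, the producer slot whose page still has room for another slice. Below is a minimal user-space sketch of that slot-reuse arithmetic only, with malloc() standing in for alloc_pages() and assumed order/slice values; the field names mirror the patch, everything else is illustrative.

	/* Standalone sketch of the slice-reuse bookkeeping used by
	 * bnad_rxq_refill_page() above.  A "page" of PAGE_SIZE << alloc_order
	 * bytes is handed out in map_size slices; reuse_pi remembers the
	 * producer slot whose page still has room for the next slice.
	 * (The demo leaks its page; the driver put_page()s each slice.) */
	#include <stdio.h>
	#include <stdlib.h>

	#define DEMO_PAGE_SIZE	4096u

	struct demo_unmap {
		void		*page;		/* backing "page" */
		unsigned int	page_offset;	/* slice offset within it */
	};

	int main(void)
	{
		unsigned int alloc_order = 2;		/* assumed: 16K page */
		unsigned int map_size = 2048;		/* assumed slice size */
		unsigned int alloc_size = DEMO_PAGE_SIZE << alloc_order;
		struct demo_unmap unmap_q[32];
		int reuse_pi = -1;
		unsigned int prod;

		for (prod = 0; prod < 8; prod++) {
			struct demo_unmap *unmap = &unmap_q[prod];
			unsigned int page_offset;
			void *page;

			if (reuse_pi < 0) {		/* start a fresh page */
				page = malloc(alloc_size);
				page_offset = 0;
			} else {			/* carve the next slice */
				struct demo_unmap *prev = &unmap_q[reuse_pi];
				page = prev->page;
				page_offset = prev->page_offset + map_size;
			}

			unmap->page = page;
			unmap->page_offset = page_offset;

			/* room left for one more slice on this page? */
			reuse_pi = (page_offset + map_size < alloc_size) ?
					(int)prod : -1;

			printf("slot %u: page %p offset %u\n",
			       prod, page, page_offset);
		}
		return 0;
	}

With these assumed values the 16K page yields exactly eight 2K buffers before reuse_pi drops back to -1 and a fresh page is allocated, which is the behavior the BUG_ON in bnad_rxq_alloc_init() guarantees (alloc_size is a multiple of map_size).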
+ +#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \ + BNA_CQ_EF_IPV6 | \ + BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \ + BNA_CQ_EF_L4_CKSUM_OK) + +#define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \ + BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK) +#define flags_tcp6 (BNA_CQ_EF_IPV6 | \ + BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK) +#define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \ + BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK) +#define flags_udp6 (BNA_CQ_EF_IPV6 | \ + BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK) + +static void +bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb, + u32 sop_ci, u32 nvecs) +{ + struct bnad_rx_unmap_q *unmap_q; + struct bnad_rx_unmap *unmap; + u32 ci, vec; + + unmap_q = rcb->unmap_q; + for (vec = 0, ci = sop_ci; vec < nvecs; vec++) { + unmap = &unmap_q->unmap[ci]; + BNA_QE_INDX_INC(ci, rcb->q_depth); + + if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) + bnad_rxq_cleanup_skb(bnad, unmap); + else + bnad_rxq_cleanup_page(bnad, unmap); + } +} + +static void +bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb, + u32 sop_ci, u32 nvecs, u32 last_fraglen) +{ + struct bnad *bnad; + u32 ci, vec, len, totlen = 0; + struct bnad_rx_unmap_q *unmap_q; + struct bnad_rx_unmap *unmap; + + unmap_q = rcb->unmap_q; + bnad = rcb->bnad; + + /* prefetch header */ + prefetch(page_address(unmap_q->unmap[sop_ci].page) + + unmap_q->unmap[sop_ci].page_offset); + + for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) { + unmap = &unmap_q->unmap[ci]; + BNA_QE_INDX_INC(ci, rcb->q_depth); + + dma_unmap_page(&bnad->pcidev->dev, + dma_unmap_addr(&unmap->vector, dma_addr), + unmap->vector.len, DMA_FROM_DEVICE); + + len = (vec == nvecs) ? + last_fraglen : unmap->vector.len; + totlen += len; + + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + unmap->page, unmap->page_offset, len); + + unmap->page = NULL; + unmap->vector.len = 0; } + + skb->len += totlen; + skb->data_len += totlen; + skb->truesize += totlen; +} + +static inline void +bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb, + struct bnad_rx_unmap *unmap, u32 len) +{ + prefetch(skb->data); + + dma_unmap_single(&bnad->pcidev->dev, + dma_unmap_addr(&unmap->vector, dma_addr), + unmap->vector.len, DMA_FROM_DEVICE); + + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, bnad->netdev); + + unmap->skb = NULL; + unmap->vector.len = 0; } static u32 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) { - struct bna_cq_entry *cmpl, *next_cmpl; + struct bna_cq_entry *cq, *cmpl, *next_cmpl; struct bna_rcb *rcb = NULL; - unsigned int wi_range, packets = 0, wis = 0; - struct bnad_unmap_q *unmap_q; - struct bnad_skb_unmap *unmap_array; - struct sk_buff *skb; - u32 flags, unmap_cons; + struct bnad_rx_unmap_q *unmap_q; + struct bnad_rx_unmap *unmap = NULL; + struct sk_buff *skb = NULL; struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; - struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl); - - if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) - return 0; + struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl; + u32 packets = 0, len = 0, totlen = 0; + u32 pi, vec, sop_ci = 0, nvecs = 0; + u32 flags, masked_flags; prefetch(bnad->netdev); - BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl, - wi_range); - BUG_ON(!(wi_range <= ccb->q_depth)); - while (cmpl->valid && packets < budget) { - packets++; + + cq = ccb->sw_q; + cmpl = &cq[ccb->producer_index]; + + while (packets < budget) { + if (!cmpl->valid) + break; + /* The 'valid' field is set by the adapter, only after writing + * the other 
fields of completion entry. Hence, do not load + * other fields of completion entry *before* the 'valid' is + * loaded. Adding the rmb() here prevents the compiler and/or + * CPU from reordering the reads which would potentially result + * in reading stale values in completion entry. + */ + rmb(); + BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length)); if (bna_is_small_rxq(cmpl->rxq_id)) @@ -413,83 +622,117 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) rcb = ccb->rcb[0]; unmap_q = rcb->unmap_q; - unmap_array = unmap_q->unmap_array; - unmap_cons = unmap_q->consumer_index; - - skb = unmap_array[unmap_cons].skb; - BUG_ON(!(skb)); - unmap_array[unmap_cons].skb = NULL; - dma_unmap_single(&bnad->pcidev->dev, - dma_unmap_addr(&unmap_array[unmap_cons], - dma_addr), - rcb->rxq->buffer_size, - DMA_FROM_DEVICE); - BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth); - - /* Should be more efficient ? Performance ? */ - BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth); - - wis++; - if (likely(--wi_range)) - next_cmpl = cmpl + 1; - else { - BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth); - wis = 0; - BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, - next_cmpl, wi_range); - BUG_ON(!(wi_range <= ccb->q_depth)); + + /* start of packet ci */ + sop_ci = rcb->consumer_index; + + if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) { + unmap = &unmap_q->unmap[sop_ci]; + skb = unmap->skb; + } else { + skb = napi_get_frags(&rx_ctrl->napi); + if (unlikely(!skb)) + break; } - prefetch(next_cmpl); + prefetch(skb); flags = ntohl(cmpl->flags); - if (unlikely - (flags & - (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR | - BNA_CQ_EF_TOO_LONG))) { - dev_kfree_skb_any(skb); + len = ntohs(cmpl->length); + totlen = len; + nvecs = 1; + + /* Check all the completions for this frame. + * busy-wait doesn't help much, break here. + */ + if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) && + (flags & BNA_CQ_EF_EOP) == 0) { + pi = ccb->producer_index; + do { + BNA_QE_INDX_INC(pi, ccb->q_depth); + next_cmpl = &cq[pi]; + + if (!next_cmpl->valid) + break; + /* The 'valid' field is set by the adapter, only + * after writing the other fields of completion + * entry. Hence, do not load other fields of + * completion entry *before* the 'valid' is + * loaded. Adding the rmb() here prevents the + * compiler and/or CPU from reordering the reads + * which would potentially result in reading + * stale values in completion entry. + */ + rmb(); + + len = ntohs(next_cmpl->length); + flags = ntohl(next_cmpl->flags); + + nvecs++; + totlen += len; + } while ((flags & BNA_CQ_EF_EOP) == 0); + + if (!next_cmpl->valid) + break; + } + + /* TODO: BNA_CQ_EF_LOCAL ? 
*/ + if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR | + BNA_CQ_EF_FCS_ERROR | + BNA_CQ_EF_TOO_LONG))) { + bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs); rcb->rxq->rx_packets_with_error++; + goto next; } - skb_put(skb, ntohs(cmpl->length)); + if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) + bnad_cq_setup_skb(bnad, skb, unmap, len); + else + bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len); + + packets++; + rcb->rxq->rx_packets++; + rcb->rxq->rx_bytes += totlen; + ccb->bytes_per_intr += totlen; + + masked_flags = flags & flags_cksum_prot_mask; + if (likely ((bnad->netdev->features & NETIF_F_RXCSUM) && - (((flags & BNA_CQ_EF_IPV4) && - (flags & BNA_CQ_EF_L3_CKSUM_OK)) || - (flags & BNA_CQ_EF_IPV6)) && - (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) && - (flags & BNA_CQ_EF_L4_CKSUM_OK))) + ((masked_flags == flags_tcp4) || + (masked_flags == flags_udp4) || + (masked_flags == flags_tcp6) || + (masked_flags == flags_udp6)))) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb_checksum_none_assert(skb); - rcb->rxq->rx_packets++; - rcb->rxq->rx_bytes += skb->len; - skb->protocol = eth_type_trans(skb, bnad->netdev); - - if (flags & BNA_CQ_EF_VLAN) - __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag)); + if ((flags & BNA_CQ_EF_VLAN) && + (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag)); - if (skb->ip_summed == CHECKSUM_UNNECESSARY) - napi_gro_receive(&rx_ctrl->napi, skb); - else + if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) netif_receive_skb(skb); + else + napi_gro_frags(&rx_ctrl->napi); next: - cmpl->valid = 0; - cmpl = next_cmpl; + BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth); + for (vec = 0; vec < nvecs; vec++) { + cmpl = &cq[ccb->producer_index]; + cmpl->valid = 0; + BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth); + } + cmpl = &cq[ccb->producer_index]; } - BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth); - + napi_gro_flush(&rx_ctrl->napi, false); if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) bna_ib_ack_disable_irq(ccb->i_dbell, packets); - bnad_refill_rxq(bnad, ccb->rcb[0]); + bnad_rxq_post(bnad, ccb->rcb[0]); if (ccb->rcb[1]) - bnad_refill_rxq(bnad, ccb->rcb[1]); - - clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags); + bnad_rxq_post(bnad, ccb->rcb[1]); return packets; } @@ -764,12 +1007,9 @@ bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb) { struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tcb->txq->tx->priv; - struct bnad_unmap_q *unmap_q = tcb->unmap_q; + tcb->priv = tcb; tx_info->tcb[tcb->id] = tcb; - unmap_q->producer_index = 0; - unmap_q->consumer_index = 0; - unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH; } static void @@ -783,16 +1023,6 @@ bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb) } static void -bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb) -{ - struct bnad_unmap_q *unmap_q = rcb->unmap_q; - - unmap_q->producer_index = 0; - unmap_q->consumer_index = 0; - unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH; -} - -static void bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb) { struct bnad_rx_info *rx_info = @@ -878,10 +1108,9 @@ bnad_tx_cleanup(struct delayed_work *work) struct bnad_tx_info *tx_info = container_of(work, struct bnad_tx_info, tx_cleanup_work); struct bnad *bnad = NULL; - struct bnad_unmap_q *unmap_q; struct bna_tcb *tcb; unsigned long flags; - uint32_t i, pending = 0; + u32 i, pending = 0; for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { tcb = tx_info->tcb[i]; @@ -897,11 +1126,7 @@ bnad_tx_cleanup(struct delayed_work *work) bnad_txq_cleanup(bnad, tcb); 
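
[Editor's note] bnad_cq_process() above depends on the adapter setting a completion entry's 'valid' field last, so the consumer must test 'valid' and issue a read barrier (the rmb() calls in the patch) before loading the other fields, then clear 'valid' to recycle the slot. Below is a self-contained C11 sketch of that ring protocol, using an acquire load in place of the valid-check-plus-rmb() pair; the entry layout, names, and values are illustrative, not the driver's.

	/* User-space sketch of the completion-ring ordering rule followed
	 * by bnad_cq_process(): producer fills an entry and sets ->valid
	 * last; consumer checks ->valid with acquire semantics before
	 * reading the rest, then clears ->valid for reuse. */
	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	struct demo_cmpl {
		uint16_t length;
		uint32_t flags;
		_Atomic uint8_t valid;	/* written last by the producer */
	};

	static unsigned int
	demo_poll(struct demo_cmpl *cq, unsigned int ci, unsigned int q_depth,
		  unsigned int budget)
	{
		unsigned int packets = 0;

		while (packets < budget) {
			struct demo_cmpl *cmpl = &cq[ci];

			/* acquire load = valid check + rmb() in the driver */
			if (!atomic_load_explicit(&cmpl->valid,
						  memory_order_acquire))
				break;

			printf("entry %u: len=%u flags=%#x\n",
			       ci, cmpl->length, cmpl->flags);

			/* recycle the slot for the producer */
			atomic_store_explicit(&cmpl->valid, 0,
					      memory_order_release);
			ci = (ci + 1) & (q_depth - 1);	/* depth: power of 2 */
			packets++;
		}
		return packets;
	}

	int main(void)
	{
		static struct demo_cmpl cq[8];	/* zero-initialized ring */

		cq[0].length = 64;  cq[0].flags = 0x1;
		atomic_store_explicit(&cq[0].valid, 1, memory_order_release);
		cq[1].length = 128; cq[1].flags = 0x2;
		atomic_store_explicit(&cq[1].valid, 1, memory_order_release);

		printf("processed %u completions\n", demo_poll(cq, 0, 8, 16));
		return 0;
	}

The multi-buffer loop in the patch applies the same rule per fragment: every next_cmpl->valid test is followed by an rmb() before next_cmpl->length and next_cmpl->flags are read, so a frame is only consumed once all of its completions are visibly complete.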
- unmap_q = tcb->unmap_q; - unmap_q->producer_index = 0; - unmap_q->consumer_index = 0; - - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); } @@ -916,7 +1141,6 @@ bnad_tx_cleanup(struct delayed_work *work) spin_unlock_irqrestore(&bnad->bna_lock, flags); } - static void bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx) { @@ -965,7 +1189,7 @@ bnad_rx_cleanup(void *work) struct bnad_rx_ctrl *rx_ctrl; struct bnad *bnad = NULL; unsigned long flags; - uint32_t i; + u32 i; for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { rx_ctrl = &rx_info->rx_ctrl[i]; @@ -1022,9 +1246,7 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx) struct bna_ccb *ccb; struct bna_rcb *rcb; struct bnad_rx_ctrl *rx_ctrl; - struct bnad_unmap_q *unmap_q; - int i; - int j; + int i, j; for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { rx_ctrl = &rx_info->rx_ctrl[i]; @@ -1039,19 +1261,10 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx) if (!rcb) continue; + bnad_rxq_alloc_init(bnad, rcb); set_bit(BNAD_RXQ_STARTED, &rcb->flags); set_bit(BNAD_RXQ_POST_OK, &rcb->flags); - unmap_q = rcb->unmap_q; - - /* Now allocate & post buffers for this RCB */ - /* !!Allocation in callback context */ - if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) { - if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth) - >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT) - bnad_rxq_post(bnad, rcb); - smp_mb__before_clear_bit(); - clear_bit(BNAD_RXQ_REFILL, &rcb->flags); - } + bnad_rxq_post(bnad, rcb); } } } @@ -1153,9 +1366,8 @@ bnad_mem_alloc(struct bnad *bnad, mem_info->mdl[i].len = mem_info->len; mem_info->mdl[i].kva = dma_alloc_coherent(&bnad->pcidev->dev, - mem_info->len, &dma_pa, - GFP_KERNEL); - + mem_info->len, &dma_pa, + GFP_KERNEL); if (mem_info->mdl[i].kva == NULL) goto err_return; @@ -1302,8 +1514,7 @@ bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src, return 0; } -/** - * NOTE: Should be called for MSIX only +/* NOTE: Should be called for MSIX only * Unregisters Tx MSIX vector(s) from the kernel */ static void @@ -1322,8 +1533,7 @@ bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info, } } -/** - * NOTE: Should be called for MSIX only +/* NOTE: Should be called for MSIX only * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel */ static int @@ -1354,8 +1564,7 @@ err_return: return -1; } -/** - * NOTE: Should be called for MSIX only +/* NOTE: Should be called for MSIX only * Unregisters Rx MSIX vector(s) from the kernel */ static void @@ -1375,8 +1584,7 @@ bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info, } } -/** - * NOTE: Should be called for MSIX only +/* NOTE: Should be called for MSIX only * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel */ static int @@ -1779,10 +1987,9 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id) spin_unlock_irqrestore(&bnad->bna_lock, flags); /* Fill Unmap Q memory requirements */ - BNAD_FILL_UNMAPQ_MEM_REQ( - &res_info[BNA_TX_RES_MEM_T_UNMAPQ], - bnad->num_txq_per_tx, - BNAD_TX_UNMAPQ_DEPTH); + BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ], + bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) * + bnad->txq_depth)); /* Allocate resources */ err = bnad_tx_res_alloc(bnad, res_info, tx_id); @@ -1794,8 +2001,10 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id) tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info, tx_info); spin_unlock_irqrestore(&bnad->bna_lock, flags); - if (!tx) + if (!tx) { + err = -ENOMEM; goto err_return; + } tx_info->tx = tx; 
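
[Editor's note] The BNAD_FILL_UNMAPQ_MEM_REQ change above moves from one allocation per queue element to one KVA block per queue: 'num' now counts queues and 'len' is the whole per-queue block (depth entries, plus a leading header for the Rx variants). A small sketch of that sizing arithmetic with stand-in structs and assumed depths; the real bnad_tx_unmap / bnad_rx_unmap / bnad_rx_unmap_q layouts differ.

	/* Sketch of the per-queue unmap-block sizing convention. */
	#include <stdio.h>
	#include <stddef.h>

	struct demo_tx_unmap { void *skb; unsigned int nvecs; };   /* stand-in */
	struct demo_rx_unmap { void *page; unsigned int offset; }; /* stand-in */
	struct demo_rx_unmap_q {                 /* header + entry array */
		int reuse_pi;
		unsigned int alloc_order, map_size, type;
		struct demo_rx_unmap unmap[];
	};

	int main(void)
	{
		unsigned int num_txq = 1, txq_depth = 2048;	/* assumed */
		unsigned int num_paths = 2, q0_depth = 4096;	/* assumed */

		/* one block per TxQ: depth entries, indexed by producer index */
		size_t tx_len = sizeof(struct demo_tx_unmap) * txq_depth;

		/* one block per Rx path: header followed by depth entries */
		size_t dq_len = sizeof(struct demo_rx_unmap_q) +
				q0_depth * sizeof(struct demo_rx_unmap);

		printf("tx unmap: num=%u, len=%zu bytes each\n",
		       num_txq, tx_len);
		printf("rx dq unmap: num=%u, len=%zu bytes each\n",
		       num_paths, dq_len);
		return 0;
	}

This matches the Rx setup further below, which requests one UNMAPDQ block per path of q0_depth * sizeof(struct bnad_rx_unmap) + sizeof(struct bnad_rx_unmap_q), and a separate UNMAPHQ block only when the path type is not BNA_RXP_SINGLE.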
INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, @@ -1806,7 +2015,7 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id) err = bnad_tx_msix_register(bnad, tx_info, tx_id, bnad->num_txq_per_tx); if (err) - goto err_return; + goto cleanup_tx; } spin_lock_irqsave(&bnad->bna_lock, flags); @@ -1815,6 +2024,12 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id) return 0; +cleanup_tx: + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_tx_destroy(tx_info->tx); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + tx_info->tx = NULL; + tx_info->tx_id = 0; err_return: bnad_tx_res_free(bnad, res_info); return err; @@ -1825,6 +2040,7 @@ err_return: static void bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config) { + memset(rx_config, 0, sizeof(*rx_config)); rx_config->rx_type = BNA_RX_T_REGULAR; rx_config->num_paths = bnad->num_rxp_per_rx; rx_config->coalescing_timeo = bnad->rx_coalescing_timeo; @@ -1845,12 +2061,43 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config) memset(&rx_config->rss_config, 0, sizeof(rx_config->rss_config)); } + + rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu); + rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED; + + /* BNA_RXP_SINGLE - one data-buffer queue + * BNA_RXP_SLR - one small-buffer and one large-buffer queues + * BNA_RXP_HDS - one header-buffer and one data-buffer queues + */ + /* TODO: configurable param for queue type */ rx_config->rxp_type = BNA_RXP_SLR; - rx_config->q_depth = bnad->rxq_depth; - rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE; + if (BNAD_PCI_DEV_IS_CAT2(bnad) && + rx_config->frame_size > 4096) { + /* though size_routing_enable is set in SLR, + * small packets may get routed to same rxq. + * set buf_size to 2048 instead of PAGE_SIZE. + */ + rx_config->q0_buf_size = 2048; + /* this should be in multiples of 2 */ + rx_config->q0_num_vecs = 4; + rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs; + rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED; + } else { + rx_config->q0_buf_size = rx_config->frame_size; + rx_config->q0_num_vecs = 1; + rx_config->q0_depth = bnad->rxq_depth; + } + + /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */ + if (rx_config->rxp_type == BNA_RXP_SLR) { + rx_config->q1_depth = bnad->rxq_depth; + rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE; + } - rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED; + rx_config->vlan_strip_status = + (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ? 
+ BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED; } static void @@ -1864,6 +2111,49 @@ bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id) } /* Called with mutex_lock(&bnad->conf_mutex) held */ +static u32 +bnad_reinit_rx(struct bnad *bnad) +{ + struct net_device *netdev = bnad->netdev; + u32 err = 0, current_err = 0; + u32 rx_id = 0, count = 0; + unsigned long flags; + + /* destroy and create new rx objects */ + for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) { + if (!bnad->rx_info[rx_id].rx) + continue; + bnad_destroy_rx(bnad, rx_id); + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_enet_mtu_set(&bnad->bna.enet, + BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) { + count++; + current_err = bnad_setup_rx(bnad, rx_id); + if (current_err && !err) { + err = current_err; + pr_err("RXQ:%u setup failed\n", rx_id); + } + } + + /* restore rx configuration */ + if (bnad->rx_info[0].rx && !err) { + bnad_restore_vlans(bnad, 0); + bnad_enable_default_bcast(bnad); + spin_lock_irqsave(&bnad->bna_lock, flags); + bnad_mac_addr_set_locked(bnad, netdev->dev_addr); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + bnad_set_rx_mode(netdev); + } + + return count; +} + +/* Called with bnad_conf_lock() held */ void bnad_destroy_rx(struct bnad *bnad, u32 rx_id) { @@ -1920,7 +2210,7 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id) &res_info[BNA_RX_RES_T_INTR].res_u.intr_info; struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; static const struct bna_rx_event_cbfn rx_cbfn = { - .rcb_setup_cbfn = bnad_cb_rcb_setup, + .rcb_setup_cbfn = NULL, .rcb_destroy_cbfn = NULL, .ccb_setup_cbfn = bnad_cb_ccb_setup, .ccb_destroy_cbfn = bnad_cb_ccb_destroy, @@ -1942,12 +2232,19 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id) spin_unlock_irqrestore(&bnad->bna_lock, flags); /* Fill Unmap Q memory requirements */ - BNAD_FILL_UNMAPQ_MEM_REQ( - &res_info[BNA_RX_RES_MEM_T_UNMAPQ], - rx_config->num_paths + - ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 : - rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH); - + BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ], + rx_config->num_paths, + (rx_config->q0_depth * + sizeof(struct bnad_rx_unmap)) + + sizeof(struct bnad_rx_unmap_q)); + + if (rx_config->rxp_type != BNA_RXP_SINGLE) { + BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ], + rx_config->num_paths, + (rx_config->q1_depth * + sizeof(struct bnad_rx_unmap) + + sizeof(struct bnad_rx_unmap_q))); + } /* Allocate resource */ err = bnad_rx_res_alloc(bnad, res_info, rx_id); if (err) @@ -2199,12 +2496,10 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb) { int err; - if (skb_header_cloned(skb)) { - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); - if (err) { - BNAD_UPDATE_CTR(bnad, tso_err); - return err; - } + err = skb_cow_head(skb, 0); + if (err < 0) { + BNAD_UPDATE_CTR(bnad, tso_err); + return err; } /* @@ -2372,9 +2667,11 @@ bnad_enable_msix(struct bnad *bnad) for (i = 0; i < bnad->msix_num; i++) bnad->msix_table[i].entry = i; - ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num); - if (ret > 0) { - /* Not enough MSI-X vectors. 
*/ + ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table, + 1, bnad->msix_num); + if (ret < 0) { + goto intx_mode; + } else if (ret < bnad->msix_num) { pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n", ret, bnad->msix_num); @@ -2387,18 +2684,11 @@ bnad_enable_msix(struct bnad *bnad) bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP + BNAD_MAILBOX_MSIX_VECTORS; - if (bnad->msix_num > ret) - goto intx_mode; - - /* Try once more with adjusted numbers */ - /* If this fails, fall back to INTx */ - ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, - bnad->msix_num); - if (ret) + if (bnad->msix_num > ret) { + pci_disable_msix(bnad->pcidev); goto intx_mode; - - } else if (ret < 0) - goto intx_mode; + } + } pci_intx(bnad->pcidev, 0); @@ -2442,7 +2732,6 @@ bnad_open(struct net_device *netdev) int err; struct bnad *bnad = netdev_priv(netdev); struct bna_pause_config pause_config; - int mtu; unsigned long flags; mutex_lock(&bnad->conf_mutex); @@ -2461,10 +2750,9 @@ bnad_open(struct net_device *netdev) pause_config.tx_pause = 0; pause_config.rx_pause = 0; - mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN; - spin_lock_irqsave(&bnad->bna_lock, flags); - bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL); + bna_enet_mtu_set(&bnad->bna.enet, + BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL); bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL); bna_enet_enable(&bnad->bna.enet); spin_unlock_irqrestore(&bnad->bna_lock, flags); @@ -2527,166 +2815,71 @@ bnad_stop(struct net_device *netdev) } /* TX */ -/* - * bnad_start_xmit : Netdev entry point for Transmit - * Called under lock held by net_device - */ -static netdev_tx_t -bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) +/* Returns 0 for success */ +static int +bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb, + struct sk_buff *skb, struct bna_txq_entry *txqent) { - struct bnad *bnad = netdev_priv(netdev); - u32 txq_id = 0; - struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id]; - - u16 txq_prod, vlan_tag = 0; - u32 unmap_prod, wis, wis_used, wi_range; - u32 vectors, vect_id, i, acked; - int err; - unsigned int len; - u32 gso_size; - - struct bnad_unmap_q *unmap_q = tcb->unmap_q; - dma_addr_t dma_addr; - struct bna_txq_entry *txqent; - u16 flags; - - if (unlikely(skb->len <= ETH_HLEN)) { - dev_kfree_skb(skb); - BNAD_UPDATE_CTR(bnad, tx_skb_too_short); - return NETDEV_TX_OK; - } - if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) { - dev_kfree_skb(skb); - BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long); - return NETDEV_TX_OK; - } - if (unlikely(skb_headlen(skb) == 0)) { - dev_kfree_skb(skb); - BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero); - return NETDEV_TX_OK; - } - - /* - * Takes care of the Tx that is scheduled between clearing the flag - * and the netif_tx_stop_all_queues() call. 
- */ - if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) { - dev_kfree_skb(skb); - BNAD_UPDATE_CTR(bnad, tx_skb_stopping); - return NETDEV_TX_OK; - } - - vectors = 1 + skb_shinfo(skb)->nr_frags; - if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) { - dev_kfree_skb(skb); - BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors); - return NETDEV_TX_OK; - } - wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */ - acked = 0; - if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) || - vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) { - if ((u16) (*tcb->hw_consumer_index) != - tcb->consumer_index && - !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { - acked = bnad_txcmpl_process(bnad, tcb); - if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) - bna_ib_ack(tcb->i_dbell, acked); - smp_mb__before_clear_bit(); - clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); - } else { - netif_stop_queue(netdev); - BNAD_UPDATE_CTR(bnad, netif_queue_stop); - } - - smp_mb(); - /* - * Check again to deal with race condition between - * netif_stop_queue here, and netif_wake_queue in - * interrupt handler which is not inside netif tx lock. - */ - if (likely - (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) || - vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) { - BNAD_UPDATE_CTR(bnad, netif_queue_stop); - return NETDEV_TX_BUSY; - } else { - netif_wake_queue(netdev); - BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); - } - } - - unmap_prod = unmap_q->producer_index; - flags = 0; - - txq_prod = tcb->producer_index; - BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range); - txqent->hdr.wi.reserved = 0; - txqent->hdr.wi.num_vectors = vectors; + u16 flags = 0; + u32 gso_size; + u16 vlan_tag = 0; if (vlan_tx_tag_present(skb)) { - vlan_tag = (u16) vlan_tx_tag_get(skb); + vlan_tag = (u16)vlan_tx_tag_get(skb); flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); } if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) { - vlan_tag = - (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff); + vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT) + | (vlan_tag & 0x1fff); flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); } - txqent->hdr.wi.vlan_tag = htons(vlan_tag); if (skb_is_gso(skb)) { gso_size = skb_shinfo(skb)->gso_size; - - if (unlikely(gso_size > netdev->mtu)) { - dev_kfree_skb(skb); + if (unlikely(gso_size > bnad->netdev->mtu)) { BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long); - return NETDEV_TX_OK; + return -EINVAL; } if (unlikely((gso_size + skb_transport_offset(skb) + - tcp_hdrlen(skb)) >= skb->len)) { - txqent->hdr.wi.opcode = - __constant_htons(BNA_TXQ_WI_SEND); + tcp_hdrlen(skb)) >= skb->len)) { + txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND); txqent->hdr.wi.lso_mss = 0; BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short); } else { - txqent->hdr.wi.opcode = - __constant_htons(BNA_TXQ_WI_SEND_LSO); + txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO); txqent->hdr.wi.lso_mss = htons(gso_size); } - err = bnad_tso_prepare(bnad, skb); - if (unlikely(err)) { - dev_kfree_skb(skb); + if (bnad_tso_prepare(bnad, skb)) { BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare); - return NETDEV_TX_OK; + return -EINVAL; } + flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM); txqent->hdr.wi.l4_hdr_size_n_offset = - htons(BNA_TXQ_WI_L4_HDR_N_OFFSET - (tcp_hdrlen(skb) >> 2, - skb_transport_offset(skb))); - } else { - txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND); + htons(BNA_TXQ_WI_L4_HDR_N_OFFSET( + tcp_hdrlen(skb) >> 2, skb_transport_offset(skb))); + } else { + txqent->hdr.wi.opcode = 
htons(BNA_TXQ_WI_SEND); txqent->hdr.wi.lso_mss = 0; - if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) { - dev_kfree_skb(skb); + if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) { BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long); - return NETDEV_TX_OK; + return -EINVAL; } if (skb->ip_summed == CHECKSUM_PARTIAL) { u8 proto = 0; - if (skb->protocol == __constant_htons(ETH_P_IP)) + if (skb->protocol == htons(ETH_P_IP)) proto = ip_hdr(skb)->protocol; - else if (skb->protocol == - __constant_htons(ETH_P_IPV6)) { +#ifdef NETIF_F_IPV6_CSUM + else if (skb->protocol == htons(ETH_P_IPV6)) { /* nexthdr may not be TCP immediately. */ proto = ipv6_hdr(skb)->nexthdr; } +#endif if (proto == IPPROTO_TCP) { flags |= BNA_TXQ_WI_CF_TCP_CKSUM; txqent->hdr.wi.l4_hdr_size_n_offset = @@ -2696,12 +2889,11 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) BNAD_UPDATE_CTR(bnad, tcpcsum_offload); if (unlikely(skb_headlen(skb) < - skb_transport_offset(skb) + tcp_hdrlen(skb))) { - dev_kfree_skb(skb); + skb_transport_offset(skb) + + tcp_hdrlen(skb))) { BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr); - return NETDEV_TX_OK; + return -EINVAL; } - } else if (proto == IPPROTO_UDP) { flags |= BNA_TXQ_WI_CF_UDP_CKSUM; txqent->hdr.wi.l4_hdr_size_n_offset = @@ -2710,105 +2902,193 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) BNAD_UPDATE_CTR(bnad, udpcsum_offload); if (unlikely(skb_headlen(skb) < - skb_transport_offset(skb) + + skb_transport_offset(skb) + sizeof(struct udphdr))) { - dev_kfree_skb(skb); BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr); - return NETDEV_TX_OK; + return -EINVAL; } } else { - dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_csum_err); - return NETDEV_TX_OK; + return -EINVAL; } - } else { + } else txqent->hdr.wi.l4_hdr_size_n_offset = 0; - } } txqent->hdr.wi.flags = htons(flags); - txqent->hdr.wi.frame_length = htonl(skb->len); - unmap_q->unmap_array[unmap_prod].skb = skb; + return 0; +} + +/* + * bnad_start_xmit : Netdev entry point for Transmit + * Called under lock held by net_device + */ +static netdev_tx_t +bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct bnad *bnad = netdev_priv(netdev); + u32 txq_id = 0; + struct bna_tcb *tcb = NULL; + struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap; + u32 prod, q_depth, vect_id; + u32 wis, vectors, len; + int i; + dma_addr_t dma_addr; + struct bna_txq_entry *txqent; + len = skb_headlen(skb); - txqent->vector[0].length = htons(len); - dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); - dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr, - dma_addr); - BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr); - BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth); + /* Sanity checks for the skb */ + + if (unlikely(skb->len <= ETH_HLEN)) { + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_too_short); + return NETDEV_TX_OK; + } + if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) { + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero); + return NETDEV_TX_OK; + } + if (unlikely(len == 0)) { + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero); + return NETDEV_TX_OK; + } + + tcb = bnad->tx_info[0].tcb[txq_id]; + + /* + * Takes care of the Tx that is scheduled between clearing the flag + * and the netif_tx_stop_all_queues() call. 
+ */ + if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) { + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_stopping); + return NETDEV_TX_OK; + } + + q_depth = tcb->q_depth; + prod = tcb->producer_index; + unmap_q = tcb->unmap_q; + + vectors = 1 + skb_shinfo(skb)->nr_frags; + wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */ + + if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) { + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors); + return NETDEV_TX_OK; + } - vect_id = 0; - wis_used = 1; + /* Check for available TxQ resources */ + if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) { + if ((*tcb->hw_consumer_index != tcb->consumer_index) && + !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { + u32 sent; + sent = bnad_txcmpl_process(bnad, tcb); + if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) + bna_ib_ack(tcb->i_dbell, sent); + smp_mb__before_atomic(); + clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); + } else { + netif_stop_queue(netdev); + BNAD_UPDATE_CTR(bnad, netif_queue_stop); + } + + smp_mb(); + /* + * Check again to deal with race condition between + * netif_stop_queue here, and netif_wake_queue in + * interrupt handler which is not inside netif tx lock. + */ + if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) { + BNAD_UPDATE_CTR(bnad, netif_queue_stop); + return NETDEV_TX_BUSY; + } else { + netif_wake_queue(netdev); + BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); + } + } + + txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod]; + head_unmap = &unmap_q[prod]; + + /* Program the opcode, flags, frame_len, num_vectors in WI */ + if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + txqent->hdr.wi.reserved = 0; + txqent->hdr.wi.num_vectors = vectors; + + head_unmap->skb = skb; + head_unmap->nvecs = 0; + + /* Program the vectors */ + unmap = head_unmap; + dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, + len, DMA_TO_DEVICE); + BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr); + txqent->vector[0].length = htons(len); + dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr); + head_unmap->nvecs++; - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + for (i = 0, vect_id = 0; i < vectors - 1; i++) { const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; - u16 size = skb_frag_size(frag); + u32 size = skb_frag_size(frag); if (unlikely(size == 0)) { - unmap_prod = unmap_q->producer_index; - - unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev, - unmap_q->unmap_array, - unmap_prod, unmap_q->q_depth, skb, - i); - dev_kfree_skb(skb); + /* Undo the changes starting at tcb->producer_index */ + bnad_tx_buff_unmap(bnad, unmap_q, q_depth, + tcb->producer_index); + dev_kfree_skb_any(skb); BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero); return NETDEV_TX_OK; } len += size; - if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) { + vect_id++; + if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) { vect_id = 0; - if (--wi_range) - txqent++; - else { - BNA_QE_INDX_ADD(txq_prod, wis_used, - tcb->q_depth); - wis_used = 0; - BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, - txqent, wi_range); - } - wis_used++; - txqent->hdr.wi_ext.opcode = - __constant_htons(BNA_TXQ_WI_EXTENSION); + BNA_QE_INDX_INC(prod, q_depth); + txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod]; + txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION); + unmap = &unmap_q[prod]; } - BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR)); - txqent->vector[vect_id].length = htons(size); dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, 
frag, 0, size, DMA_TO_DEVICE); - dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr, - dma_addr); + dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size); BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); - BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth); + txqent->vector[vect_id].length = htons(size); + dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr, + dma_addr); + head_unmap->nvecs++; } if (unlikely(len != skb->len)) { - unmap_prod = unmap_q->producer_index; - - unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev, - unmap_q->unmap_array, unmap_prod, - unmap_q->q_depth, skb, - skb_shinfo(skb)->nr_frags); - dev_kfree_skb(skb); + /* Undo the changes starting at tcb->producer_index */ + bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index); + dev_kfree_skb_any(skb); BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch); return NETDEV_TX_OK; } - unmap_q->producer_index = unmap_prod; - BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth); - tcb->producer_index = txq_prod; + BNA_QE_INDX_INC(prod, q_depth); + tcb->producer_index = prod; smp_mb(); if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) return NETDEV_TX_OK; + skb_tx_timestamp(skb); + bna_txq_prod_indx_doorbell(tcb); smp_mb(); @@ -2835,73 +3115,128 @@ bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) return stats; } +static void +bnad_set_rx_ucast_fltr(struct bnad *bnad) +{ + struct net_device *netdev = bnad->netdev; + int uc_count = netdev_uc_count(netdev); + enum bna_cb_status ret; + u8 *mac_list; + struct netdev_hw_addr *ha; + int entry; + + if (netdev_uc_empty(bnad->netdev)) { + bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL); + return; + } + + if (uc_count > bna_attr(&bnad->bna)->num_ucmac) + goto mode_default; + + mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC); + if (mac_list == NULL) + goto mode_default; + + entry = 0; + netdev_for_each_uc_addr(ha, netdev) { + memcpy(&mac_list[entry * ETH_ALEN], + &ha->addr[0], ETH_ALEN); + entry++; + } + + ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, + mac_list, NULL); + kfree(mac_list); + + if (ret != BNA_CB_SUCCESS) + goto mode_default; + + return; + + /* ucast packets not in UCAM are routed to default function */ +mode_default: + bnad->cfg_flags |= BNAD_CF_DEFAULT; + bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL); +} + +static void +bnad_set_rx_mcast_fltr(struct bnad *bnad) +{ + struct net_device *netdev = bnad->netdev; + int mc_count = netdev_mc_count(netdev); + enum bna_cb_status ret; + u8 *mac_list; + + if (netdev->flags & IFF_ALLMULTI) + goto mode_allmulti; + + if (netdev_mc_empty(netdev)) + return; + + if (mc_count > bna_attr(&bnad->bna)->num_mcmac) + goto mode_allmulti; + + mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC); + + if (mac_list == NULL) + goto mode_allmulti; + + memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN); + + /* copy rest of the MCAST addresses */ + bnad_netdev_mc_list_get(netdev, mac_list); + ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, + mac_list, NULL); + kfree(mac_list); + + if (ret != BNA_CB_SUCCESS) + goto mode_allmulti; + + return; + +mode_allmulti: + bnad->cfg_flags |= BNAD_CF_ALLMULTI; + bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL); +} + void bnad_set_rx_mode(struct net_device *netdev) { struct bnad *bnad = netdev_priv(netdev); - u32 new_mask, valid_mask; + enum bna_rxmode new_mode, mode_mask; unsigned long flags; spin_lock_irqsave(&bnad->bna_lock, flags); - new_mask = valid_mask = 0; - - if (netdev->flags & IFF_PROMISC) { - if 
(!(bnad->cfg_flags & BNAD_CF_PROMISC)) { - new_mask = BNAD_RXMODE_PROMISC_DEFAULT; - valid_mask = BNAD_RXMODE_PROMISC_DEFAULT; - bnad->cfg_flags |= BNAD_CF_PROMISC; - } - } else { - if (bnad->cfg_flags & BNAD_CF_PROMISC) { - new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT; - valid_mask = BNAD_RXMODE_PROMISC_DEFAULT; - bnad->cfg_flags &= ~BNAD_CF_PROMISC; - } - } - - if (netdev->flags & IFF_ALLMULTI) { - if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) { - new_mask |= BNA_RXMODE_ALLMULTI; - valid_mask |= BNA_RXMODE_ALLMULTI; - bnad->cfg_flags |= BNAD_CF_ALLMULTI; - } - } else { - if (bnad->cfg_flags & BNAD_CF_ALLMULTI) { - new_mask &= ~BNA_RXMODE_ALLMULTI; - valid_mask |= BNA_RXMODE_ALLMULTI; - bnad->cfg_flags &= ~BNAD_CF_ALLMULTI; - } + if (bnad->rx_info[0].rx == NULL) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + return; } - if (bnad->rx_info[0].rx == NULL) - goto unlock; + /* clear bnad flags to update it with new settings */ + bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT | + BNAD_CF_ALLMULTI); - bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL); - - if (!netdev_mc_empty(netdev)) { - u8 *mcaddr_list; - int mc_count = netdev_mc_count(netdev); + new_mode = 0; + if (netdev->flags & IFF_PROMISC) { + new_mode |= BNAD_RXMODE_PROMISC_DEFAULT; + bnad->cfg_flags |= BNAD_CF_PROMISC; + } else { + bnad_set_rx_mcast_fltr(bnad); - /* Index 0 holds the broadcast address */ - mcaddr_list = - kzalloc((mc_count + 1) * ETH_ALEN, - GFP_ATOMIC); - if (!mcaddr_list) - goto unlock; + if (bnad->cfg_flags & BNAD_CF_ALLMULTI) + new_mode |= BNA_RXMODE_ALLMULTI; - memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN); + bnad_set_rx_ucast_fltr(bnad); - /* Copy rest of the MC addresses */ - bnad_netdev_mc_list_get(netdev, mcaddr_list); + if (bnad->cfg_flags & BNAD_CF_DEFAULT) + new_mode |= BNA_RXMODE_DEFAULT; + } - bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, - mcaddr_list, NULL); + mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT | + BNA_RXMODE_ALLMULTI; + bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL); - /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? 
*/ - kfree(mcaddr_list); - } -unlock: spin_unlock_irqrestore(&bnad->bna_lock, flags); } @@ -2931,14 +3266,14 @@ bnad_set_mac_address(struct net_device *netdev, void *mac_addr) } static int -bnad_mtu_set(struct bnad *bnad, int mtu) +bnad_mtu_set(struct bnad *bnad, int frame_size) { unsigned long flags; init_completion(&bnad->bnad_completions.mtu_comp); spin_lock_irqsave(&bnad->bna_lock, flags); - bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set); + bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set); spin_unlock_irqrestore(&bnad->bna_lock, flags); wait_for_completion(&bnad->bnad_completions.mtu_comp); @@ -2949,18 +3284,34 @@ bnad_mtu_set(struct bnad *bnad, int mtu) static int bnad_change_mtu(struct net_device *netdev, int new_mtu) { - int err, mtu = netdev->mtu; + int err, mtu; struct bnad *bnad = netdev_priv(netdev); + u32 rx_count = 0, frame, new_frame; if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU) return -EINVAL; mutex_lock(&bnad->conf_mutex); + mtu = netdev->mtu; netdev->mtu = new_mtu; - mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN; - err = bnad_mtu_set(bnad, mtu); + frame = BNAD_FRAME_SIZE(mtu); + new_frame = BNAD_FRAME_SIZE(new_mtu); + + /* check if multi-buffer needs to be enabled */ + if (BNAD_PCI_DEV_IS_CAT2(bnad) && + netif_running(bnad->netdev)) { + /* only when transition is over 4K */ + if ((frame <= 4096 && new_frame > 4096) || + (frame > 4096 && new_frame <= 4096)) + rx_count = bnad_reinit_rx(bnad); + } + + /* rx_count > 0 - new rx created + * - Linux set err = 0 and return + */ + err = bnad_mtu_set(bnad, new_frame); if (err) err = -EBUSY; @@ -2969,8 +3320,7 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu) } static int -bnad_vlan_rx_add_vid(struct net_device *netdev, - unsigned short vid) +bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct bnad *bnad = netdev_priv(netdev); unsigned long flags; @@ -2991,8 +3341,7 @@ bnad_vlan_rx_add_vid(struct net_device *netdev, } static int -bnad_vlan_rx_kill_vid(struct net_device *netdev, - unsigned short vid) +bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct bnad *bnad = netdev_priv(netdev); unsigned long flags; @@ -3012,6 +3361,27 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev, return 0; } +static int bnad_set_features(struct net_device *dev, netdev_features_t features) +{ + struct bnad *bnad = netdev_priv(dev); + netdev_features_t changed = features ^ dev->features; + + if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) { + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + bna_rx_vlan_strip_enable(bnad->rx_info[0].rx); + else + bna_rx_vlan_strip_disable(bnad->rx_info[0].rx); + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + } + + return 0; +} + #ifdef CONFIG_NET_POLL_CONTROLLER static void bnad_netpoll(struct net_device *netdev) @@ -3059,6 +3429,7 @@ static const struct net_device_ops bnad_netdev_ops = { .ndo_change_mtu = bnad_change_mtu, .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid, + .ndo_set_features = bnad_set_features, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = bnad_netpoll #endif @@ -3071,14 +3442,14 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac) netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX; + NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX | + 
NETIF_F_HW_VLAN_CTAG_RX; netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6; - netdev->features |= netdev->hw_features | - NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; + netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; if (using_dac) netdev->features |= NETIF_F_HIGHDMA; @@ -3112,7 +3483,6 @@ bnad_init(struct bnad *bnad, bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len); if (!bnad->bar0) { dev_err(&pdev->dev, "ioremap for bar0 failed\n"); - pci_set_drvdata(pdev, NULL); return -ENOMEM; } pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0, @@ -3139,9 +3509,10 @@ bnad_init(struct bnad *bnad, sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id); bnad->work_q = create_singlethread_workqueue(bnad->wq_name); - - if (!bnad->work_q) + if (!bnad->work_q) { + iounmap(bnad->bar0); return -ENOMEM; + } return 0; } @@ -3162,7 +3533,6 @@ bnad_uninit(struct bnad *bnad) if (bnad->bar0) iounmap(bnad->bar0); - pci_set_drvdata(bnad->pcidev, NULL); } /* @@ -3199,17 +3569,12 @@ bnad_pci_init(struct bnad *bnad, err = pci_request_regions(pdev, BNAD_NAME); if (err) goto disable_device; - if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && - !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { *using_dac = true; } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - err = dma_set_coherent_mask(&pdev->dev, - DMA_BIT_MASK(32)); - if (err) - goto release_regions; - } + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) + goto release_regions; *using_dac = false; } pci_set_master(pdev); @@ -3230,7 +3595,7 @@ bnad_pci_uninit(struct pci_dev *pdev) pci_disable_device(pdev); } -static int __devinit +static int bnad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pcidev_id) { @@ -3272,6 +3637,7 @@ bnad_pci_probe(struct pci_dev *pdev, * Output : using_dac = 1 for 64 bit DMA * = 0 for 32 bit DMA */ + using_dac = false; err = bnad_pci_init(bnad, pdev, &using_dac); if (err) goto unlock_mutex; @@ -3323,7 +3689,6 @@ bnad_pci_probe(struct pci_dev *pdev, if (err) goto res_free; - /* Set up timers */ setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, ((unsigned long)bnad)); @@ -3429,7 +3794,7 @@ unlock_mutex: return err; } -static void __devexit +static void bnad_pci_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -3493,7 +3858,7 @@ static struct pci_driver bnad_pci_driver = { .name = BNAD_NAME, .id_table = bnad_pci_id_table, .probe = bnad_pci_probe, - .remove = __devexit_p(bnad_pci_remove), + .remove = bnad_pci_remove, }; static int __init diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h index 72742be1127..2842c188e0d 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.h +++ b/drivers/net/ethernet/brocade/bna/bnad.h @@ -71,7 +71,7 @@ struct bnad_rx_ctrl { #define BNAD_NAME "bna" #define BNAD_NAME_LEN 64 -#define BNAD_VERSION "3.0.23.0" +#define BNAD_VERSION "3.2.23.0" #define BNAD_MAILBOX_MSIX_INDEX 0 #define BNAD_MAILBOX_MSIX_VECTORS 1 @@ -83,12 +83,9 @@ struct bnad_rx_ctrl { #define BNAD_IOCETH_TIMEOUT 10000 -#define BNAD_MAX_Q_DEPTH 0x10000 -#define BNAD_MIN_Q_DEPTH 0x200 - -#define BNAD_MAX_RXQ_DEPTH (BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq) -/* keeping MAX TX and RX Q depth equal */ -#define BNAD_MAX_TXQ_DEPTH BNAD_MAX_RXQ_DEPTH +#define BNAD_MIN_Q_DEPTH 512 +#define BNAD_MAX_RXQ_DEPTH 16384 +#define 
BNAD_MAX_TXQ_DEPTH 2048 #define BNAD_JUMBO_MTU 9000 @@ -101,14 +98,16 @@ struct bnad_rx_ctrl { #define BNAD_TXQ_TX_STARTED 1 /* Bit positions for rcb->flags */ -#define BNAD_RXQ_REFILL 0 -#define BNAD_RXQ_STARTED 1 -#define BNAD_RXQ_POST_OK 2 +#define BNAD_RXQ_STARTED 0 +#define BNAD_RXQ_POST_OK 1 /* Resource limits */ #define BNAD_NUM_TXQ (bnad->num_tx * bnad->num_txq_per_tx) #define BNAD_NUM_RXP (bnad->num_rx * bnad->num_rxp_per_rx) +#define BNAD_FRAME_SIZE(_mtu) \ + (ETH_HLEN + VLAN_HLEN + (_mtu) + ETH_FCS_LEN) + /* * DATA STRUCTURES */ @@ -221,25 +220,56 @@ struct bnad_rx_info { struct work_struct rx_cleanup_work; } ____cacheline_aligned; -/* Unmap queues for Tx / Rx cleanup */ -struct bnad_skb_unmap { +struct bnad_tx_vector { + DEFINE_DMA_UNMAP_ADDR(dma_addr); + DEFINE_DMA_UNMAP_LEN(dma_len); +}; + +struct bnad_tx_unmap { struct sk_buff *skb; + u32 nvecs; + struct bnad_tx_vector vectors[BFI_TX_MAX_VECTORS_PER_WI]; +}; + +struct bnad_rx_vector { DEFINE_DMA_UNMAP_ADDR(dma_addr); + u32 len; }; -struct bnad_unmap_q { - u32 producer_index; - u32 consumer_index; - u32 q_depth; - /* This should be the last one */ - struct bnad_skb_unmap unmap_array[1]; +struct bnad_rx_unmap { + struct page *page; + struct sk_buff *skb; + struct bnad_rx_vector vector; + u32 page_offset; }; +enum bnad_rxbuf_type { + BNAD_RXBUF_NONE = 0, + BNAD_RXBUF_SK_BUFF = 1, + BNAD_RXBUF_PAGE = 2, + BNAD_RXBUF_MULTI_BUFF = 3 +}; + +#define BNAD_RXBUF_IS_SK_BUFF(_type) ((_type) == BNAD_RXBUF_SK_BUFF) +#define BNAD_RXBUF_IS_MULTI_BUFF(_type) ((_type) == BNAD_RXBUF_MULTI_BUFF) + +struct bnad_rx_unmap_q { + int reuse_pi; + int alloc_order; + u32 map_size; + enum bnad_rxbuf_type type; + struct bnad_rx_unmap unmap[0] ____cacheline_aligned; +}; + +#define BNAD_PCI_DEV_IS_CAT2(_bnad) \ + ((_bnad)->pcidev->device == BFA_PCI_DEVICE_ID_CT2) + /* Bit mask values for bnad->cfg_flags */ #define BNAD_CF_DIM_ENABLED 0x01 /* DIM */ #define BNAD_CF_PROMISC 0x02 #define BNAD_CF_ALLMULTI 0x04 -#define BNAD_CF_MSIX 0x08 /* If in MSIx mode */ +#define BNAD_CF_DEFAULT 0x08 +#define BNAD_CF_MSIX 0x10 /* If in MSIx mode */ /* Defines for run_flags bit-mask */ /* Set, tested & cleared using xxx_bit() functions */ @@ -252,11 +282,6 @@ struct bnad_unmap_q { #define BNAD_RF_STATS_TIMER_RUNNING 5 #define BNAD_RF_TX_PRIO_SET 6 - -/* Define for Fast Path flags */ -/* Defined as bit positions */ -#define BNAD_FP_IN_RX_PATH 0 - struct bnad { struct net_device *netdev; u32 id; @@ -284,8 +309,8 @@ struct bnad { u8 tx_coalescing_timeo; u8 rx_coalescing_timeo; - struct bna_rx_config rx_config[BNAD_MAX_RX]; - struct bna_tx_config tx_config[BNAD_MAX_TX]; + struct bna_rx_config rx_config[BNAD_MAX_RX] ____cacheline_aligned; + struct bna_tx_config tx_config[BNAD_MAX_TX] ____cacheline_aligned; void __iomem *bar0; /* BAR0 address */ @@ -351,47 +376,43 @@ struct bnad_drvinfo { * EXTERN VARIABLES */ extern const struct firmware *bfi_fw; -extern u32 bnad_rxqs_per_cq; /* * EXTERN PROTOTYPES */ -extern u32 *cna_get_firmware_buf(struct pci_dev *pdev); +u32 *cna_get_firmware_buf(struct pci_dev *pdev); /* Netdev entry point prototypes */ -extern void bnad_set_rx_mode(struct net_device *netdev); -extern struct net_device_stats *bnad_get_netdev_stats( - struct net_device *netdev); -extern int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr); -extern int bnad_enable_default_bcast(struct bnad *bnad); -extern void bnad_restore_vlans(struct bnad *bnad, u32 rx_id); -extern void bnad_set_ethtool_ops(struct net_device *netdev); -extern void bnad_cb_completion(void *arg, enum 
bfa_status status); +void bnad_set_rx_mode(struct net_device *netdev); +struct net_device_stats *bnad_get_netdev_stats(struct net_device *netdev); +int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr); +int bnad_enable_default_bcast(struct bnad *bnad); +void bnad_restore_vlans(struct bnad *bnad, u32 rx_id); +void bnad_set_ethtool_ops(struct net_device *netdev); +void bnad_cb_completion(void *arg, enum bfa_status status); /* Configuration & setup */ -extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad); -extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad); +void bnad_tx_coalescing_timeo_set(struct bnad *bnad); +void bnad_rx_coalescing_timeo_set(struct bnad *bnad); -extern int bnad_setup_rx(struct bnad *bnad, u32 rx_id); -extern int bnad_setup_tx(struct bnad *bnad, u32 tx_id); -extern void bnad_destroy_tx(struct bnad *bnad, u32 tx_id); -extern void bnad_destroy_rx(struct bnad *bnad, u32 rx_id); +int bnad_setup_rx(struct bnad *bnad, u32 rx_id); +int bnad_setup_tx(struct bnad *bnad, u32 tx_id); +void bnad_destroy_tx(struct bnad *bnad, u32 tx_id); +void bnad_destroy_rx(struct bnad *bnad, u32 rx_id); /* Timer start/stop protos */ -extern void bnad_dim_timer_start(struct bnad *bnad); +void bnad_dim_timer_start(struct bnad *bnad); /* Statistics */ -extern void bnad_netdev_qstats_fill(struct bnad *bnad, - struct rtnl_link_stats64 *stats); -extern void bnad_netdev_hwstats_fill(struct bnad *bnad, - struct rtnl_link_stats64 *stats); +void bnad_netdev_qstats_fill(struct bnad *bnad, + struct rtnl_link_stats64 *stats); +void bnad_netdev_hwstats_fill(struct bnad *bnad, + struct rtnl_link_stats64 *stats); /* Debugfs */ -void bnad_debugfs_init(struct bnad *bnad); -void bnad_debugfs_uninit(struct bnad *bnad); +void bnad_debugfs_init(struct bnad *bnad); +void bnad_debugfs_uninit(struct bnad *bnad); -/** - * MACROS - */ +/* MACROS */ /* To set & get the stats counters */ #define BNAD_UPDATE_CTR(_bnad, _ctr) \ (((_bnad)->stats.drv_stats._ctr)++) diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c index 6e8bc9d88c4..7d6aa8c87df 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c +++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c @@ -230,32 +230,12 @@ bnad_debugfs_open_drvinfo(struct inode *inode, struct file *file) static loff_t bnad_debugfs_lseek(struct file *file, loff_t offset, int orig) { - loff_t pos = file->f_pos; struct bnad_debug_info *debug = file->private_data; if (!debug) return -EINVAL; - switch (orig) { - case 0: - file->f_pos = offset; - break; - case 1: - file->f_pos += offset; - break; - case 2: - file->f_pos = debug->buffer_len - offset; - break; - default: - return -EINVAL; - } - - if (file->f_pos < 0 || file->f_pos > debug->buffer_len) { - file->f_pos = pos; - return -EINVAL; - } - - return file->f_pos; + return fixed_size_llseek(file, offset, orig, debug->buffer_len); } static ssize_t diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c index 40e1e84f498..882cad71ad6 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c @@ -102,6 +102,7 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { "rx_unmap_q_alloc_failed", "rxbuf_alloc_failed", + "mac_stats_clr_cnt", "mac_frame_64", "mac_frame_65_127", "mac_frame_128_255", @@ -265,8 +266,8 @@ bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) ethtool_cmd_speed_set(cmd, SPEED_10000); cmd->duplex = 
DUPLEX_FULL; } else { - ethtool_cmd_speed_set(cmd, -1); - cmd->duplex = -1; + ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + cmd->duplex = DUPLEX_UNKNOWN; } cmd->transceiver = XCVR_EXTERNAL; cmd->maxtxpkt = 0; @@ -1130,10 +1131,11 @@ static const struct ethtool_ops bnad_ethtool_ops = { .get_eeprom = bnad_get_eeprom, .set_eeprom = bnad_set_eeprom, .flash_device = bnad_flash_device, + .get_ts_info = ethtool_op_get_ts_info, }; void bnad_set_ethtool_ops(struct net_device *netdev) { - SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops); + netdev->ethtool_ops = &bnad_ethtool_ops; } diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h index 32e8f178ab7..b3ff6d50795 100644 --- a/drivers/net/ethernet/brocade/bna/cna.h +++ b/drivers/net/ethernet/brocade/bna/cna.h @@ -37,8 +37,8 @@ extern char bfa_version[]; -#define CNA_FW_FILE_CT "ctfw.bin" -#define CNA_FW_FILE_CT2 "ct2fw.bin" +#define CNA_FW_FILE_CT "ctfw-3.2.3.0.bin" +#define CNA_FW_FILE_CT2 "ct2fw-3.2.3.0.bin" #define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */ #pragma pack(1) diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c index cfc22a64157..6a68e8d9330 100644 --- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c +++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c @@ -67,10 +67,10 @@ bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off) { switch (asic_gen) { case BFI_ASIC_GEN_CT: - return (u32 *)(bfi_image_ct_cna + off); + return (bfi_image_ct_cna + off); break; case BFI_ASIC_GEN_CT2: - return (u32 *)(bfi_image_ct2_cna + off); + return (bfi_image_ct2_cna + off); break; default: return NULL;
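
The Tx-path rework in bnad.c above drops the flat unmap_array in favour of one bnad_tx_unmap entry per sent skb, recording a DMA address (and, for fragments, a length) per vector so the completion and error paths can undo exactly the mappings that were made. Below is a minimal sketch of that map/record/unmap pattern, not code from the patch: the structures are trimmed mirrors of the bnad.h additions, and the sketch_* names are illustrative only.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Trimmed mirror of the bnad_tx_vector/bnad_tx_unmap pair added to bnad.h. */
struct sketch_tx_vector {
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_LEN(dma_len);
};

struct sketch_tx_unmap {
	struct sk_buff *skb;
	u32 nvecs;
	struct sketch_tx_vector vectors[4];	/* BFI_TX_MAX_VECTORS_PER_WI */
};

static int sketch_tx_map(struct device *dev, struct sketch_tx_unmap *unmap,
			 int vect_id, void *data, u32 len)
{
	dma_addr_t dma = dma_map_single(dev, data, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* Record both halves so the completion path can unmap blindly. */
	dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr, dma);
	dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, len);
	unmap->nvecs++;
	return 0;
}

static void sketch_tx_unmap_one(struct device *dev,
				struct sketch_tx_unmap *unmap, int vect_id)
{
	dma_unmap_single(dev,
			 dma_unmap_addr(&unmap->vectors[vect_id], dma_addr),
			 dma_unmap_len(&unmap->vectors[vect_id], dma_len),
			 DMA_TO_DEVICE);
}

Note that the actual patch only calls dma_unmap_len_set() in the fragment loop; the head of the skb is unmapped using skb_headlen(), so no length needs to be stored for vector 0. The sketch records both for uniformity.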

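The resource check in the reworked bnad_start_xmit() relies on power-of-two ring arithmetic (BNA_QE_FREE_CNT, BNA_QE_INDX_INC). Those macros live in bna headers that are not part of this diff, so the following is an assumed reconstruction of their shape, shown only to make the producer/consumer logic readable.

#include <linux/types.h>

/* Assumed shape of the queue-index helpers; q_depth must be a power of 2. */
#define SKETCH_QE_INDX_INC(_idx, _q_depth)	\
	((_idx) = ((_idx) + 1) & ((_q_depth) - 1))

static inline u32 sketch_qe_free_cnt(u32 producer, u32 consumer, u32 q_depth)
{
	/* Entries the producer may still fill, keeping one slot free so
	 * that a full ring is distinguishable from an empty one.
	 */
	return (consumer - producer - 1) & (q_depth - 1);
}

With unsigned wrap-around and a power-of-two depth, the masked subtraction stays correct even when the raw indices overflow, which is why the driver can compare "wis > BNA_QE_FREE_CNT(tcb, q_depth)" without any extra normalization.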
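The queue-full branch of bnad_start_xmit() also shows a standard race-avoidance idiom: stop the queue, issue a full barrier, then re-read the ring state, so a completion that freed space between the first check and netif_stop_queue() cannot leave the queue stopped forever. A condensed sketch of that pattern, with hypothetical names (the real code re-reads the tcb indices directly rather than going through a callback):

#include <linux/netdevice.h>

/* Returns true when the caller must back off with NETDEV_TX_BUSY. */
static bool sketch_tx_ring_full(struct net_device *netdev, u32 wis,
				u32 (*free_cnt)(void *ring), void *ring)
{
	netif_stop_queue(netdev);
	smp_mb();	/* pairs with the completion path's barrier before wake */
	if (wis > free_cnt(ring))
		return true;	/* genuinely full: let the stack retry later */
	netif_wake_queue(netdev);	/* a completion freed space meanwhile */
	return false;
}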