| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-10-13 10:57:01 +0900 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-10-13 10:57:01 +0900 |
| commit | 6a5a3d6a4adde0c66f3be29bbd7c0d6ffb7e1a40 (patch) | |
| tree | ae416ffa4458df755f984a05d65ee1c3e220c40b /drivers/scsi/bfa | |
| parent | 8bbbfa70549bd84f29ff331d0ac051897ccbbd72 (diff) | |
| parent | 5c1b10ab7f93d24f29b5630286e323d1c5802d5c (diff) | |
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull misc SCSI updates from James Bottomley:
"This is an assorted set of stragglers into the merge window with
driver updates for megaraid_sas, lpfc, bfi and mvumi. It also
includes some fairly major fixes for virtio-scsi (scatterlist init),
scsi_debug (off by one error), storvsc (use after free) and qla2xxx
(potential deadlock).
Signed-off-by: James Bottomley <JBottomley@Parallels.com>"
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (49 commits)
[SCSI] storvsc: Account for in-transit packets in the RESET path
[SCSI] qla2xxx: fix potential deadlock on ha->hardware_lock
[SCSI] scsi_debug: Fix off-by-one bug when unmapping region
[SCSI] Shorten the path length of scsi_cmd_to_driver()
[SCSI] virtio-scsi: support online resizing of disks
[SCSI] virtio-scsi: fix LUNs greater than 255
[SCSI] virtio-scsi: initialize scatterlist structure
[SCSI] megaraid_sas: Version, Changelog, Copyright update
[SCSI] megaraid_sas: Remove duplicate code
[SCSI] megaraid_sas: Add SystemPD FastPath support
[SCSI] megaraid_sas: Add array boundary check for SystemPD
[SCSI] megaraid_sas: Load io_request DataLength in bytes
[SCSI] megaraid_sas: Add module param for configurable MSI-X vector count
[SCSI] megaraid_sas: Remove un-needed completion_lock spinlock calls
[SCSI] lpfc 8.3.35: Update lpfc version for 8.3.35 driver release
[SCSI] lpfc 8.3.35: Fixed not reporting logical link speed to SCSI midlayer when QoS not on
[SCSI] lpfc 8.3.35: Fix error with fabric service parameters causing performance issues
[SCSI] lpfc 8.3.35: Fixed SCSI host create showing wrong link speed on SLI3 HBA ports
[SCSI] lpfc 8.3.35: Fixed not checking solicition in progress bit when verifying FCF record for use
[SCSI] lpfc 8.3.35: Fixed messages for misconfigured port errors
...
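Among the stragglers pulled in above, the scsi_debug change fixes an off-by-one when unmapping a provisioned region. As a purely hypothetical illustration (not the actual scsi_debug patch), the usual shape of such a bug is a closed upper bound on what should be a half-open block range:

```c
/*
 * Hypothetical illustration only -- not the actual scsi_debug fix.
 * An unmap helper must treat [lba, lba + num) as half-open; using "<="
 * on the end index touches one block past the caller's range.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAP_SIZE 64          /* blocks tracked by the provisioning map */

static bool map[MAP_SIZE];   /* true = block is currently mapped */

static void unmap_region(unsigned int lba, unsigned int num)
{
    /* Correct: stop before lba + num (half-open interval). */
    for (unsigned int i = lba; i < lba + num && i < MAP_SIZE; i++)
        map[i] = false;
    /* A buggy variant using "i <= lba + num" clears one extra block. */
}

int main(void)
{
    for (unsigned int i = 0; i < MAP_SIZE; i++)
        map[i] = true;

    unmap_region(8, 4);      /* should clear blocks 8..11 only */

    printf("block 11 mapped: %d, block 12 mapped: %d\n", map[11], map[12]);
    return 0;
}
```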
Diffstat (limited to 'drivers/scsi/bfa')
26 files changed, 2764 insertions, 302 deletions
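The bfa_fcpim.c hunks in the diff below add an IO-throttle value negotiated between the flash (dconf) configuration and the driver parameter; per the in-code comment, the flash value is used only when it is non-zero and does not exceed the driver-configured value, because memory was allocated from the latter. A simplified standalone sketch of that selection follows — the flash lookup and names are stand-ins, and the real bfa_fcpim_get_throttle_cfg() also short-circuits to the value already in effect after load, which this sketch omits:

```c
/* Simplified sketch of the throttle selection added in bfa_fcpim.c. */
#include <stdint.h>
#include <stdio.h>

static uint16_t read_flash_throttle(int flash_valid, uint16_t flash_value)
{
    /* 0 means "no valid throttle stored in flash". */
    return flash_valid ? flash_value : 0;
}

static uint16_t get_throttle_cfg(uint16_t drv_cfg_param,
                                 int flash_valid, uint16_t flash_value)
{
    uint16_t tmp = read_flash_throttle(flash_valid, flash_value);

    if (!tmp || tmp > drv_cfg_param)
        tmp = drv_cfg_param;    /* fall back to (and cap at) driver cfg */

    return tmp;
}

int main(void)
{
    printf("%u\n", get_throttle_cfg(256, 1, 128));  /* flash wins: 128 */
    printf("%u\n", get_throttle_cfg(256, 1, 512));  /* capped: 256 */
    printf("%u\n", get_throttle_cfg(256, 0, 0));    /* no flash: 256 */
    return 0;
}
```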
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c index b7c326f7a6d..342d7d9c099 100644 --- a/drivers/scsi/bfa/bfa_core.c +++ b/drivers/scsi/bfa/bfa_core.c @@ -165,6 +165,16 @@ bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg) bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg); } +static void +bfa_com_fru_attach(struct bfa_s *bfa, bfa_boolean_t mincfg) +{ + struct bfa_fru_s *fru = BFA_FRU(bfa); + struct bfa_mem_dma_s *fru_dma = BFA_MEM_FRU_DMA(bfa); + + bfa_fru_attach(fru, &bfa->ioc, bfa, bfa->trcmod, mincfg); + bfa_fru_memclaim(fru, fru_dma->kva_curp, fru_dma->dma_curp, mincfg); +} + /* * BFA IOC FC related definitions */ @@ -274,6 +284,15 @@ bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event) case IOCFC_E_IOC_ENABLED: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read); break; + + case IOCFC_E_DISABLE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); + break; + + case IOCFC_E_STOP: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping); + break; + case IOCFC_E_IOC_FAILED: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); break; @@ -298,6 +317,15 @@ bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event) case IOCFC_E_DCONF_DONE: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait); break; + + case IOCFC_E_DISABLE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); + break; + + case IOCFC_E_STOP: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping); + break; + case IOCFC_E_IOC_FAILED: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); break; @@ -322,6 +350,15 @@ bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event) case IOCFC_E_CFG_DONE: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done); break; + + case IOCFC_E_DISABLE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); + break; + + case IOCFC_E_STOP: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping); + break; + case IOCFC_E_IOC_FAILED: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); break; @@ -433,6 +470,12 @@ bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event) bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb, iocfc->bfa); break; + + case IOCFC_E_IOC_ENABLED: + case IOCFC_E_DCONF_DONE: + case IOCFC_E_CFG_DONE: + break; + default: bfa_sm_fault(iocfc->bfa, event); break; @@ -454,6 +497,15 @@ bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event) case IOCFC_E_IOC_ENABLED: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait); break; + + case IOCFC_E_DISABLE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); + break; + + case IOCFC_E_STOP: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write); + break; + case IOCFC_E_IOC_FAILED: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed); @@ -493,6 +545,13 @@ bfa_iocfc_sm_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event) bfa_iocfc_enable_cb, iocfc->bfa); iocfc->bfa->iocfc.cb_reqd = BFA_FALSE; break; + case IOCFC_E_DISABLE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); + break; + + case IOCFC_E_STOP: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write); + break; case IOCFC_E_IOC_FAILED: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed); if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE) @@ -524,6 +583,10 @@ bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event) case IOCFC_E_IOC_DISABLED: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled); break; + case IOCFC_E_IOC_ENABLED: + case IOCFC_E_DCONF_DONE: + case IOCFC_E_CFG_DONE: + break; default: bfa_sm_fault(iocfc->bfa, event); break; @@ -785,19 +848,20 @@ void 
bfa_isr_enable(struct bfa_s *bfa) { u32 umsk; - int pci_func = bfa_ioc_pcifn(&bfa->ioc); + int port_id = bfa_ioc_portid(&bfa->ioc); - bfa_trc(bfa, pci_func); + bfa_trc(bfa, bfa_ioc_pcifn(&bfa->ioc)); + bfa_trc(bfa, port_id); bfa_msix_ctrl_install(bfa); if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) { umsk = __HFN_INT_ERR_MASK_CT2; - umsk |= pci_func == 0 ? + umsk |= port_id == 0 ? __HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2; } else { umsk = __HFN_INT_ERR_MASK; - umsk |= pci_func == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK; + umsk |= port_id == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK; } writel(umsk, bfa->iocfc.bfa_regs.intr_status); @@ -930,7 +994,8 @@ bfa_iocfc_send_cfg(void *bfa_arg) cfg_info->single_msix_vec = 1; cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG; cfg_info->num_cqs = cfg->fwcfg.num_cqs; - cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs); + cfg_info->num_ioim_reqs = cpu_to_be16(bfa_fcpim_get_throttle_cfg(bfa, + cfg->fwcfg.num_ioim_reqs)); cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs); bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa); @@ -1192,10 +1257,14 @@ bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg) static void bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg) { + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo; + bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs); bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs); bfa_rport_res_recfg(bfa, fwcfg->num_rports); - bfa_fcp_res_recfg(bfa, fwcfg->num_ioim_reqs); + bfa_fcp_res_recfg(bfa, cpu_to_be16(cfg_info->num_ioim_reqs), + fwcfg->num_ioim_reqs); bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs); } @@ -1693,6 +1762,7 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa); struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa); struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa); + struct bfa_mem_dma_s *fru_dma = BFA_MEM_FRU_DMA(bfa); WARN_ON((cfg == NULL) || (meminfo == NULL)); @@ -1717,6 +1787,8 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo()); bfa_mem_dma_setup(meminfo, phy_dma, bfa_phy_meminfo(cfg->drvcfg.min_cfg)); + bfa_mem_dma_setup(meminfo, fru_dma, + bfa_fru_meminfo(cfg->drvcfg.min_cfg)); } /* @@ -1789,6 +1861,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg); bfa_com_diag_attach(bfa); bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg); + bfa_com_fru_attach(bfa, cfg->drvcfg.min_cfg); } /* diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h index b5a1595cc0a..0efdf312b42 100644 --- a/drivers/scsi/bfa/bfa_defs.h +++ b/drivers/scsi/bfa/bfa_defs.h @@ -159,10 +159,13 @@ enum bfa_status { BFA_STATUS_BEACON_ON = 72, /* Port Beacon already on */ BFA_STATUS_ENOFSAVE = 78, /* No saved firmware trace */ BFA_STATUS_IOC_DISABLED = 82, /* IOC is already disabled */ + BFA_STATUS_ERROR_TRL_ENABLED = 87, /* TRL is enabled */ + BFA_STATUS_ERROR_QOS_ENABLED = 88, /* QoS is enabled */ BFA_STATUS_NO_SFP_DEV = 89, /* No SFP device check or replace SFP */ BFA_STATUS_MEMTEST_FAILED = 90, /* Memory test failed contact support */ BFA_STATUS_LEDTEST_OP = 109, /* LED test is operating */ BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */ + BFA_STATUS_CMD_NOTSUPP_CNA = 146, /* Command not supported for CNA */ BFA_STATUS_PBC = 154, /* Operation not allowed 
for pre-boot * configuration */ BFA_STATUS_BAD_FWCFG = 156, /* Bad firmware configuration */ @@ -184,6 +187,17 @@ enum bfa_status { BFA_STATUS_FAA_ACQ_ADDR = 200, /* Acquiring addr */ BFA_STATUS_ERROR_TRUNK_ENABLED = 203, /* Trunk enabled on adapter */ BFA_STATUS_MAX_ENTRY_REACHED = 212, /* MAX entry reached */ + BFA_STATUS_TOPOLOGY_LOOP = 230, /* Topology is set to Loop */ + BFA_STATUS_LOOP_UNSUPP_MEZZ = 231, /* Loop topology is not supported + * on mezz cards */ + BFA_STATUS_INVALID_BW = 233, /* Invalid bandwidth value */ + BFA_STATUS_QOS_BW_INVALID = 234, /* Invalid QOS bandwidth + * configuration */ + BFA_STATUS_DPORT_ENABLED = 235, /* D-port mode is already enabled */ + BFA_STATUS_DPORT_DISABLED = 236, /* D-port mode is already disabled */ + BFA_STATUS_CMD_NOTSUPP_MEZZ = 239, /* Cmd not supported for MEZZ card */ + BFA_STATUS_FRU_NOT_PRESENT = 240, /* fru module not present */ + BFA_STATUS_DPORT_ERR = 245, /* D-port mode is enabled */ BFA_STATUS_MAX_VAL /* Unknown error code */ }; #define bfa_status_t enum bfa_status @@ -249,6 +263,10 @@ struct bfa_adapter_attr_s { u8 is_mezz; u8 trunk_capable; + u8 mfg_day; /* manufacturing day */ + u8 mfg_month; /* manufacturing month */ + u16 mfg_year; /* manufacturing year */ + u16 rsvd; }; /* @@ -499,6 +517,17 @@ struct bfa_ioc_aen_data_s { }; /* + * D-port states + * +*/ +enum bfa_dport_state { + BFA_DPORT_ST_DISABLED = 0, /* D-port is Disabled */ + BFA_DPORT_ST_DISABLING = 1, /* D-port is Disabling */ + BFA_DPORT_ST_ENABLING = 2, /* D-port is Enabling */ + BFA_DPORT_ST_ENABLED = 3, /* D-port is Enabled */ +}; + +/* * ---------------------- mfg definitions ------------ */ @@ -722,7 +751,8 @@ struct bfa_ablk_cfg_pf_s { u8 rsvd[1]; u16 num_qpairs; u16 num_vectors; - u32 bw; + u16 bw_min; + u16 bw_max; }; struct bfa_ablk_cfg_port_s { @@ -889,11 +919,40 @@ struct sfp_diag_ext_s { u8 ext_status_ctl[2]; }; +/* + * Diagnostic: Data Fields -- Address A2h + * General Use Fields: User Writable Table - Features's Control Registers + * Total 32 bytes + */ +struct sfp_usr_eeprom_s { + u8 rsvd1[2]; /* 128-129 */ + u8 ewrap; /* 130 */ + u8 rsvd2[2]; /* */ + u8 owrap; /* 133 */ + u8 rsvd3[2]; /* */ + u8 prbs; /* 136: PRBS 7 generator */ + u8 rsvd4[2]; /* */ + u8 tx_eqz_16; /* 139: TX Equalizer (16xFC) */ + u8 tx_eqz_8; /* 140: TX Equalizer (8xFC) */ + u8 rsvd5[2]; /* */ + u8 rx_emp_16; /* 143: RX Emphasis (16xFC) */ + u8 rx_emp_8; /* 144: RX Emphasis (8xFC) */ + u8 rsvd6[2]; /* */ + u8 tx_eye_adj; /* 147: TX eye Threshold Adjust */ + u8 rsvd7[3]; /* */ + u8 tx_eye_qctl; /* 151: TX eye Quality Control */ + u8 tx_eye_qres; /* 152: TX eye Quality Result */ + u8 rsvd8[2]; /* */ + u8 poh[3]; /* 155-157: Power On Hours */ + u8 rsvd9[2]; /* */ +}; + struct sfp_mem_s { struct sfp_srlid_base_s srlid_base; struct sfp_srlid_ext_s srlid_ext; struct sfp_diag_base_s diag_base; struct sfp_diag_ext_s diag_ext; + struct sfp_usr_eeprom_s usr_eeprom; }; /* diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h index 36756ce0e58..ec03c8cd8da 100644 --- a/drivers/scsi/bfa/bfa_defs_svc.h +++ b/drivers/scsi/bfa/bfa_defs_svc.h @@ -258,6 +258,7 @@ struct bfa_fw_port_lksm_stats_s { u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */ u32 hwsm_lr_rx; /* No. 
of times LR rx-ed by HWSM */ u32 bbsc_lr; /* LKSM LR tx for credit recovery */ + u32 rsvd; }; struct bfa_fw_port_snsm_stats_s { @@ -270,6 +271,9 @@ struct bfa_fw_port_snsm_stats_s { u32 sync_lost; /* Sync loss count */ u32 sig_lost; /* Signal loss count */ u32 asn8g_attempts; /* SNSM HWSM at 8Gbps attempts */ + u32 adapt_success; /* SNSM adaptation success */ + u32 adapt_fails; /* SNSM adaptation failures */ + u32 adapt_ign_fails; /* SNSM adaptation failures ignored */ }; struct bfa_fw_port_physm_stats_s { @@ -324,12 +328,46 @@ struct bfa_fw_fcoe_port_stats_s { struct bfa_fw_fip_stats_s fip_stats; }; +/** + * @brief LPSM statistics + */ +struct bfa_fw_lpsm_stats_s { + u32 cls_rx; /* LPSM cls_rx */ + u32 cls_tx; /* LPSM cls_tx */ + u32 arbf0_rx; /* LPSM abrf0 rcvd */ + u32 arbf0_tx; /* LPSM abrf0 xmit */ + u32 init_rx; /* LPSM loop init start */ + u32 unexp_hwst; /* LPSM unknown hw state */ + u32 unexp_frame; /* LPSM unknown_frame */ + u32 unexp_prim; /* LPSM unexpected primitive */ + u32 prev_alpa_unavail; /* LPSM prev alpa unavailable */ + u32 alpa_unavail; /* LPSM alpa not available */ + u32 lip_rx; /* LPSM lip rcvd */ + u32 lip_f7f7_rx; /* LPSM lip f7f7 rcvd */ + u32 lip_f8_rx; /* LPSM lip f8 rcvd */ + u32 lip_f8f7_rx; /* LPSM lip f8f7 rcvd */ + u32 lip_other_rx; /* LPSM lip other rcvd */ + u32 lip_tx; /* LPSM lip xmit */ + u32 retry_tov; /* LPSM retry TOV */ + u32 lip_tov; /* LPSM LIP wait TOV */ + u32 idle_tov; /* LPSM idle wait TOV */ + u32 arbf0_tov; /* LPSM arbfo wait TOV */ + u32 stop_loop_tov; /* LPSM stop loop wait TOV */ + u32 lixa_tov; /* LPSM lisa wait TOV */ + u32 lixx_tov; /* LPSM lilp/lirp wait TOV */ + u32 cls_tov; /* LPSM cls wait TOV */ + u32 sler; /* LPSM SLER recvd */ + u32 failed; /* LPSM failed */ + u32 success; /* LPSM online */ +}; + /* * IOC firmware FC uport stats */ struct bfa_fw_fc_uport_stats_s { struct bfa_fw_port_snsm_stats_s snsm_stats; struct bfa_fw_port_lksm_stats_s lksm_stats; + struct bfa_fw_lpsm_stats_s lpsm_stats; }; /* @@ -357,11 +395,6 @@ struct bfa_fw_fcxchg_stats_s { u32 ua_state_inv; }; -struct bfa_fw_lpsm_stats_s { - u32 cls_rx; - u32 cls_tx; -}; - /* * Trunk statistics */ @@ -454,7 +487,6 @@ struct bfa_fw_stats_s { struct bfa_fw_io_stats_s io_stats; struct bfa_fw_port_stats_s port_stats; struct bfa_fw_fcxchg_stats_s fcxchg_stats; - struct bfa_fw_lpsm_stats_s lpsm_stats; struct bfa_fw_lps_stats_s lps_stats; struct bfa_fw_trunk_stats_s trunk_stats; struct bfa_fw_advsm_stats_s advsm_stats; @@ -494,13 +526,23 @@ enum bfa_qos_bw_alloc { BFA_QOS_BW_LOW = 10, /* bandwidth allocation for Low */ }; #pragma pack(1) + +struct bfa_qos_bw_s { + u8 qos_bw_set; + u8 high; + u8 med; + u8 low; +}; + /* * QoS attribute returned in QoS Query */ struct bfa_qos_attr_s { - u8 state; /* QoS current state */ - u8 rsvd[3]; - u32 total_bb_cr; /* Total BB Credits */ + u8 state; /* QoS current state */ + u8 rsvd1[3]; + u32 total_bb_cr; /* Total BB Credits */ + struct bfa_qos_bw_s qos_bw; /* QOS bw cfg */ + struct bfa_qos_bw_s qos_bw_op; /* QOS bw operational */ }; /* @@ -692,7 +734,8 @@ enum bfa_port_states { BFA_PORT_ST_FWMISMATCH = 12, BFA_PORT_ST_PREBOOT_DISABLED = 13, BFA_PORT_ST_TOGGLING_QWAIT = 14, - BFA_PORT_ST_ACQ_ADDR = 15, + BFA_PORT_ST_FAA_MISCONFIG = 15, + BFA_PORT_ST_DPORT = 16, BFA_PORT_ST_MAX_STATE, }; @@ -714,9 +757,11 @@ enum bfa_port_type { */ enum bfa_port_topology { BFA_PORT_TOPOLOGY_NONE = 0, /* No valid topology */ - BFA_PORT_TOPOLOGY_P2P = 1, /* P2P only */ - BFA_PORT_TOPOLOGY_LOOP = 2, /* LOOP topology */ - BFA_PORT_TOPOLOGY_AUTO = 3, /* auto 
topology selection */ + BFA_PORT_TOPOLOGY_P2P_OLD_VER = 1, /* P2P def for older ver */ + BFA_PORT_TOPOLOGY_LOOP = 2, /* LOOP topology */ + BFA_PORT_TOPOLOGY_AUTO_OLD_VER = 3, /* auto def for older ver */ + BFA_PORT_TOPOLOGY_AUTO = 4, /* auto topology selection */ + BFA_PORT_TOPOLOGY_P2P = 5, /* P2P only */ }; /* @@ -760,6 +805,7 @@ enum bfa_port_linkstate_rsn { BFA_PORT_LINKSTATE_RSN_LOCAL_FAULT = 9, BFA_PORT_LINKSTATE_RSN_REMOTE_FAULT = 10, BFA_PORT_LINKSTATE_RSN_TIMEOUT = 11, + BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG = 12, @@ -833,6 +879,19 @@ struct bfa_lunmask_cfg_s { struct bfa_lun_mask_s lun_list[MAX_LUN_MASK_CFG]; }; +struct bfa_throttle_cfg_s { + u16 is_valid; + u16 value; + u32 rsvd; +}; + +struct bfa_defs_fcpim_throttle_s { + u16 max_value; + u16 cur_value; + u16 cfg_value; + u16 rsvd; +}; + /* * Physical port configuration */ @@ -851,9 +910,10 @@ struct bfa_port_cfg_s { u8 bb_scn; /* BB_SCN value from FLOGI Exchg */ u8 bb_scn_state; /* Config state of BB_SCN */ u8 faa_state; /* FAA enabled/disabled */ - u8 rsvd[1]; + u8 rsvd1; u16 path_tov; /* device path timeout */ u16 q_depth; /* SCSI Queue depth */ + struct bfa_qos_bw_s qos_bw; /* QOS bandwidth */ }; #pragma pack() @@ -901,7 +961,7 @@ struct bfa_port_attr_s { /* FCoE specific */ u16 fcoe_vlan; - u8 rsvd1[2]; + u8 rsvd1[6]; }; /* @@ -971,6 +1031,13 @@ struct bfa_trunk_vc_attr_s { u16 vc_credits[8]; }; +struct bfa_fcport_loop_info_s { + u8 myalpa; /* alpa claimed */ + u8 alpabm_val; /* alpa bitmap valid or not (1 or 0) */ + u8 resvd[6]; + struct fc_alpabm_s alpabm; /* alpa bitmap */ +}; + /* * Link state information */ @@ -981,13 +1048,18 @@ struct bfa_port_link_s { u8 speed; /* Link speed (1/2/4/8 G) */ u32 linkstate_opt; /* Linkstate optional data (debug) */ u8 trunked; /* Trunked or not (1 or 0) */ - u8 resvd[3]; + u8 resvd[7]; struct bfa_qos_attr_s qos_attr; /* QoS Attributes */ union { - struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */ - struct bfa_trunk_vc_attr_s trunk_vc_attr; - struct bfa_fcport_fcf_s fcf; /* FCF information (for FCoE) */ - } vc_fcf; + struct bfa_fcport_loop_info_s loop_info; + union { + struct bfa_qos_vc_attr_s qos_vc_attr; + /* VC info from ELP */ + struct bfa_trunk_vc_attr_s trunk_vc_attr; + struct bfa_fcport_fcf_s fcf; + /* FCF information (for FCoE) */ + } vc_fcf; + } attr; }; #pragma pack() @@ -1112,6 +1184,9 @@ struct bfa_port_fc_stats_s { u64 tx_frames; /* Tx frames */ u64 tx_words; /* Tx words */ u64 tx_lip; /* Tx LIP */ + u64 tx_lip_f7f7; /* Tx LIP_F7F7 */ + u64 tx_lip_f8f7; /* Tx LIP_F8F7 */ + u64 tx_arbf0; /* Tx ARB F0 */ u64 tx_nos; /* Tx NOS */ u64 tx_ols; /* Tx OLS */ u64 tx_lr; /* Tx LR */ @@ -1119,6 +1194,9 @@ struct bfa_port_fc_stats_s { u64 rx_frames; /* Rx frames */ u64 rx_words; /* Rx words */ u64 lip_count; /* Rx LIP */ + u64 rx_lip_f7f7; /* Rx LIP_F7F7 */ + u64 rx_lip_f8f7; /* Rx LIP_F8F7 */ + u64 rx_arbf0; /* Rx ARB F0 */ u64 nos_count; /* Rx NOS */ u64 ols_count; /* Rx OLS */ u64 lr_count; /* Rx LR */ @@ -1140,6 +1218,7 @@ struct bfa_port_fc_stats_s { u64 bbsc_frames_lost; /* Credit Recovery-Frames Lost */ u64 bbsc_credits_lost; /* Credit Recovery-Credits Lost */ u64 bbsc_link_resets; /* Credit Recovery-Link Resets */ + u64 loop_timeouts; /* Loop timeouts */ }; /* diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h index e0beb4d7e26..bea821b9803 100644 --- a/drivers/scsi/bfa/bfa_fc.h +++ b/drivers/scsi/bfa/bfa_fc.h @@ -24,6 +24,7 @@ typedef u64 wwn_t; #define WWN_NULL (0) #define FC_SYMNAME_MAX 256 /* max name server symbolic name size */ +#define 
FC_ALPA_MAX 128 #pragma pack(1) @@ -1015,6 +1016,10 @@ struct fc_symname_s { u8 symname[FC_SYMNAME_MAX]; }; +struct fc_alpabm_s { + u8 alpa_bm[FC_ALPA_MAX / 8]; +}; + /* * protocol default timeout values */ diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c index 273cee90b3b..dce787f6cca 100644 --- a/drivers/scsi/bfa/bfa_fcbuild.c +++ b/drivers/scsi/bfa/bfa_fcbuild.c @@ -228,6 +228,10 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s)); + /* For FC AL bb_cr is 0 and altbbcred is 1 */ + if (!bb_cr) + plogi->csp.altbbcred = 1; + plogi->els_cmd.els_code = els_code; if (els_code == FC_ELS_PLOGI) fc_els_req_build(fchs, d_id, s_id, ox_id); diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c index 1633963c66c..27b56096235 100644 --- a/drivers/scsi/bfa/bfa_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcpim.c @@ -158,6 +158,7 @@ enum bfa_tskim_event { BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */ BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */ BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */ + BFA_TSKIM_SM_UTAG = 10, /* TM completion unknown tag */ }; /* @@ -3036,7 +3037,7 @@ bfa_ioim_abort(struct bfa_ioim_s *ioim) static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) { - bfa_trc(tskim->bfa, event); + bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); switch (event) { case BFA_TSKIM_SM_START: @@ -3074,7 +3075,7 @@ bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) { - bfa_trc(tskim->bfa, event); + bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); switch (event) { case BFA_TSKIM_SM_DONE: @@ -3110,7 +3111,7 @@ bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) { - bfa_trc(tskim->bfa, event); + bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); switch (event) { case BFA_TSKIM_SM_DONE: @@ -3119,6 +3120,7 @@ bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) */ break; + case BFA_TSKIM_SM_UTAG: case BFA_TSKIM_SM_CLEANUP_DONE: bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); bfa_tskim_cleanup_ios(tskim); @@ -3138,7 +3140,7 @@ bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) { - bfa_trc(tskim->bfa, event); + bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); switch (event) { case BFA_TSKIM_SM_IOS_DONE: @@ -3170,7 +3172,7 @@ bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) { - bfa_trc(tskim->bfa, event); + bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); switch (event) { case BFA_TSKIM_SM_QRESUME: @@ -3207,7 +3209,7 @@ static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) { - bfa_trc(tskim->bfa, event); + bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); switch (event) { case BFA_TSKIM_SM_DONE: @@ -3238,7 +3240,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) { - bfa_trc(tskim->bfa, event); + bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); switch (event) { case BFA_TSKIM_SM_HCB: @@ -3560,6 +3562,8 @@ bfa_tskim_isr(struct bfa_s 
*bfa, struct bfi_msg_s *m) if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) { bfa_stats(tskim->itnim, tm_cleanup_comps); bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE); + } else if (rsp->tsk_status == BFI_TSKIM_STS_UTAG) { + bfa_sm_send_event(tskim, BFA_TSKIM_SM_UTAG); } else { bfa_stats(tskim->itnim, tm_fw_rsps); bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE); @@ -3699,6 +3703,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_mem_dma_s *seg_ptr; u16 idx, nsegs, num_io_req; + fcp->max_ioim_reqs = cfg->fwcfg.num_ioim_reqs; fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs; fcp->num_fwtio_reqs = cfg->fwcfg.num_fwtio_reqs; fcp->num_itns = cfg->fwcfg.num_rports; @@ -3721,6 +3726,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa); } + fcp->throttle_update_required = 1; bfa_fcpim_attach(fcp, bfad, cfg, pcidev); bfa_iotag_attach(fcp); @@ -3759,23 +3765,33 @@ bfa_fcp_iocdisable(struct bfa_s *bfa) { struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); - /* Enqueue unused ioim resources to free_q */ - list_splice_tail_init(&fcp->iotag_unused_q, &fcp->iotag_ioim_free_q); - bfa_fcpim_iocdisable(fcp); } void -bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw) +bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw) { struct bfa_fcp_mod_s *mod = BFA_FCP_MOD(bfa); struct list_head *qe; int i; + /* Update io throttle value only once during driver load time */ + if (!mod->throttle_update_required) + return; + for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) { bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe); list_add_tail(qe, &mod->iotag_unused_q); } + + if (mod->num_ioim_reqs != num_ioim_fw) { + bfa_trc(bfa, mod->num_ioim_reqs); + bfa_trc(bfa, num_ioim_fw); + } + + mod->max_ioim_reqs = max_ioim_fw; + mod->num_ioim_reqs = num_ioim_fw; + mod->throttle_update_required = 0; } void @@ -3833,3 +3849,88 @@ bfa_iotag_attach(struct bfa_fcp_mod_s *fcp) bfa_mem_kva_curp(fcp) = (u8 *) iotag; } + + +/** + * To send config req, first try to use throttle value from flash + * If 0, then use driver parameter + * We need to use min(flash_val, drv_val) because + * memory allocation was done based on this cfg'd value + */ +u16 +bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param) +{ + u16 tmp; + struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); + + /* + * If throttle value from flash is already in effect after driver is + * loaded then until next load, always return current value instead + * of actual flash value + */ + if (!fcp->throttle_update_required) + return (u16)fcp->num_ioim_reqs; + + tmp = bfa_dconf_read_data_valid(bfa) ? bfa_fcpim_read_throttle(bfa) : 0; + if (!tmp || (tmp > drv_cfg_param)) + tmp = drv_cfg_param; + + return tmp; +} + +bfa_status_t +bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value) +{ + if (!bfa_dconf_get_min_cfg(bfa)) { + BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.value = value; + BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.is_valid = 1; + return BFA_STATUS_OK; + } + + return BFA_STATUS_FAILED; +} + +u16 +bfa_fcpim_read_throttle(struct bfa_s *bfa) +{ + struct bfa_throttle_cfg_s *throttle_cfg = + &(BFA_DCONF_MOD(bfa)->dconf->throttle_cfg); + + return ((!bfa_dconf_get_min_cfg(bfa)) ? + ((throttle_cfg->is_valid == 1) ? (throttle_cfg->value) : 0) : 0); +} + +bfa_status_t +bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value) +{ + /* in min cfg no commands should run. 
*/ + if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) || + (!bfa_dconf_read_data_valid(bfa))) + return BFA_STATUS_FAILED; + + bfa_fcpim_write_throttle(bfa, value); + + return bfa_dconf_update(bfa); +} + +bfa_status_t +bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf) +{ + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); + struct bfa_defs_fcpim_throttle_s throttle; + + if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) || + (!bfa_dconf_read_data_valid(bfa))) + return BFA_STATUS_FAILED; + + memset(&throttle, 0, sizeof(struct bfa_defs_fcpim_throttle_s)); + + throttle.cur_value = (u16)(fcpim->fcp->num_ioim_reqs); + throttle.cfg_value = bfa_fcpim_read_throttle(bfa); + if (!throttle.cfg_value) + throttle.cfg_value = throttle.cur_value; + throttle.max_value = (u16)(fcpim->fcp->max_ioim_reqs); + memcpy(buf, &throttle, sizeof(struct bfa_defs_fcpim_throttle_s)); + + return BFA_STATUS_OK; +} diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h index 36f26da80f7..e693af6e593 100644 --- a/drivers/scsi/bfa/bfa_fcpim.h +++ b/drivers/scsi/bfa/bfa_fcpim.h @@ -42,7 +42,7 @@ void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m)); void bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m); void bfa_iotag_attach(struct bfa_fcp_mod_s *fcp); -void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw); +void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw); #define BFA_FCP_MOD(_hal) (&(_hal)->modules.fcp_mod) #define BFA_MEM_FCP_KVA(__bfa) (&(BFA_FCP_MOD(__bfa)->kva_seg)) @@ -51,7 +51,9 @@ void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw); #define BFA_ITN_FROM_TAG(_fcp, _tag) \ ((_fcp)->itn_arr + ((_tag) & ((_fcp)->num_itns - 1))) #define BFA_SNSINFO_FROM_TAG(_fcp, _tag) \ - bfa_mem_get_dmabuf_kva(_fcp, _tag, BFI_IOIM_SNSLEN) + bfa_mem_get_dmabuf_kva(_fcp, (_tag & BFA_IOIM_IOTAG_MASK), \ + BFI_IOIM_SNSLEN) + #define BFA_ITNIM_MIN 32 #define BFA_ITNIM_MAX 1024 @@ -148,6 +150,7 @@ struct bfa_fcp_mod_s { struct list_head iotag_unused_q; /* unused IO resources*/ struct bfa_iotag_s *iotag_arr; struct bfa_itn_s *itn_arr; + int max_ioim_reqs; int num_ioim_reqs; int num_fwtio_reqs; int num_itns; @@ -155,6 +158,7 @@ struct bfa_fcp_mod_s { struct bfa_fcpim_s fcpim; struct bfa_mem_dma_s dma_seg[BFA_FCP_DMA_SEGS]; struct bfa_mem_kva_s kva_seg; + int throttle_update_required; }; /* @@ -416,5 +420,10 @@ bfa_status_t bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, bfa_status_t bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun); bfa_status_t bfa_fcpim_lunmask_clear(struct bfa_s *bfa); +u16 bfa_fcpim_read_throttle(struct bfa_s *bfa); +bfa_status_t bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value); +bfa_status_t bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value); +bfa_status_t bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf); +u16 bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param); #endif /* __BFA_FCPIM_H__ */ diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c index fd3e84d32bd..d428808fb37 100644 --- a/drivers/scsi/bfa/bfa_fcs.c +++ b/drivers/scsi/bfa/bfa_fcs.c @@ -303,16 +303,30 @@ static void bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { + struct bfa_s *bfa = fabric->fcs->bfa; + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_START: - if (bfa_fcport_is_linkup(fabric->fcs->bfa)) { + if 
(!bfa_fcport_is_linkup(fabric->fcs->bfa)) { + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); + break; + } + if (bfa_fcport_get_topology(bfa) == + BFA_PORT_TOPOLOGY_LOOP) { + fabric->fab_type = BFA_FCS_FABRIC_LOOP; + fabric->bport.pid = bfa_fcport_get_myalpa(bfa); + fabric->bport.pid = bfa_hton3b(fabric->bport.pid); + bfa_sm_set_state(fabric, + bfa_fcs_fabric_sm_online); + bfa_fcs_fabric_set_opertype(fabric); + bfa_fcs_lport_online(&fabric->bport); + } else { bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); bfa_fcs_fabric_login(fabric); - } else - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); + } break; case BFA_FCS_FABRIC_SM_LINK_UP: @@ -337,16 +351,28 @@ static void bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { + struct bfa_s *bfa = fabric->fcs->bfa; + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_UP: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); - bfa_fcs_fabric_login(fabric); + if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP) { + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); + bfa_fcs_fabric_login(fabric); + break; + } + fabric->fab_type = BFA_FCS_FABRIC_LOOP; + fabric->bport.pid = bfa_fcport_get_myalpa(bfa); + fabric->bport.pid = bfa_hton3b(fabric->bport.pid); + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online); + bfa_fcs_fabric_set_opertype(fabric); + bfa_fcs_lport_online(&fabric->bport); break; case BFA_FCS_FABRIC_SM_RETRY_OP: + case BFA_FCS_FABRIC_SM_LOOPBACK: break; case BFA_FCS_FABRIC_SM_DELETE: @@ -595,14 +621,20 @@ void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { + struct bfa_s *bfa = fabric->fcs->bfa; + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); - bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); - bfa_fcs_fabric_notify_offline(fabric); + if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) { + bfa_fcs_lport_offline(&fabric->bport); + } else { + bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); + bfa_fcs_fabric_notify_offline(fabric); + } break; case BFA_FCS_FABRIC_SM_DELETE: @@ -719,20 +751,29 @@ static void bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { + struct bfa_s *bfa = fabric->fcs->bfa; + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_STOPCOMP: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup); - bfa_sm_send_event(fabric->lps, BFA_LPS_SM_LOGOUT); + if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) { + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created); + } else { + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup); + bfa_sm_send_event(fabric->lps, BFA_LPS_SM_LOGOUT); + } break; case BFA_FCS_FABRIC_SM_LINK_UP: break; case BFA_FCS_FABRIC_SM_LINK_DOWN: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup); + if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created); + else + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup); break; default: @@ -975,9 +1016,6 @@ bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric) struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg; u8 alpa = 0, bb_scn = 0; - if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) - alpa = bfa_fcport_get_myalpa(bfa); - if 
(bfa_fcs_fabric_is_bbscn_enabled(fabric) && (!fabric->fcs->bbscn_flogi_rjt)) bb_scn = BFA_FCS_PORT_DEF_BB_SCN; diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h index 6c4377cb287..a449706c6bc 100644 --- a/drivers/scsi/bfa/bfa_fcs.h +++ b/drivers/scsi/bfa/bfa_fcs.h @@ -118,9 +118,9 @@ struct bfa_fcs_lport_fab_s { #define MAX_ALPA_COUNT 127 struct bfa_fcs_lport_loop_s { - u8 num_alpa; /* Num of ALPA entries in the map */ - u8 alpa_pos_map[MAX_ALPA_COUNT]; /* ALPA Positional - *Map */ + u8 num_alpa; /* Num of ALPA entries in the map */ + u8 alpabm_valid; /* alpa bitmap valid or not (1 or 0) */ + u8 alpa_pos_map[MAX_ALPA_COUNT]; /* ALPA Positional Map */ struct bfa_fcs_lport_s *port; /* parent port */ }; @@ -175,6 +175,7 @@ enum bfa_fcs_fabric_type { BFA_FCS_FABRIC_UNKNOWN = 0, BFA_FCS_FABRIC_SWITCHED = 1, BFA_FCS_FABRIC_N2N = 2, + BFA_FCS_FABRIC_LOOP = 3, }; @@ -350,9 +351,10 @@ void bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced); void bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *vport); void bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *vport); -void bfa_fcs_lport_scn_online(struct bfa_fcs_lport_s *vport); +void bfa_fcs_lport_fab_scn_online(struct bfa_fcs_lport_s *vport); void bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port, struct fchs_s *rx_frame, u32 len); +void bfa_fcs_lport_lip_scn_online(bfa_fcs_lport_t *port); struct bfa_fcs_vport_s { struct list_head qe; /* queue elem */ @@ -453,6 +455,7 @@ struct bfa_fcs_rport_s { struct bfa_rport_stats_s stats; /* rport stats */ enum bfa_rport_function scsi_function; /* Initiator/Target */ struct bfa_fcs_rpf_s rpf; /* Rport features module */ + bfa_boolean_t scn_online; /* SCN online flag */ }; static inline struct bfa_rport_s * @@ -639,9 +642,9 @@ struct bfa_fcs_fdmi_hba_attr_s { u8 model[16]; u8 model_desc[256]; u8 hw_version[8]; - u8 driver_version[8]; + u8 driver_version[BFA_VERSION_LEN]; u8 option_rom_ver[BFA_VERSION_LEN]; - u8 fw_version[8]; + u8 fw_version[BFA_VERSION_LEN]; u8 os_name[256]; __be32 max_ct_pyld; }; @@ -733,7 +736,7 @@ enum rport_event { RPSM_EVENT_LOGO_IMP = 5, /* implicit logo for SLER */ RPSM_EVENT_FCXP_SENT = 6, /* Frame from has been sent */ RPSM_EVENT_DELETE = 7, /* RPORT delete request */ - RPSM_EVENT_SCN = 8, /* state change notification */ + RPSM_EVENT_FAB_SCN = 8, /* state change notification */ RPSM_EVENT_ACCEPTED = 9, /* Good response from remote device */ RPSM_EVENT_FAILED = 10, /* Request to rport failed. 
*/ RPSM_EVENT_TIMEOUT = 11, /* Rport SM timeout event */ @@ -744,7 +747,9 @@ enum rport_event { RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */ RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */ RPSM_EVENT_PLOGI_RETRY = 18, /* Retry PLOGI continuously */ - RPSM_EVENT_FC4_FCS_ONLINE = 19, /*!< FC-4 FCS online complete */ + RPSM_EVENT_SCN_OFFLINE = 19, /* loop scn offline */ + RPSM_EVENT_SCN_ONLINE = 20, /* loop scn online */ + RPSM_EVENT_FC4_FCS_ONLINE = 21, /* FC-4 FCS online complete */ }; /* @@ -763,7 +768,7 @@ enum bfa_fcs_itnim_event { BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */ BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */ BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */ - BFA_FCS_ITNIM_SM_HAL_ONLINE = 13, /*!< bfa rport online event */ + BFA_FCS_ITNIM_SM_HAL_ONLINE = 13, /* bfa rport online event */ }; /* diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index 3b75f6fb2de..1224d0462a4 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -23,6 +23,34 @@ BFA_TRC_FILE(FCS, PORT); +/* + * ALPA to LIXA bitmap mapping + * + * ALPA 0x00 (Word 0, Bit 30) is invalid for N_Ports. Also Word 0 Bit 31 + * is for L_bit (login required) and is filled as ALPA 0x00 here. + */ +static const u8 loop_alpa_map[] = { + 0x00, 0x00, 0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, /* Word 0 Bits 31..24 */ + 0x17, 0x18, 0x1B, 0x1D, 0x1E, 0x1F, 0x23, 0x25, /* Word 0 Bits 23..16 */ + 0x26, 0x27, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, /* Word 0 Bits 15..08 */ + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x39, 0x3A, /* Word 0 Bits 07..00 */ + + 0x3C, 0x43, 0x45, 0x46, 0x47, 0x49, 0x4A, 0x4B, /* Word 1 Bits 31..24 */ + 0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54, 0x55, /* Word 1 Bits 23..16 */ + 0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67, /* Word 1 Bits 15..08 */ + 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, /* Word 1 Bits 07..00 */ + + 0x73, 0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, /* Word 2 Bits 31..24 */ + 0x81, 0x82, 0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, /* Word 2 Bits 23..16 */ + 0x9B, 0x9D, 0x9E, 0x9F, 0xA3, 0xA5, 0xA6, 0xA7, /* Word 2 Bits 15..08 */ + 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xB1, 0xB2, /* Word 2 Bits 07..00 */ + + 0xB3, 0xB4, 0xB5, 0xB6, 0xB9, 0xBA, 0xBC, 0xC3, /* Word 3 Bits 31..24 */ + 0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, /* Word 3 Bits 23..16 */ + 0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD9, /* Word 3 Bits 15..08 */ + 0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF, /* Word 3 Bits 07..00 */ +}; + static void bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, u8 reason_code, u8 reason_code_expl); @@ -51,6 +79,10 @@ static void bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port); static void bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port); static void bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port); +static void bfa_fcs_lport_loop_init(struct bfa_fcs_lport_s *port); +static void bfa_fcs_lport_loop_online(struct bfa_fcs_lport_s *port); +static void bfa_fcs_lport_loop_offline(struct bfa_fcs_lport_s *port); + static struct { void (*init) (struct bfa_fcs_lport_s *port); void (*online) (struct bfa_fcs_lport_s *port); @@ -62,7 +94,9 @@ static struct { bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online, bfa_fcs_lport_fab_offline}, { bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online, - bfa_fcs_lport_n2n_offline}, + bfa_fcs_lport_n2n_offline}, { + bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online, + bfa_fcs_lport_loop_offline}, }; /* 
@@ -1127,7 +1161,7 @@ static void bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port) { bfa_fcs_lport_ns_online(port); - bfa_fcs_lport_scn_online(port); + bfa_fcs_lport_fab_scn_online(port); } /* @@ -1221,6 +1255,98 @@ bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port) n2n_port->reply_oxid = 0; } +void +bfa_fcport_get_loop_attr(struct bfa_fcs_lport_s *port) +{ + int i = 0, j = 0, bit = 0, alpa_bit = 0; + u8 k = 0; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(port->fcs->bfa); + + port->port_topo.ploop.alpabm_valid = fcport->alpabm_valid; + port->pid = fcport->myalpa; + port->pid = bfa_hton3b(port->pid); + + for (i = 0; i < (FC_ALPA_MAX / 8); i++) { + for (j = 0, alpa_bit = 0; j < 8; j++, alpa_bit++) { + bfa_trc(port->fcs->bfa, fcport->alpabm.alpa_bm[i]); + bit = (fcport->alpabm.alpa_bm[i] & (1 << (7 - j))); + if (bit) { + port->port_topo.ploop.alpa_pos_map[k] = + loop_alpa_map[(i * 8) + alpa_bit]; + k++; + bfa_trc(port->fcs->bfa, k); + bfa_trc(port->fcs->bfa, + port->port_topo.ploop.alpa_pos_map[k]); + } + } + } + port->port_topo.ploop.num_alpa = k; +} + +/* + * Called by fcs/port to initialize Loop topology. + */ +static void +bfa_fcs_lport_loop_init(struct bfa_fcs_lport_s *port) +{ +} + +/* + * Called by fcs/port to notify transition to online state. + */ +static void +bfa_fcs_lport_loop_online(struct bfa_fcs_lport_s *port) +{ + u8 num_alpa = 0, alpabm_valid = 0; + struct bfa_fcs_rport_s *rport; + u8 *alpa_map = NULL; + int i = 0; + u32 pid; + + bfa_fcport_get_loop_attr(port); + + num_alpa = port->port_topo.ploop.num_alpa; + alpabm_valid = port->port_topo.ploop.alpabm_valid; + alpa_map = port->port_topo.ploop.alpa_pos_map; + + bfa_trc(port->fcs->bfa, port->pid); + bfa_trc(port->fcs->bfa, num_alpa); + if (alpabm_valid == 1) { + for (i = 0; i < num_alpa; i++) { + bfa_trc(port->fcs->bfa, alpa_map[i]); + if (alpa_map[i] != bfa_hton3b(port->pid)) { + pid = alpa_map[i]; + bfa_trc(port->fcs->bfa, pid); + rport = bfa_fcs_lport_get_rport_by_pid(port, + bfa_hton3b(pid)); + if (!rport) + rport = bfa_fcs_rport_create(port, + bfa_hton3b(pid)); + } + } + } else { + for (i = 0; i < MAX_ALPA_COUNT; i++) { + if (alpa_map[i] != port->pid) { + pid = loop_alpa_map[i]; + bfa_trc(port->fcs->bfa, pid); + rport = bfa_fcs_lport_get_rport_by_pid(port, + bfa_hton3b(pid)); + if (!rport) + rport = bfa_fcs_rport_create(port, + bfa_hton3b(pid)); + } + } + } +} + +/* + * Called by fcs/port to notify transition to offline state. 
+ */ +static void +bfa_fcs_lport_loop_offline(struct bfa_fcs_lport_s *port) +{ +} + #define BFA_FCS_FDMI_CMD_MAX_RETRIES 2 /* @@ -1888,13 +2014,10 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) sizeof(templen)); } - /* - * f/w Version = driver version - */ attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION); - templen = (u16) strlen(fcs_hba_attr->driver_version); - memcpy(attr->value, fcs_hba_attr->driver_version, templen); + templen = (u16) strlen(fcs_hba_attr->fw_version); + memcpy(attr->value, fcs_hba_attr->fw_version, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; @@ -2296,6 +2419,7 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi, { struct bfa_fcs_lport_s *port = fdmi->ms->port; struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info; + struct bfa_fcs_fdmi_port_attr_s fcs_port_attr; memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s)); @@ -2331,7 +2455,9 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi, sizeof(driver_info->host_os_patch)); } - hba_attr->max_ct_pyld = cpu_to_be32(FC_MAX_PDUSZ); + /* Retrieve the max frame size from the port attr */ + bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr); + hba_attr->max_ct_pyld = fcs_port_attr.max_frm_size; } static void @@ -2391,7 +2517,7 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, /* * Max PDU Size. */ - port_attr->max_frm_size = cpu_to_be32(FC_MAX_PDUSZ); + port_attr->max_frm_size = cpu_to_be32(pport_attr.pport_cfg.maxfrsize); /* * OS device Name @@ -5199,7 +5325,7 @@ bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *port) } void -bfa_fcs_lport_scn_online(struct bfa_fcs_lport_s *port) +bfa_fcs_lport_fab_scn_online(struct bfa_fcs_lport_s *port) { struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port); @@ -5621,6 +5747,15 @@ bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port) } /* + * Let new loop map create missing rports + */ +void +bfa_fcs_lport_lip_scn_online(struct bfa_fcs_lport_s *port) +{ + bfa_fcs_lport_loop_online(port); +} + +/* * FCS virtual port state machine */ diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c index cc43b2a58ce..58ac643ba9f 100644 --- a/drivers/scsi/bfa/bfa_fcs_rport.c +++ b/drivers/scsi/bfa/bfa_fcs_rport.c @@ -106,9 +106,13 @@ static void bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport, enum rport_event event); static void bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event); -static void bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport, - enum rport_event event); -static void bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, +static void bfa_fcs_rport_sm_adisc_online_sending( + struct bfa_fcs_rport_s *rport, enum rport_event event); +static void bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_adisc_offline_sending(struct bfa_fcs_rport_s + *rport, enum rport_event event); +static void bfa_fcs_rport_sm_adisc_offline(struct bfa_fcs_rport_s *rport, enum rport_event event); static void bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, enum rport_event event); @@ -150,8 +154,10 @@ static struct bfa_sm_table_s rport_sm_table[] = { {BFA_SM(bfa_fcs_rport_sm_online), BFA_RPORT_ONLINE}, {BFA_SM(bfa_fcs_rport_sm_nsquery_sending), BFA_RPORT_NSQUERY}, {BFA_SM(bfa_fcs_rport_sm_nsquery), BFA_RPORT_NSQUERY}, - 
{BFA_SM(bfa_fcs_rport_sm_adisc_sending), BFA_RPORT_ADISC}, - {BFA_SM(bfa_fcs_rport_sm_adisc), BFA_RPORT_ADISC}, + {BFA_SM(bfa_fcs_rport_sm_adisc_online_sending), BFA_RPORT_ADISC}, + {BFA_SM(bfa_fcs_rport_sm_adisc_online), BFA_RPORT_ADISC}, + {BFA_SM(bfa_fcs_rport_sm_adisc_offline_sending), BFA_RPORT_ADISC}, + {BFA_SM(bfa_fcs_rport_sm_adisc_offline), BFA_RPORT_ADISC}, {BFA_SM(bfa_fcs_rport_sm_fc4_logorcv), BFA_RPORT_LOGORCV}, {BFA_SM(bfa_fcs_rport_sm_fc4_logosend), BFA_RPORT_LOGO}, {BFA_SM(bfa_fcs_rport_sm_fc4_offline), BFA_RPORT_OFFLINE}, @@ -231,10 +237,19 @@ bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport, bfa_fcs_rport_send_plogiacc(rport, NULL); break; + case RPSM_EVENT_SCN_OFFLINE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; case RPSM_EVENT_ADDRESS_CHANGE: - case RPSM_EVENT_SCN: + case RPSM_EVENT_FAB_SCN: /* query the NS */ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) != + BFA_PORT_TOPOLOGY_LOOP)); bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); rport->ns_retries = 0; bfa_fcs_rport_send_nsdisc(rport, NULL); @@ -280,12 +295,20 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport, case RPSM_EVENT_PLOGI_RCVD: case RPSM_EVENT_PLOGI_COMP: - case RPSM_EVENT_SCN: + case RPSM_EVENT_FAB_SCN: /* * Ignore, SCN is possibly online notification. */ break; + case RPSM_EVENT_SCN_OFFLINE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + case RPSM_EVENT_ADDRESS_CHANGE: bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); @@ -346,9 +369,19 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport, bfa_fcs_rport_send_plogiacc(rport, NULL); break; + case RPSM_EVENT_SCN_OFFLINE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_timer_stop(&rport->timer); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + case RPSM_EVENT_ADDRESS_CHANGE: - case RPSM_EVENT_SCN: + case RPSM_EVENT_FAB_SCN: bfa_timer_stop(&rport->timer); + WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) != + BFA_PORT_TOPOLOGY_LOOP)); bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); rport->ns_retries = 0; bfa_fcs_rport_send_nsdisc(rport, NULL); @@ -422,7 +455,18 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event) } break; - case RPSM_EVENT_PLOGI_RETRY: + case RPSM_EVENT_SCN_ONLINE: + break; + + case RPSM_EVENT_SCN_OFFLINE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_fcxp_discard(rport->fcxp); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + + case RPSM_EVENT_PLOGI_RETRY: rport->plogi_retries = 0; bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry); bfa_timer_start(rport->fcs->bfa, &rport->timer, @@ -440,8 +484,10 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event) break; case RPSM_EVENT_ADDRESS_CHANGE: - case RPSM_EVENT_SCN: + case RPSM_EVENT_FAB_SCN: bfa_fcxp_discard(rport->fcxp); + WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) != + 
BFA_PORT_TOPOLOGY_LOOP)); bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); rport->ns_retries = 0; bfa_fcs_rport_send_nsdisc(rport, NULL); @@ -512,7 +558,8 @@ bfa_fcs_rport_sm_fc4_fcs_online(struct bfa_fcs_rport_s *rport, case RPSM_EVENT_PLOGI_COMP: case RPSM_EVENT_LOGO_IMP: case RPSM_EVENT_ADDRESS_CHANGE: - case RPSM_EVENT_SCN: + case RPSM_EVENT_FAB_SCN: + case RPSM_EVENT_SCN_OFFLINE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); bfa_fcs_rport_fcs_offline_action(rport); break; @@ -561,9 +608,10 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport, bfa_fcs_rport_fcs_offline_action(rport); break; - case RPSM_EVENT_SCN: + case RPSM_EVENT_FAB_SCN: case RPSM_EVENT_LOGO_IMP: case RPSM_EVENT_ADDRESS_CHANGE: + case RPSM_EVENT_SCN_OFFLINE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); bfa_fcs_rport_fcs_offline_action(rport); break; @@ -595,14 +643,15 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_trc(rport->fcs, event); switch (event) { - case RPSM_EVENT_SCN: + case RPSM_EVENT_FAB_SCN: if (bfa_fcs_fabric_is_switched(rport->port->fabric)) { bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsquery_sending); rport->ns_retries = 0; bfa_fcs_rport_send_nsdisc(rport, NULL); } else { - bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_sending); + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_adisc_online_sending); bfa_fcs_rport_send_adisc(rport, NULL); } break; @@ -610,6 +659,7 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event) case RPSM_EVENT_PLOGI_RCVD: case RPSM_EVENT_LOGO_IMP: case RPSM_EVENT_ADDRESS_CHANGE: + case RPSM_EVENT_SCN_OFFLINE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); bfa_fcs_rport_hal_offline_action(rport); break; @@ -625,6 +675,7 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_hal_offline_action(rport); break; + case RPSM_EVENT_SCN_ONLINE: case RPSM_EVENT_PLOGI_COMP: break; @@ -656,7 +707,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport, bfa_fcs_rport_hal_offline_action(rport); break; - case RPSM_EVENT_SCN: + case RPSM_EVENT_FAB_SCN: /* * ignore SCN, wait for response to query itself */ @@ -696,7 +747,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event) switch (event) { case RPSM_EVENT_ACCEPTED: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_sending); + bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_online_sending); bfa_fcs_rport_send_adisc(rport, NULL); break; @@ -718,7 +769,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_hal_offline_action(rport); break; - case RPSM_EVENT_SCN: + case RPSM_EVENT_FAB_SCN: break; case RPSM_EVENT_LOGO_RCVD: @@ -747,7 +798,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event) * authenticating with rport. FC-4s are paused. 
*/ static void -bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport, +bfa_fcs_rport_sm_adisc_online_sending(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); @@ -756,7 +807,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport, switch (event) { case RPSM_EVENT_FCXP_SENT: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc); + bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_online); break; case RPSM_EVENT_DELETE: @@ -779,7 +830,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport, bfa_fcs_rport_hal_offline_action(rport); break; - case RPSM_EVENT_SCN: + case RPSM_EVENT_FAB_SCN: break; case RPSM_EVENT_PLOGI_RCVD: @@ -798,7 +849,8 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport, * FC-4s are paused. */ static void -bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event) +bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport, + enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); @@ -831,7 +883,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_hal_offline_action(rport); break; - case RPSM_EVENT_SCN: + case RPSM_EVENT_FAB_SCN: /* * already processing RSCN */ @@ -856,7 +908,96 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event) } /* - * Rport has sent LOGO. Awaiting FC-4 offline completion callback. + * ADISC is being sent for authenticating with rport + * Already did offline actions. + */ +static void +bfa_fcs_rport_sm_adisc_offline_sending(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_FCXP_SENT: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_offline); + break; + + case RPSM_EVENT_DELETE: + case RPSM_EVENT_SCN_OFFLINE: + case RPSM_EVENT_LOGO_IMP: + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, + &rport->fcxp_wqe); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + + case RPSM_EVENT_PLOGI_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_send_plogiacc(rport, NULL); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * ADISC to rport + * Already did offline actions + */ +static void +bfa_fcs_rport_sm_adisc_offline(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_ACCEPTED: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); + bfa_fcs_rport_hal_online(rport); + break; + + case RPSM_EVENT_PLOGI_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_send_plogiacc(rport, NULL); + break; + + case RPSM_EVENT_FAILED: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + + case RPSM_EVENT_DELETE: + case RPSM_EVENT_SCN_OFFLINE: + case RPSM_EVENT_LOGO_IMP: + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_fcxp_discard(rport->fcxp); + 
bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/* + * Rport has sent LOGO. Awaiting FC-4 offline completion callback. */ static void bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, @@ -881,6 +1022,8 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete); break; + case RPSM_EVENT_SCN_ONLINE: + case RPSM_EVENT_SCN_OFFLINE: case RPSM_EVENT_HCB_ONLINE: case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: @@ -945,6 +1088,8 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport, bfa_fcs_rport_hal_offline(rport); break; + case RPSM_EVENT_SCN_ONLINE: + break; case RPSM_EVENT_LOGO_RCVD: /* * Rport is going offline. Just ack the logo @@ -956,8 +1101,9 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport, bfa_fcs_rport_send_prlo_acc(rport); break; + case RPSM_EVENT_SCN_OFFLINE: case RPSM_EVENT_HCB_ONLINE: - case RPSM_EVENT_SCN: + case RPSM_EVENT_FAB_SCN: case RPSM_EVENT_LOGO_IMP: case RPSM_EVENT_ADDRESS_CHANGE: /* @@ -1015,6 +1161,19 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, bfa_fcs_rport_sm_nsdisc_sending); rport->ns_retries = 0; bfa_fcs_rport_send_nsdisc(rport, NULL); + } else if (bfa_fcport_get_topology(rport->port->fcs->bfa) == + BFA_PORT_TOPOLOGY_LOOP) { + if (rport->scn_online) { + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_adisc_offline_sending); + bfa_fcs_rport_send_adisc(rport, NULL); + } else { + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_offline); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + } } else { bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); rport->plogi_retries = 0; @@ -1027,7 +1186,9 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, bfa_fcs_rport_free(rport); break; - case RPSM_EVENT_SCN: + case RPSM_EVENT_SCN_ONLINE: + case RPSM_EVENT_SCN_OFFLINE: + case RPSM_EVENT_FAB_SCN: case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: case RPSM_EVENT_PLOGI_RCVD: @@ -1106,6 +1267,8 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport, bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); break; + case RPSM_EVENT_SCN_ONLINE: + case RPSM_EVENT_SCN_OFFLINE: case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: /* @@ -1146,6 +1309,8 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport, bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending); break; + case RPSM_EVENT_SCN_ONLINE: + case RPSM_EVENT_SCN_OFFLINE: case RPSM_EVENT_ADDRESS_CHANGE: break; @@ -1172,7 +1337,9 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport, bfa_fcs_rport_free(rport); break; - case RPSM_EVENT_SCN: + case RPSM_EVENT_SCN_ONLINE: + case RPSM_EVENT_SCN_OFFLINE: + case RPSM_EVENT_FAB_SCN: case RPSM_EVENT_ADDRESS_CHANGE: break; @@ -1209,10 +1376,12 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_free(rport); break; - case RPSM_EVENT_SCN: + case RPSM_EVENT_FAB_SCN: case RPSM_EVENT_ADDRESS_CHANGE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); bfa_timer_stop(&rport->timer); + WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) != + BFA_PORT_TOPOLOGY_LOOP)); + bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); rport->ns_retries = 0; bfa_fcs_rport_send_nsdisc(rport, NULL); break; @@ -1232,6 +1401,7 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event) case 
RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: case RPSM_EVENT_LOGO_IMP: + case RPSM_EVENT_SCN_OFFLINE: break; case RPSM_EVENT_PLOGI_COMP: @@ -1240,6 +1410,12 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event) bfa_fcs_rport_fcs_online_action(rport); break; + case RPSM_EVENT_SCN_ONLINE: + bfa_timer_stop(&rport->timer); + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); + bfa_fcs_rport_send_plogi(rport, NULL); + break; + case RPSM_EVENT_PLOGI_SEND: bfa_timer_stop(&rport->timer); bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); @@ -1280,7 +1456,7 @@ bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport, bfa_fcs_rport_send_plogiacc(rport, NULL); break; - case RPSM_EVENT_SCN: + case RPSM_EVENT_FAB_SCN: case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: case RPSM_EVENT_PLOGI_SEND: @@ -1326,7 +1502,7 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport, bfa_fcs_rport_send_nsdisc(rport, NULL); break; - case RPSM_EVENT_SCN: + case RPSM_EVENT_FAB_SCN: case RPSM_EVENT_ADDRESS_CHANGE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); bfa_timer_stop(&rport->timer); @@ -1439,7 +1615,7 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport, case RPSM_EVENT_PRLO_RCVD: bfa_fcs_rport_send_prlo_acc(rport); break; - case RPSM_EVENT_SCN: + case RPSM_EVENT_FAB_SCN: /* * ignore, wait for NS query response */ @@ -2546,7 +2722,7 @@ void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport) { rport->stats.rscns++; - bfa_sm_send_event(rport, RPSM_EVENT_SCN); + bfa_sm_send_event(rport, RPSM_EVENT_FAB_SCN); } /* @@ -2621,6 +2797,48 @@ bfa_cb_rport_qos_scn_flowid(void *cbarg, bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data); } +void +bfa_cb_rport_scn_online(struct bfa_s *bfa) +{ + struct bfa_fcs_s *fcs = &((struct bfad_s *)bfa->bfad)->bfa_fcs; + struct bfa_fcs_lport_s *port = bfa_fcs_get_base_port(fcs); + struct bfa_fcs_rport_s *rp; + struct list_head *qe; + + list_for_each(qe, &port->rport_q) { + rp = (struct bfa_fcs_rport_s *) qe; + bfa_sm_send_event(rp, RPSM_EVENT_SCN_ONLINE); + rp->scn_online = BFA_TRUE; + } + + if (bfa_fcs_lport_is_online(port)) + bfa_fcs_lport_lip_scn_online(port); +} + +void +bfa_cb_rport_scn_no_dev(void *rport) +{ + struct bfa_fcs_rport_s *rp = rport; + + bfa_sm_send_event(rp, RPSM_EVENT_SCN_OFFLINE); + rp->scn_online = BFA_FALSE; +} + +void +bfa_cb_rport_scn_offline(struct bfa_s *bfa) +{ + struct bfa_fcs_s *fcs = &((struct bfad_s *)bfa->bfad)->bfa_fcs; + struct bfa_fcs_lport_s *port = bfa_fcs_get_base_port(fcs); + struct bfa_fcs_rport_s *rp; + struct list_head *qe; + + list_for_each(qe, &port->rport_q) { + rp = (struct bfa_fcs_rport_s *) qe; + bfa_sm_send_event(rp, RPSM_EVENT_SCN_OFFLINE); + rp->scn_online = BFA_FALSE; + } +} + /* * brief * This routine is a static BFA callback when there is a QoS priority @@ -2808,6 +3026,9 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport, struct bfa_rport_qos_attr_s qos_attr; struct bfa_fcs_lport_s *port = rport->port; bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed; + struct bfa_port_attr_s port_attr; + + bfa_fcport_get_attr(rport->fcs->bfa, &port_attr); memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s)); memset(&qos_attr, 0, sizeof(struct bfa_rport_qos_attr_s)); @@ -2838,7 +3059,8 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport, rport_speed = bfa_fcport_get_ratelim_speed(rport->fcs->bfa); - if (rport_speed < bfa_fcs_lport_get_rport_max_speed(port)) + if ((bfa_fcs_lport_get_rport_max_speed(port) != + BFA_PORT_SPEED_UNKNOWN) && 
(rport_speed < port_attr.speed)) rport_attr->trl_enforced = BFA_TRUE; } } diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index 75ca8752b9f..0116c1032e2 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -731,8 +731,7 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf) /* * Unlock the hw semaphore. Should be here only once per boot. */ - readl(iocpf->ioc->ioc_regs.ioc_sem_reg); - writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg); + bfa_ioc_ownership_reset(iocpf->ioc); /* * unlock init semaphore. @@ -1751,6 +1750,7 @@ bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc) attr->card_type = be32_to_cpu(attr->card_type); attr->maxfrsize = be16_to_cpu(attr->maxfrsize); ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC); + attr->mfg_year = be16_to_cpu(attr->mfg_year); bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); } @@ -2497,6 +2497,9 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, ad_attr->cna_capable = bfa_ioc_is_cna(ioc); ad_attr->trunk_capable = (ad_attr->nports > 1) && !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz; + ad_attr->mfg_day = ioc_attr->mfg_day; + ad_attr->mfg_month = ioc_attr->mfg_month; + ad_attr->mfg_year = ioc_attr->mfg_year; } enum bfa_ioc_type_e @@ -2923,7 +2926,7 @@ bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc) return; } - if (ioc->iocpf.poll_time >= BFA_IOC_TOV) + if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV)) bfa_iocpf_timeout(ioc); else { ioc->iocpf.poll_time += BFA_IOC_POLL_TOV; @@ -3016,7 +3019,6 @@ bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg) struct bfa_ablk_cfg_inst_s *cfg_inst; int i, j; u16 be16; - u32 be32; for (i = 0; i < BFA_ABLK_MAX; i++) { cfg_inst = &cfg->inst[i]; @@ -3027,8 +3029,10 @@ bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg) cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16); be16 = cfg_inst->pf_cfg[j].num_vectors; cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16); - be32 = cfg_inst->pf_cfg[j].bw; - cfg_inst->pf_cfg[j].bw = be16_to_cpu(be32); + be16 = cfg_inst->pf_cfg[j].bw_min; + cfg_inst->pf_cfg[j].bw_min = be16_to_cpu(be16); + be16 = cfg_inst->pf_cfg[j].bw_max; + cfg_inst->pf_cfg[j].bw_max = be16_to_cpu(be16); } } } @@ -3170,7 +3174,8 @@ bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg, bfa_status_t bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn, - u8 port, enum bfi_pcifn_class personality, int bw, + u8 port, enum bfi_pcifn_class personality, + u16 bw_min, u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg) { struct bfi_ablk_h2i_pf_req_s *m; @@ -3194,7 +3199,8 @@ bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn, bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE, bfa_ioc_portid(ablk->ioc)); m->pers = cpu_to_be16((u16)personality); - m->bw = cpu_to_be32(bw); + m->bw_min = cpu_to_be16(bw_min); + m->bw_max = cpu_to_be16(bw_max); m->port = port; bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); @@ -3294,8 +3300,8 @@ bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode, } bfa_status_t -bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw, - bfa_ablk_cbfn_t cbfn, void *cbarg) +bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, u16 bw_min, + u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg) { struct bfi_ablk_h2i_pf_req_s *m; @@ -3317,7 +3323,8 @@ bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw, bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE, bfa_ioc_portid(ablk->ioc)); m->pcifn = (u8)pcifn; - m->bw = cpu_to_be32(bw); + m->bw_min = cpu_to_be16(bw_min); + m->bw_max = cpu_to_be16(bw_max); bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); return 
BFA_STATUS_OK; @@ -4680,22 +4687,25 @@ diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp) diag->tsensor.temp->temp = be16_to_cpu(rsp->temp); diag->tsensor.temp->ts_junc = rsp->ts_junc; diag->tsensor.temp->ts_brd = rsp->ts_brd; - diag->tsensor.temp->status = BFA_STATUS_OK; if (rsp->ts_brd) { + /* tsensor.temp->status is brd_temp status */ + diag->tsensor.temp->status = rsp->status; if (rsp->status == BFA_STATUS_OK) { diag->tsensor.temp->brd_temp = be16_to_cpu(rsp->brd_temp); - } else { - bfa_trc(diag, rsp->status); + } else diag->tsensor.temp->brd_temp = 0; - diag->tsensor.temp->status = BFA_STATUS_DEVBUSY; - } } + + bfa_trc(diag, rsp->status); bfa_trc(diag, rsp->ts_junc); bfa_trc(diag, rsp->temp); bfa_trc(diag, rsp->ts_brd); bfa_trc(diag, rsp->brd_temp); + + /* tsensor status is always good bcos we always have junction temp */ + diag->tsensor.status = BFA_STATUS_OK; diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status); diag->tsensor.lock = 0; } @@ -4924,6 +4934,7 @@ bfa_diag_tsensor_query(struct bfa_diag_s *diag, diag->tsensor.temp = result; diag->tsensor.cbfn = cbfn; diag->tsensor.cbarg = cbarg; + diag->tsensor.status = BFA_STATUS_OK; /* Send msg to fw */ diag_tempsensor_send(diag); @@ -5615,7 +5626,7 @@ bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) } bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read); bfa_timer_start(dconf->bfa, &dconf->timer, - bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV); + bfa_dconf_timer, dconf, 2 * BFA_DCONF_UPDATE_TOV); bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa), BFA_FLASH_PART_DRV, dconf->instance, dconf->dconf, @@ -5655,7 +5666,7 @@ bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf, break; case BFA_DCONF_SM_TIMEOUT: bfa_sm_set_state(dconf, bfa_dconf_sm_ready); - bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_IOC_FAILED); + bfa_ioc_suspend(&dconf->bfa->ioc); break; case BFA_DCONF_SM_EXIT: bfa_timer_stop(&dconf->timer); @@ -5853,7 +5864,6 @@ bfa_dconf_init_cb(void *arg, bfa_status_t status) struct bfa_s *bfa = arg; struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); - bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP); if (status == BFA_STATUS_OK) { bfa_dconf_read_data_valid(bfa) = BFA_TRUE; if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE) @@ -5861,6 +5871,7 @@ bfa_dconf_init_cb(void *arg, bfa_status_t status) if (dconf->dconf->hdr.version != BFI_DCONF_VERSION) dconf->dconf->hdr.version = BFI_DCONF_VERSION; } + bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP); bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE); } @@ -5945,3 +5956,448 @@ bfa_dconf_modexit(struct bfa_s *bfa) struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT); } + +/* + * FRU specific functions + */ + +#define BFA_FRU_DMA_BUF_SZ 0x02000 /* 8k dma buffer */ +#define BFA_FRU_CHINOOK_MAX_SIZE 0x10000 +#define BFA_FRU_LIGHTNING_MAX_SIZE 0x200 + +static void +bfa_fru_notify(void *cbarg, enum bfa_ioc_event_e event) +{ + struct bfa_fru_s *fru = cbarg; + + bfa_trc(fru, event); + + switch (event) { + case BFA_IOC_E_DISABLED: + case BFA_IOC_E_FAILED: + if (fru->op_busy) { + fru->status = BFA_STATUS_IOC_FAILURE; + fru->cbfn(fru->cbarg, fru->status); + fru->op_busy = 0; + } + break; + + default: + break; + } +} + +/* + * Send fru write request. 
+ * + * @param[in] cbarg - callback argument + */ +static void +bfa_fru_write_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type) +{ + struct bfa_fru_s *fru = cbarg; + struct bfi_fru_write_req_s *msg = + (struct bfi_fru_write_req_s *) fru->mb.msg; + u32 len; + + msg->offset = cpu_to_be32(fru->addr_off + fru->offset); + len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ? + fru->residue : BFA_FRU_DMA_BUF_SZ; + msg->length = cpu_to_be32(len); + + /* + * indicate if it's the last msg of the whole write operation + */ + msg->last = (len == fru->residue) ? 1 : 0; + + bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc)); + bfa_alen_set(&msg->alen, len, fru->dbuf_pa); + + memcpy(fru->dbuf_kva, fru->ubuf + fru->offset, len); + bfa_ioc_mbox_queue(fru->ioc, &fru->mb); + + fru->residue -= len; + fru->offset += len; +} + +/* + * Send fru read request. + * + * @param[in] cbarg - callback argument + */ +static void +bfa_fru_read_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type) +{ + struct bfa_fru_s *fru = cbarg; + struct bfi_fru_read_req_s *msg = + (struct bfi_fru_read_req_s *) fru->mb.msg; + u32 len; + + msg->offset = cpu_to_be32(fru->addr_off + fru->offset); + len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ? + fru->residue : BFA_FRU_DMA_BUF_SZ; + msg->length = cpu_to_be32(len); + bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc)); + bfa_alen_set(&msg->alen, len, fru->dbuf_pa); + bfa_ioc_mbox_queue(fru->ioc, &fru->mb); +} + +/* + * Flash memory info API. + * + * @param[in] mincfg - minimal cfg variable + */ +u32 +bfa_fru_meminfo(bfa_boolean_t mincfg) +{ + /* min driver doesn't need fru */ + if (mincfg) + return 0; + + return BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); +} + +/* + * Flash attach API. + * + * @param[in] fru - fru structure + * @param[in] ioc - ioc structure + * @param[in] dev - device structure + * @param[in] trcmod - trace module + * @param[in] logmod - log module + */ +void +bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc, void *dev, + struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg) +{ + fru->ioc = ioc; + fru->trcmod = trcmod; + fru->cbfn = NULL; + fru->cbarg = NULL; + fru->op_busy = 0; + + bfa_ioc_mbox_regisr(fru->ioc, BFI_MC_FRU, bfa_fru_intr, fru); + bfa_q_qe_init(&fru->ioc_notify); + bfa_ioc_notify_init(&fru->ioc_notify, bfa_fru_notify, fru); + list_add_tail(&fru->ioc_notify.qe, &fru->ioc->notify_q); + + /* min driver doesn't need fru */ + if (mincfg) { + fru->dbuf_kva = NULL; + fru->dbuf_pa = 0; + } +} + +/* + * Claim memory for fru + * + * @param[in] fru - fru structure + * @param[in] dm_kva - pointer to virtual memory address + * @param[in] dm_pa - frusical memory address + * @param[in] mincfg - minimal cfg variable + */ +void +bfa_fru_memclaim(struct bfa_fru_s *fru, u8 *dm_kva, u64 dm_pa, + bfa_boolean_t mincfg) +{ + if (mincfg) + return; + + fru->dbuf_kva = dm_kva; + fru->dbuf_pa = dm_pa; + memset(fru->dbuf_kva, 0, BFA_FRU_DMA_BUF_SZ); + dm_kva += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); + dm_pa += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); +} + +/* + * Update fru vpd image. + * + * @param[in] fru - fru structure + * @param[in] buf - update data buffer + * @param[in] len - data buffer length + * @param[in] offset - offset relative to starting address + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. 
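bfa_fru_write_send() and bfa_fru_read_send() above move the user buffer through a single 8 KB DMA bounce buffer (BFA_FRU_DMA_BUF_SZ): each mailbox request carries at most 8 KB, the residue/offset pair tracks progress, and the write path marks the final request with msg->last so the firmware knows the image is complete. The standalone sketch below reproduces only that chunking arithmetic outside the driver; the helper name and the plain C types are illustrative, not part of the BFA API.

#include <stdint.h>
#include <stdio.h>

#define FRU_DMA_BUF_SZ 0x2000   /* mirrors BFA_FRU_DMA_BUF_SZ (8 KB) */

/*
 * Compute the next chunk for a transfer with 'residue' bytes left.
 * Returns the chunk length and sets *last when this is the final message,
 * matching the len/msg->last logic in bfa_fru_write_send().
 */
static uint32_t fru_next_chunk(uint32_t residue, int *last)
{
        uint32_t len = (residue < FRU_DMA_BUF_SZ) ? residue : FRU_DMA_BUF_SZ;

        *last = (len == residue);
        return len;
}

int main(void)
{
        uint32_t residue = 0x5000;      /* e.g. a 20 KB VPD update */
        uint32_t offset = 0;
        int last = 0;

        while (residue) {
                uint32_t len = fru_next_chunk(residue, &last);

                printf("msg: offset=0x%x len=0x%x last=%d\n",
                       offset, len, last);
                residue -= len;
                offset += len;
        }
        return 0;       /* prints three messages: 8K, 8K, 4K (last=1) */
}

By the same arithmetic, a full Chinook VPD image (BFA_FRU_CHINOOK_MAX_SIZE, 64 KB) would take eight write requests of this size.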
+ */ +bfa_status_t +bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset, + bfa_cb_fru_t cbfn, void *cbarg) +{ + bfa_trc(fru, BFI_FRUVPD_H2I_WRITE_REQ); + bfa_trc(fru, len); + bfa_trc(fru, offset); + + if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) + return BFA_STATUS_FRU_NOT_PRESENT; + + if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK) + return BFA_STATUS_CMD_NOTSUPP; + + if (!bfa_ioc_is_operational(fru->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (fru->op_busy) { + bfa_trc(fru, fru->op_busy); + return BFA_STATUS_DEVBUSY; + } + + fru->op_busy = 1; + + fru->cbfn = cbfn; + fru->cbarg = cbarg; + fru->residue = len; + fru->offset = 0; + fru->addr_off = offset; + fru->ubuf = buf; + + bfa_fru_write_send(fru, BFI_FRUVPD_H2I_WRITE_REQ); + + return BFA_STATUS_OK; +} + +/* + * Read fru vpd image. + * + * @param[in] fru - fru structure + * @param[in] buf - read data buffer + * @param[in] len - data buffer length + * @param[in] offset - offset relative to starting address + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. + */ +bfa_status_t +bfa_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset, + bfa_cb_fru_t cbfn, void *cbarg) +{ + bfa_trc(fru, BFI_FRUVPD_H2I_READ_REQ); + bfa_trc(fru, len); + bfa_trc(fru, offset); + + if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) + return BFA_STATUS_FRU_NOT_PRESENT; + + if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK) + return BFA_STATUS_CMD_NOTSUPP; + + if (!bfa_ioc_is_operational(fru->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (fru->op_busy) { + bfa_trc(fru, fru->op_busy); + return BFA_STATUS_DEVBUSY; + } + + fru->op_busy = 1; + + fru->cbfn = cbfn; + fru->cbarg = cbarg; + fru->residue = len; + fru->offset = 0; + fru->addr_off = offset; + fru->ubuf = buf; + bfa_fru_read_send(fru, BFI_FRUVPD_H2I_READ_REQ); + + return BFA_STATUS_OK; +} + +/* + * Get maximum size fru vpd image. + * + * @param[in] fru - fru structure + * @param[out] size - maximum size of fru vpd data + * + * Return status. + */ +bfa_status_t +bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size) +{ + if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) + return BFA_STATUS_FRU_NOT_PRESENT; + + if (!bfa_ioc_is_operational(fru->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK) + *max_size = BFA_FRU_CHINOOK_MAX_SIZE; + else + return BFA_STATUS_CMD_NOTSUPP; + return BFA_STATUS_OK; +} +/* + * tfru write. + * + * @param[in] fru - fru structure + * @param[in] buf - update data buffer + * @param[in] len - data buffer length + * @param[in] offset - offset relative to starting address + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. + */ +bfa_status_t +bfa_tfru_write(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset, + bfa_cb_fru_t cbfn, void *cbarg) +{ + bfa_trc(fru, BFI_TFRU_H2I_WRITE_REQ); + bfa_trc(fru, len); + bfa_trc(fru, offset); + bfa_trc(fru, *((u8 *) buf)); + + if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) + return BFA_STATUS_FRU_NOT_PRESENT; + + if (!bfa_ioc_is_operational(fru->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (fru->op_busy) { + bfa_trc(fru, fru->op_busy); + return BFA_STATUS_DEVBUSY; + } + + fru->op_busy = 1; + + fru->cbfn = cbfn; + fru->cbarg = cbarg; + fru->residue = len; + fru->offset = 0; + fru->addr_off = offset; + fru->ubuf = buf; + + bfa_fru_write_send(fru, BFI_TFRU_H2I_WRITE_REQ); + + return BFA_STATUS_OK; +} + +/* + * tfru read. 
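bfa_fruvpd_update() and bfa_fruvpd_read() above apply the same gating before queuing a request: the feature exists only on CT2 ASICs, VPD access is further limited to Chinook cards, the IOC must be operational, and only one FRU operation may be in flight; the bfa_tfru_*() pair skip the Chinook check but keep the rest. The sketch below collapses that ladder into one standalone function; the function name and the boolean inputs are stand-ins for illustration, and the returned strings are just the names of the BFA status codes the driver uses.

#include <stdbool.h>
#include <stdio.h>

/*
 * Standalone restatement of the checks at the top of bfa_fruvpd_update()
 * and bfa_fruvpd_read(); returns the name of the status the driver would
 * report for the given conditions.
 */
static const char *fruvpd_request_allowed(bool is_ct2, bool is_chinook,
                                          bool ioc_operational, bool op_busy)
{
        if (!is_ct2)
                return "BFA_STATUS_FRU_NOT_PRESENT";
        if (!is_chinook)
                return "BFA_STATUS_CMD_NOTSUPP";
        if (!ioc_operational)
                return "BFA_STATUS_IOC_NON_OP";
        if (op_busy)
                return "BFA_STATUS_DEVBUSY";
        return "BFA_STATUS_OK";
}

int main(void)
{
        /* a CT2/Chinook card with a healthy, idle IOC may start a transfer */
        printf("%s\n", fruvpd_request_allowed(true, true, true, false));
        return 0;
}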
+ * + * @param[in] fru - fru structure + * @param[in] buf - read data buffer + * @param[in] len - data buffer length + * @param[in] offset - offset relative to starting address + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. + */ +bfa_status_t +bfa_tfru_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset, + bfa_cb_fru_t cbfn, void *cbarg) +{ + bfa_trc(fru, BFI_TFRU_H2I_READ_REQ); + bfa_trc(fru, len); + bfa_trc(fru, offset); + + if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) + return BFA_STATUS_FRU_NOT_PRESENT; + + if (!bfa_ioc_is_operational(fru->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (fru->op_busy) { + bfa_trc(fru, fru->op_busy); + return BFA_STATUS_DEVBUSY; + } + + fru->op_busy = 1; + + fru->cbfn = cbfn; + fru->cbarg = cbarg; + fru->residue = len; + fru->offset = 0; + fru->addr_off = offset; + fru->ubuf = buf; + bfa_fru_read_send(fru, BFI_TFRU_H2I_READ_REQ); + + return BFA_STATUS_OK; +} + +/* + * Process fru response messages upon receiving interrupts. + * + * @param[in] fruarg - fru structure + * @param[in] msg - message structure + */ +void +bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg) +{ + struct bfa_fru_s *fru = fruarg; + struct bfi_fru_rsp_s *rsp = (struct bfi_fru_rsp_s *)msg; + u32 status; + + bfa_trc(fru, msg->mh.msg_id); + + if (!fru->op_busy) { + /* + * receiving response after ioc failure + */ + bfa_trc(fru, 0x9999); + return; + } + + switch (msg->mh.msg_id) { + case BFI_FRUVPD_I2H_WRITE_RSP: + case BFI_TFRU_I2H_WRITE_RSP: + status = be32_to_cpu(rsp->status); + bfa_trc(fru, status); + + if (status != BFA_STATUS_OK || fru->residue == 0) { + fru->status = status; + fru->op_busy = 0; + if (fru->cbfn) + fru->cbfn(fru->cbarg, fru->status); + } else { + bfa_trc(fru, fru->offset); + if (msg->mh.msg_id == BFI_FRUVPD_I2H_WRITE_RSP) + bfa_fru_write_send(fru, + BFI_FRUVPD_H2I_WRITE_REQ); + else + bfa_fru_write_send(fru, + BFI_TFRU_H2I_WRITE_REQ); + } + break; + case BFI_FRUVPD_I2H_READ_RSP: + case BFI_TFRU_I2H_READ_RSP: + status = be32_to_cpu(rsp->status); + bfa_trc(fru, status); + + if (status != BFA_STATUS_OK) { + fru->status = status; + fru->op_busy = 0; + if (fru->cbfn) + fru->cbfn(fru->cbarg, fru->status); + } else { + u32 len = be32_to_cpu(rsp->length); + + bfa_trc(fru, fru->offset); + bfa_trc(fru, len); + + memcpy(fru->ubuf + fru->offset, fru->dbuf_kva, len); + fru->residue -= len; + fru->offset += len; + + if (fru->residue == 0) { + fru->status = status; + fru->op_busy = 0; + if (fru->cbfn) + fru->cbfn(fru->cbarg, fru->status); + } else { + if (msg->mh.msg_id == BFI_FRUVPD_I2H_READ_RSP) + bfa_fru_read_send(fru, + BFI_FRUVPD_H2I_READ_REQ); + else + bfa_fru_read_send(fru, + BFI_TFRU_H2I_READ_REQ); + } + } + break; + default: + WARN_ON(1); + } +} diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h index b2856f96567..23a90e7b710 100644 --- a/drivers/scsi/bfa/bfa_ioc.h +++ b/drivers/scsi/bfa/bfa_ioc.h @@ -702,6 +702,55 @@ void bfa_phy_memclaim(struct bfa_phy_s *phy, void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg); /* + * FRU module specific + */ +typedef void (*bfa_cb_fru_t) (void *cbarg, bfa_status_t status); + +struct bfa_fru_s { + struct bfa_ioc_s *ioc; /* back pointer to ioc */ + struct bfa_trc_mod_s *trcmod; /* trace module */ + u8 op_busy; /* operation busy flag */ + u8 rsv[3]; + u32 residue; /* residual length */ + u32 offset; /* offset */ + bfa_status_t status; /* status */ + u8 *dbuf_kva; /* dma buf virtual address */ + u64 dbuf_pa; /* dma buf physical address */ + struct 
bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ + bfa_cb_fru_t cbfn; /* user callback function */ + void *cbarg; /* user callback arg */ + u8 *ubuf; /* user supplied buffer */ + struct bfa_cb_qe_s hcb_qe; /* comp: BFA callback qelem */ + u32 addr_off; /* fru address offset */ + struct bfa_mbox_cmd_s mb; /* mailbox */ + struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */ + struct bfa_mem_dma_s fru_dma; +}; + +#define BFA_FRU(__bfa) (&(__bfa)->modules.fru) +#define BFA_MEM_FRU_DMA(__bfa) (&(BFA_FRU(__bfa)->fru_dma)) + +bfa_status_t bfa_fruvpd_update(struct bfa_fru_s *fru, + void *buf, u32 len, u32 offset, + bfa_cb_fru_t cbfn, void *cbarg); +bfa_status_t bfa_fruvpd_read(struct bfa_fru_s *fru, + void *buf, u32 len, u32 offset, + bfa_cb_fru_t cbfn, void *cbarg); +bfa_status_t bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size); +bfa_status_t bfa_tfru_write(struct bfa_fru_s *fru, + void *buf, u32 len, u32 offset, + bfa_cb_fru_t cbfn, void *cbarg); +bfa_status_t bfa_tfru_read(struct bfa_fru_s *fru, + void *buf, u32 len, u32 offset, + bfa_cb_fru_t cbfn, void *cbarg); +u32 bfa_fru_meminfo(bfa_boolean_t mincfg); +void bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc, + void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg); +void bfa_fru_memclaim(struct bfa_fru_s *fru, + u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg); +void bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg); + +/* * Driver Config( dconf) specific */ #define BFI_DCONF_SIGNATURE 0xabcdabcd @@ -716,6 +765,7 @@ struct bfa_dconf_hdr_s { struct bfa_dconf_s { struct bfa_dconf_hdr_s hdr; struct bfa_lunmask_cfg_s lun_mask; + struct bfa_throttle_cfg_s throttle_cfg; }; #pragma pack() @@ -738,6 +788,8 @@ struct bfa_dconf_mod_s { #define bfa_dconf_read_data_valid(__bfa) \ (BFA_DCONF_MOD(__bfa)->read_data_valid) #define BFA_DCONF_UPDATE_TOV 5000 /* memtest timeout in msec */ +#define bfa_dconf_get_min_cfg(__bfa) \ + (BFA_DCONF_MOD(__bfa)->min_cfg) void bfa_dconf_modinit(struct bfa_s *bfa); void bfa_dconf_modexit(struct bfa_s *bfa); @@ -761,7 +813,8 @@ bfa_status_t bfa_dconf_update(struct bfa_s *bfa); #define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize) #define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit) #define bfa_ioc_speed_sup(__ioc) \ - BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop) + ((bfa_ioc_is_cna(__ioc)) ? 
BFA_PORT_SPEED_10GBPS : \ + BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)) #define bfa_ioc_get_nports(__ioc) \ BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop) @@ -885,12 +938,12 @@ bfa_status_t bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode, int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg); bfa_status_t bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn, - u8 port, enum bfi_pcifn_class personality, int bw, - bfa_ablk_cbfn_t cbfn, void *cbarg); + u8 port, enum bfi_pcifn_class personality, + u16 bw_min, u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg); bfa_status_t bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn, bfa_ablk_cbfn_t cbfn, void *cbarg); -bfa_status_t bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw, - bfa_ablk_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, + u16 bw_min, u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg); bfa_status_t bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg); bfa_status_t bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c index 2eb0c6a2938..de4e726a126 100644 --- a/drivers/scsi/bfa/bfa_ioc_ct.c +++ b/drivers/scsi/bfa/bfa_ioc_ct.c @@ -57,13 +57,6 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc) u32 usecnt; struct bfi_ioc_image_hdr_s fwhdr; - /* - * If bios boot (flash based) -- do not increment usage count - */ - if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) < - BFA_IOC_FWIMG_MINSZ) - return BFA_TRUE; - bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); usecnt = readl(ioc->ioc_regs.ioc_usage_reg); @@ -115,13 +108,6 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc) u32 usecnt; /* - * If bios boot (flash based) -- do not decrement usage count - */ - if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) < - BFA_IOC_FWIMG_MINSZ) - return; - - /* * decrement usage count */ bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); @@ -400,13 +386,12 @@ static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc) { - if (bfa_ioc_is_cna(ioc)) { - bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); - writel(0, ioc->ioc_regs.ioc_usage_reg); - readl(ioc->ioc_regs.ioc_usage_sem_reg); - writel(1, ioc->ioc_regs.ioc_usage_sem_reg); - } + bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); + writel(0, ioc->ioc_regs.ioc_usage_reg); + readl(ioc->ioc_regs.ioc_usage_sem_reg); + writel(1, ioc->ioc_regs.ioc_usage_sem_reg); + writel(0, ioc->ioc_regs.ioc_fail_sync); /* * Read the hw sem reg to make sure that it is locked * before we clear it. 
If it is not locked, writing 1 @@ -759,25 +744,6 @@ bfa_ioc_ct2_mem_init(void __iomem *rb) void bfa_ioc_ct2_mac_reset(void __iomem *rb) { - u32 r32; - - bfa_ioc_ct2_sclk_init(rb); - bfa_ioc_ct2_lclk_init(rb); - - /* - * release soft reset on s_clk & l_clk - */ - r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); - writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET, - (rb + CT2_APP_PLL_SCLK_CTL_REG)); - - /* - * release soft reset on s_clk & l_clk - */ - r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); - writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET, - (rb + CT2_APP_PLL_LCLK_CTL_REG)); - /* put port0, port1 MAC & AHB in reset */ writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET), rb + CT2_CSI_MAC_CONTROL_REG(0)); @@ -785,8 +751,21 @@ bfa_ioc_ct2_mac_reset(void __iomem *rb) rb + CT2_CSI_MAC_CONTROL_REG(1)); } +static void +bfa_ioc_ct2_enable_flash(void __iomem *rb) +{ + u32 r32; + + r32 = readl((rb + PSS_GPIO_OUT_REG)); + writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG)); + r32 = readl((rb + PSS_GPIO_OE_REG)); + writel(r32 | 1, (rb + PSS_GPIO_OE_REG)); +} + #define CT2_NFC_MAX_DELAY 1000 -#define CT2_NFC_VER_VALID 0x143 +#define CT2_NFC_PAUSE_MAX_DELAY 4000 +#define CT2_NFC_VER_VALID 0x147 +#define CT2_NFC_STATE_RUNNING 0x20000001 #define BFA_IOC_PLL_POLL 1000000 static bfa_boolean_t @@ -802,6 +781,20 @@ bfa_ioc_ct2_nfc_halted(void __iomem *rb) } static void +bfa_ioc_ct2_nfc_halt(void __iomem *rb) +{ + int i; + + writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG); + for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { + if (bfa_ioc_ct2_nfc_halted(rb)) + break; + udelay(1000); + } + WARN_ON(!bfa_ioc_ct2_nfc_halted(rb)); +} + +static void bfa_ioc_ct2_nfc_resume(void __iomem *rb) { u32 r32; @@ -817,105 +810,142 @@ bfa_ioc_ct2_nfc_resume(void __iomem *rb) WARN_ON(1); } -bfa_status_t -bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode) +static void +bfa_ioc_ct2_clk_reset(void __iomem *rb) { - u32 wgn, r32, nfc_ver, i; + u32 r32; - wgn = readl(rb + CT2_WGN_STATUS); - nfc_ver = readl(rb + CT2_RSC_GPR15_REG); + bfa_ioc_ct2_sclk_init(rb); + bfa_ioc_ct2_lclk_init(rb); - if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) && - (nfc_ver >= CT2_NFC_VER_VALID)) { - if (bfa_ioc_ct2_nfc_halted(rb)) - bfa_ioc_ct2_nfc_resume(rb); + /* + * release soft reset on s_clk & l_clk + */ + r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); + writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET, + (rb + CT2_APP_PLL_SCLK_CTL_REG)); - writel(__RESET_AND_START_SCLK_LCLK_PLLS, - rb + CT2_CSI_FW_CTL_SET_REG); + r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); + writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET, + (rb + CT2_APP_PLL_LCLK_CTL_REG)); - for (i = 0; i < BFA_IOC_PLL_POLL; i++) { - r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); - if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS) - break; - } +} - WARN_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS)); +static void +bfa_ioc_ct2_nfc_clk_reset(void __iomem *rb) +{ + u32 r32, i; - for (i = 0; i < BFA_IOC_PLL_POLL; i++) { - r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); - if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS)) - break; - } + r32 = readl((rb + PSS_CTL_REG)); + r32 |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET); + writel(r32, (rb + PSS_CTL_REG)); + + writel(__RESET_AND_START_SCLK_LCLK_PLLS, rb + CT2_CSI_FW_CTL_SET_REG); - WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS); + for (i = 0; i < BFA_IOC_PLL_POLL; i++) { + r32 = readl(rb + CT2_NFC_FLASH_STS_REG); + + if ((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS)) + break; + } + WARN_ON(!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS)); + + for (i = 0; i < BFA_IOC_PLL_POLL; i++) { + r32 
= readl(rb + CT2_NFC_FLASH_STS_REG); + + if (!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS)) + break; + } + WARN_ON((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS)); + + r32 = readl(rb + CT2_CSI_FW_CTL_REG); + WARN_ON((r32 & __RESET_AND_START_SCLK_LCLK_PLLS)); +} + +static void +bfa_ioc_ct2_wait_till_nfc_running(void __iomem *rb) +{ + u32 r32; + int i; + + if (bfa_ioc_ct2_nfc_halted(rb)) + bfa_ioc_ct2_nfc_resume(rb); + for (i = 0; i < CT2_NFC_PAUSE_MAX_DELAY; i++) { + r32 = readl(rb + CT2_NFC_STS_REG); + if (r32 == CT2_NFC_STATE_RUNNING) + return; udelay(1000); + } - r32 = readl(rb + CT2_CSI_FW_CTL_REG); - WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS); - } else { - writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG); - for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { - r32 = readl(rb + CT2_NFC_CSR_SET_REG); - if (r32 & __NFC_CONTROLLER_HALTED) - break; - udelay(1000); - } + r32 = readl(rb + CT2_NFC_STS_REG); + WARN_ON(!(r32 == CT2_NFC_STATE_RUNNING)); +} - bfa_ioc_ct2_mac_reset(rb); - bfa_ioc_ct2_sclk_init(rb); - bfa_ioc_ct2_lclk_init(rb); +bfa_status_t +bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode) +{ + u32 wgn, r32, nfc_ver; - /* - * release soft reset on s_clk & l_clk - */ - r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG); - writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET, - (rb + CT2_APP_PLL_SCLK_CTL_REG)); + wgn = readl(rb + CT2_WGN_STATUS); + if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) { /* - * release soft reset on s_clk & l_clk + * If flash is corrupted, enable flash explicitly */ - r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); - writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET, - (rb + CT2_APP_PLL_LCLK_CTL_REG)); - } + bfa_ioc_ct2_clk_reset(rb); + bfa_ioc_ct2_enable_flash(rb); - /* - * Announce flash device presence, if flash was corrupted. - */ - if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) { - r32 = readl(rb + PSS_GPIO_OUT_REG); - writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG)); - r32 = readl(rb + PSS_GPIO_OE_REG); - writel(r32 | 1, (rb + PSS_GPIO_OE_REG)); + bfa_ioc_ct2_mac_reset(rb); + + bfa_ioc_ct2_clk_reset(rb); + bfa_ioc_ct2_enable_flash(rb); + + } else { + nfc_ver = readl(rb + CT2_RSC_GPR15_REG); + + if ((nfc_ver >= CT2_NFC_VER_VALID) && + (wgn == (__A2T_AHB_LOAD | __WGN_READY))) { + + bfa_ioc_ct2_wait_till_nfc_running(rb); + + bfa_ioc_ct2_nfc_clk_reset(rb); + } else { + bfa_ioc_ct2_nfc_halt(rb); + + bfa_ioc_ct2_clk_reset(rb); + bfa_ioc_ct2_mac_reset(rb); + bfa_ioc_ct2_clk_reset(rb); + + } } /* * Mask the interrupts and clear any - * pending interrupts. 
+ * pending interrupts left by BIOS/EFI */ + writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK)); writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK)); /* For first time initialization, no need to clear interrupts */ r32 = readl(rb + HOST_SEM5_REG); if (r32 & 0x1) { - r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT); + r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); if (r32 == 1) { - writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT); + writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT)); readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); } - r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT); + r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); if (r32 == 1) { - writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT); - readl(rb + CT2_LPU1_HOSTFN_CMD_STAT); + writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT)); + readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); } } bfa_ioc_ct2_mem_init(rb); - writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG); - writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG); + writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG)); + writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG)); return BFA_STATUS_OK; } diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h index 189fff71e3c..a14c784ff3f 100644 --- a/drivers/scsi/bfa/bfa_modules.h +++ b/drivers/scsi/bfa/bfa_modules.h @@ -45,6 +45,7 @@ struct bfa_modules_s { struct bfa_diag_s diag_mod; /* diagnostics module */ struct bfa_phy_s phy; /* phy module */ struct bfa_dconf_mod_s dconf_mod; /* DCONF common module */ + struct bfa_fru_s fru; /* fru module */ }; /* diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c index 95e4ad8759a..8ea7697deb9 100644 --- a/drivers/scsi/bfa/bfa_port.c +++ b/drivers/scsi/bfa/bfa_port.c @@ -250,6 +250,12 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn, return BFA_STATUS_IOC_FAILURE; } + /* if port is d-port enabled, return error */ + if (port->dport_enabled) { + bfa_trc(port, BFA_STATUS_DPORT_ERR); + return BFA_STATUS_DPORT_ERR; + } + if (port->endis_pending) { bfa_trc(port, BFA_STATUS_DEVBUSY); return BFA_STATUS_DEVBUSY; @@ -300,6 +306,12 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn, return BFA_STATUS_IOC_FAILURE; } + /* if port is d-port enabled, return error */ + if (port->dport_enabled) { + bfa_trc(port, BFA_STATUS_DPORT_ERR); + return BFA_STATUS_DPORT_ERR; + } + if (port->endis_pending) { bfa_trc(port, BFA_STATUS_DEVBUSY); return BFA_STATUS_DEVBUSY; @@ -431,6 +443,10 @@ bfa_port_notify(void *arg, enum bfa_ioc_event_e event) port->endis_cbfn = NULL; port->endis_pending = BFA_FALSE; } + + /* clear D-port mode */ + if (port->dport_enabled) + bfa_port_set_dportenabled(port, BFA_FALSE); break; default: break; @@ -467,6 +483,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, port->stats_cbfn = NULL; port->endis_cbfn = NULL; port->pbc_disabled = BFA_FALSE; + port->dport_enabled = BFA_FALSE; bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port); bfa_q_qe_init(&port->ioc_notify); @@ -483,6 +500,21 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, } /* + * bfa_port_set_dportenabled(); + * + * Port module- set pbc disabled flag + * + * @param[in] port - Pointer to the Port module data structure + * + * @return void + */ +void +bfa_port_set_dportenabled(struct bfa_port_s *port, bfa_boolean_t enabled) +{ + port->dport_enabled = enabled; +} + +/* * CEE module specific definitions */ diff --git a/drivers/scsi/bfa/bfa_port.h b/drivers/scsi/bfa/bfa_port.h index 947f897328d..2fcab6bc628 100644 --- a/drivers/scsi/bfa/bfa_port.h +++ b/drivers/scsi/bfa/bfa_port.h @@ 
-45,6 +45,7 @@ struct bfa_port_s { bfa_status_t endis_status; struct bfa_ioc_notify_s ioc_notify; bfa_boolean_t pbc_disabled; + bfa_boolean_t dport_enabled; struct bfa_mem_dma_s port_dma; }; @@ -66,6 +67,8 @@ bfa_status_t bfa_port_disable(struct bfa_port_s *port, u32 bfa_port_meminfo(void); void bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa); +void bfa_port_set_dportenabled(struct bfa_port_s *port, + bfa_boolean_t enabled); /* * CEE declaration diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c index b2538d60db3..299c1c889b3 100644 --- a/drivers/scsi/bfa/bfa_svc.c +++ b/drivers/scsi/bfa/bfa_svc.c @@ -67,6 +67,9 @@ enum bfa_fcport_sm_event { BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkup down */ BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */ BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */ + BFA_FCPORT_SM_DPORTENABLE = 10, /* enable dport */ + BFA_FCPORT_SM_DPORTDISABLE = 11,/* disable dport */ + BFA_FCPORT_SM_FAA_MISCONFIG = 12, /* FAA misconfiguratin */ }; /* @@ -197,6 +200,10 @@ static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event); @@ -226,6 +233,8 @@ static struct bfa_sm_table_s hal_port_sm_table[] = { {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED}, {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN}, {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN}, + {BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT}, + {BFA_SM(bfa_fcport_sm_faa_misconfig), BFA_PORT_ST_FAA_MISCONFIG}, }; @@ -1244,6 +1253,12 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event) * Just ignore */ break; + case BFA_LPS_SM_SET_N2N_PID: + /* + * When topology is set to loop, bfa_lps_set_n2n_pid() sends + * this event. Ignore this event. 
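The three fcport state-machine events introduced above are handled uniformly in the port states that follow: BFA_FCPORT_SM_DPORTENABLE/DPORTDISABLE move the port in and out of the d-port state (reported as BFA_PORT_ST_DPORT), and BFA_FCPORT_SM_FAA_MISCONFIG resets the link info, posts a BFA_PORT_AEN_DISCONNECT event and parks the port in the faa_misconfig state. The fragment below is only a schematic restatement of that shared FAA_MISCONFIG transition with trivial stand-in enums; the real driver uses bfa_fcport_sm_* handler functions as states, not an enum.

#include <stdio.h>

/* Illustrative stand-ins for a few of the fcport states. */
enum port_state { ST_ENABLING, ST_LINKDOWN, ST_LINKUP, ST_FAA_MISCONFIG };

static enum port_state handle_faa_misconfig(enum port_state cur)
{
        switch (cur) {
        case ST_ENABLING:
        case ST_LINKDOWN:
        case ST_LINKUP:
                /* driver side: reset link info, post DISCONNECT AEN */
                return ST_FAA_MISCONFIG;
        default:
                return cur;     /* other states not modeled in this sketch */
        }
}

int main(void)
{
        printf("%d\n", handle_faa_misconfig(ST_LINKUP) == ST_FAA_MISCONFIG);
        return 0;
}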
+ */ + break; default: bfa_sm_fault(lps->bfa, event); @@ -1261,6 +1276,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event) switch (event) { case BFA_LPS_SM_FWRSP: + case BFA_LPS_SM_OFFLINE: if (lps->status == BFA_STATUS_OK) { bfa_sm_set_state(lps, bfa_lps_sm_online); if (lps->fdisc) @@ -1289,7 +1305,6 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event) bfa_lps_login_comp(lps); break; - case BFA_LPS_SM_OFFLINE: case BFA_LPS_SM_DELETE: bfa_sm_set_state(lps, bfa_lps_sm_init); break; @@ -2169,6 +2184,12 @@ bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport, bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); break; + case BFA_FCPORT_SM_FAA_MISCONFIG: + bfa_fcport_reset_linkinfo(fcport); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); + bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig); + break; + default: bfa_sm_fault(fcport->bfa, event); } @@ -2225,6 +2246,12 @@ bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport, bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); break; + case BFA_FCPORT_SM_FAA_MISCONFIG: + bfa_fcport_reset_linkinfo(fcport); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); + bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig); + break; + default: bfa_sm_fault(fcport->bfa, event); } @@ -2250,11 +2277,11 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { bfa_trc(fcport->bfa, - pevent->link_state.vc_fcf.fcf.fipenabled); + pevent->link_state.attr.vc_fcf.fcf.fipenabled); bfa_trc(fcport->bfa, - pevent->link_state.vc_fcf.fcf.fipfailed); + pevent->link_state.attr.vc_fcf.fcf.fipfailed); - if (pevent->link_state.vc_fcf.fcf.fipfailed) + if (pevent->link_state.attr.vc_fcf.fcf.fipfailed) bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_FIP_FCF_DISC, 0, "FIP FCF Discovery Failed"); @@ -2311,6 +2338,12 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); break; + case BFA_FCPORT_SM_FAA_MISCONFIG: + bfa_fcport_reset_linkinfo(fcport); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); + bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig); + break; + default: bfa_sm_fault(fcport->bfa, event); } @@ -2404,6 +2437,12 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport, } break; + case BFA_FCPORT_SM_FAA_MISCONFIG: + bfa_fcport_reset_linkinfo(fcport); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); + bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig); + break; + default: bfa_sm_fault(fcport->bfa, event); } @@ -2449,6 +2488,12 @@ bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport, bfa_reqq_wcancel(&fcport->reqq_wait); break; + case BFA_FCPORT_SM_FAA_MISCONFIG: + bfa_fcport_reset_linkinfo(fcport); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); + bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig); + break; + default: bfa_sm_fault(fcport->bfa, event); } @@ -2600,6 +2645,10 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport, bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); break; + case BFA_FCPORT_SM_DPORTENABLE: + bfa_sm_set_state(fcport, bfa_fcport_sm_dport); + break; + default: bfa_sm_fault(fcport->bfa, event); } @@ -2680,6 +2729,81 @@ bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, } } +static void +bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) +{ + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_DPORTENABLE: + case BFA_FCPORT_SM_DISABLE: + case BFA_FCPORT_SM_ENABLE: + case BFA_FCPORT_SM_START: + /* + * Ignore event for a port that is 
dport + */ + break; + + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); + break; + + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); + break; + + case BFA_FCPORT_SM_DPORTDISABLE: + bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); + break; + + default: + bfa_sm_fault(fcport->bfa, event); + } +} + +static void +bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_DPORTENABLE: + case BFA_FCPORT_SM_ENABLE: + case BFA_FCPORT_SM_START: + /* + * Ignore event for a port as there is FAA misconfig + */ + break; + + case BFA_FCPORT_SM_DISABLE: + if (bfa_fcport_send_disable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); + else + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait); + + bfa_fcport_reset_linkinfo(fcport); + bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); + break; + + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); + break; + + case BFA_FCPORT_SM_HWFAIL: + bfa_fcport_reset_linkinfo(fcport); + bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); + break; + + default: + bfa_sm_fault(fcport->bfa, event); + } +} + /* * Link state is down */ @@ -2943,6 +3067,7 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, */ do_gettimeofday(&tv); fcport->stats_reset_time = tv.tv_sec; + fcport->stats_dma_ready = BFA_FALSE; /* * initialize and set default configuration @@ -2953,6 +3078,9 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, port_cfg->maxfrsize = 0; port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS; + port_cfg->qos_bw.high = BFA_QOS_BW_HIGH; + port_cfg->qos_bw.med = BFA_QOS_BW_MED; + port_cfg->qos_bw.low = BFA_QOS_BW_LOW; INIT_LIST_HEAD(&fcport->stats_pending_q); INIT_LIST_HEAD(&fcport->statsclr_pending_q); @@ -2996,6 +3124,21 @@ bfa_fcport_iocdisable(struct bfa_s *bfa) bfa_trunk_iocdisable(bfa); } +/* + * Update loop info in fcport for SCN online + */ +static void +bfa_fcport_update_loop_info(struct bfa_fcport_s *fcport, + struct bfa_fcport_loop_info_s *loop_info) +{ + fcport->myalpa = loop_info->myalpa; + fcport->alpabm_valid = + loop_info->alpabm_val; + memcpy(fcport->alpabm.alpa_bm, + loop_info->alpabm.alpa_bm, + sizeof(struct fc_alpabm_s)); +} + static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport) { @@ -3005,12 +3148,15 @@ bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport) fcport->speed = pevent->link_state.speed; fcport->topology = pevent->link_state.topology; - if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) - fcport->myalpa = 0; + if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) { + bfa_fcport_update_loop_info(fcport, + &pevent->link_state.attr.loop_info); + return; + } /* QoS Details */ fcport->qos_attr = pevent->link_state.qos_attr; - fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr; + fcport->qos_vc_attr = pevent->link_state.attr.vc_fcf.qos_vc_attr; /* * update trunk state if applicable @@ -3019,7 +3165,8 @@ bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport) trunk->attr.state = BFA_TRUNK_DISABLED; /* update FCoE specific */ - fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan); + fcport->fcoe_vlan = + be16_to_cpu(pevent->link_state.attr.vc_fcf.fcf.vlan); 
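bfa_fcport_attach() above now seeds the QoS bandwidth split from the BFA_QOS_BW_HIGH/MED/LOW defaults, and bfa_fcport_set_qos_bw() further down accepts a new split only if every class is non-zero, the three percentages sum to 100, and low <= med <= high. The validator below restates those acceptance rules in isolation so they can be checked outside the driver; the struct and function names are illustrative and the field widths are an assumption, not the driver's bfa_qos_bw_s definition.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct qos_bw {                 /* high/med/low percentages */
        uint8_t high, med, low;
};

/*
 * Same acceptance rules as bfa_fcport_set_qos_bw(); returns true when the
 * driver would not reject the split with BFA_STATUS_QOS_BW_INVALID.
 */
static bool qos_bw_valid(const struct qos_bw *bw)
{
        if (bw->high == 0 || bw->med == 0 || bw->low == 0)
                return false;
        if (bw->high + bw->med + bw->low != 100)
                return false;
        if (bw->med > bw->high || bw->low > bw->med || bw->low > bw->high)
                return false;
        return true;
}

int main(void)
{
        struct qos_bw ok  = { .high = 60, .med = 30, .low = 10 };
        struct qos_bw bad = { .high = 50, .med = 20, .low = 20 }; /* sums to 90 */

        printf("%d %d\n", qos_bw_valid(&ok), qos_bw_valid(&bad));  /* 1 0 */
        return 0;
}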
bfa_trc(fcport->bfa, fcport->speed); bfa_trc(fcport->bfa, fcport->topology); @@ -3453,6 +3600,7 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) case BFI_FCPORT_I2H_ENABLE_RSP: if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) { + fcport->stats_dma_ready = BFA_TRUE; if (fcport->use_flash_cfg) { fcport->cfg = i2hmsg.penable_rsp->port_cfg; fcport->cfg.maxfrsize = @@ -3468,6 +3616,8 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) else fcport->trunk.attr.state = BFA_TRUNK_DISABLED; + fcport->qos_attr.qos_bw = + i2hmsg.penable_rsp->port_cfg.qos_bw; fcport->use_flash_cfg = BFA_FALSE; } @@ -3476,6 +3626,9 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) else fcport->qos_attr.state = BFA_QOS_DISABLED; + fcport->qos_attr.qos_bw_op = + i2hmsg.penable_rsp->port_cfg.qos_bw; + bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); } break; @@ -3488,8 +3641,17 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) case BFI_FCPORT_I2H_EVENT: if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP) bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP); - else - bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN); + else { + if (i2hmsg.event->link_state.linkstate_rsn == + BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG) + bfa_sm_send_event(fcport, + BFA_FCPORT_SM_FAA_MISCONFIG); + else + bfa_sm_send_event(fcport, + BFA_FCPORT_SM_LINKDOWN); + } + fcport->qos_attr.qos_bw_op = + i2hmsg.event->link_state.qos_attr.qos_bw_op; break; case BFI_FCPORT_I2H_TRUNK_SCN: @@ -3609,6 +3771,9 @@ bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed) if (fcport->cfg.trunked == BFA_TRUE) return BFA_STATUS_TRUNK_ENABLED; + if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && + (speed == BFA_PORT_SPEED_16GBPS)) + return BFA_STATUS_UNSUPP_SPEED; if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) { bfa_trc(bfa, fcport->speed_sup); return BFA_STATUS_UNSUPP_SPEED; @@ -3663,7 +3828,26 @@ bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology) switch (topology) { case BFA_PORT_TOPOLOGY_P2P: + break; + case BFA_PORT_TOPOLOGY_LOOP: + if ((bfa_fcport_is_qos_enabled(bfa) != BFA_FALSE) || + (fcport->qos_attr.state != BFA_QOS_DISABLED)) + return BFA_STATUS_ERROR_QOS_ENABLED; + if (fcport->cfg.ratelimit != BFA_FALSE) + return BFA_STATUS_ERROR_TRL_ENABLED; + if ((bfa_fcport_is_trunk_enabled(bfa) != BFA_FALSE) || + (fcport->trunk.attr.state != BFA_TRUNK_DISABLED)) + return BFA_STATUS_ERROR_TRUNK_ENABLED; + if ((bfa_fcport_get_speed(bfa) == BFA_PORT_SPEED_16GBPS) || + (fcport->cfg.speed == BFA_PORT_SPEED_16GBPS)) + return BFA_STATUS_UNSUPP_SPEED; + if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) + return BFA_STATUS_LOOP_UNSUPP_MEZZ; + if (bfa_fcport_is_dport(bfa) != BFA_FALSE) + return BFA_STATUS_DPORT_ERR; + break; + case BFA_PORT_TOPOLOGY_AUTO: break; @@ -3686,6 +3870,17 @@ bfa_fcport_get_topology(struct bfa_s *bfa) return fcport->topology; } +/** + * Get config topology. 
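A recurring theme in the fcport changes above is that the new loop topology is mutually exclusive with several other features: bfa_fcport_cfg_topology() refuses BFA_PORT_TOPOLOGY_LOOP while QoS, rate limiting, trunking or d-port mode is active, on mezzanine cards, and when the current or configured speed is 16 Gbps, and bfa_fcport_cfg_speed() likewise rejects 16 Gbps once the configured topology is loop. The checker below is a standalone summary of the topology checks with stand-in boolean inputs; the returned strings are only the names of the statuses the driver reports, in the same order the driver tests them.

#include <stdbool.h>
#include <stdio.h>

/*
 * Standalone summary of the BFA_PORT_TOPOLOGY_LOOP checks in
 * bfa_fcport_cfg_topology().
 */
static const char *loop_topology_allowed(bool qos_enabled, bool ratelim,
                                         bool trunk_enabled, bool speed_16g,
                                         bool mezz_card, bool dport_active)
{
        if (qos_enabled)
                return "BFA_STATUS_ERROR_QOS_ENABLED";
        if (ratelim)
                return "BFA_STATUS_ERROR_TRL_ENABLED";
        if (trunk_enabled)
                return "BFA_STATUS_ERROR_TRUNK_ENABLED";
        if (speed_16g)          /* current or configured speed is 16 Gbps */
                return "BFA_STATUS_UNSUPP_SPEED";
        if (mezz_card)
                return "BFA_STATUS_LOOP_UNSUPP_MEZZ";
        if (dport_active)
                return "BFA_STATUS_DPORT_ERR";
        return "BFA_STATUS_OK";
}

int main(void)
{
        printf("%s\n", loop_topology_allowed(false, false, false,
                                             false, false, false));
        return 0;
}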
+ */ +enum bfa_port_topology +bfa_fcport_get_cfg_topology(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + return fcport->cfg.topology; +} + bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa) { @@ -3761,9 +3956,11 @@ bfa_fcport_get_maxfrsize(struct bfa_s *bfa) u8 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa) { - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP) + return (BFA_FCPORT_MOD(bfa))->cfg.rx_bbcredit; - return fcport->cfg.rx_bbcredit; + else + return 0; } void @@ -3850,8 +4047,9 @@ bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - if (bfa_ioc_is_disabled(&bfa->ioc)) - return BFA_STATUS_IOC_DISABLED; + if (!bfa_iocfc_is_operational(bfa) || + !fcport->stats_dma_ready) + return BFA_STATUS_IOC_NON_OP; if (!list_empty(&fcport->statsclr_pending_q)) return BFA_STATUS_DEVBUSY; @@ -3876,6 +4074,10 @@ bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + if (!bfa_iocfc_is_operational(bfa) || + !fcport->stats_dma_ready) + return BFA_STATUS_IOC_NON_OP; + if (!list_empty(&fcport->stats_pending_q)) return BFA_STATUS_DEVBUSY; @@ -3905,6 +4107,40 @@ bfa_fcport_is_disabled(struct bfa_s *bfa) } bfa_boolean_t +bfa_fcport_is_dport(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) == + BFA_PORT_ST_DPORT); +} + +bfa_status_t +bfa_fcport_set_qos_bw(struct bfa_s *bfa, struct bfa_qos_bw_s *qos_bw) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa); + + bfa_trc(bfa, ioc_type); + + if ((qos_bw->high == 0) || (qos_bw->med == 0) || (qos_bw->low == 0)) + return BFA_STATUS_QOS_BW_INVALID; + + if ((qos_bw->high + qos_bw->med + qos_bw->low) != 100) + return BFA_STATUS_QOS_BW_INVALID; + + if ((qos_bw->med > qos_bw->high) || (qos_bw->low > qos_bw->med) || + (qos_bw->low > qos_bw->high)) + return BFA_STATUS_QOS_BW_INVALID; + + if ((ioc_type == BFA_IOC_TYPE_FC) && + (fcport->cfg.topology != BFA_PORT_TOPOLOGY_LOOP)) + fcport->cfg.qos_bw = *qos_bw; + + return BFA_STATUS_OK; +} + +bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); @@ -3981,6 +4217,26 @@ bfa_fcport_is_trunk_enabled(struct bfa_s *bfa) return fcport->cfg.trunked; } +void +bfa_fcport_dportenable(struct bfa_s *bfa) +{ + /* + * Assume caller check for port is in disable state + */ + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTENABLE); + bfa_port_set_dportenabled(&bfa->modules.port, BFA_TRUE); +} + +void +bfa_fcport_dportdisable(struct bfa_s *bfa) +{ + /* + * Assume caller check for port is in disable state + */ + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTDISABLE); + bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE); +} + /* * Rport State machine functions */ @@ -4707,6 +4963,21 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m) bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN); break; + case BFI_RPORT_I2H_LIP_SCN_ONLINE: + bfa_fcport_update_loop_info(BFA_FCPORT_MOD(bfa), + &msg.lip_scn->loop_info); + bfa_cb_rport_scn_online(bfa); + break; + + case BFI_RPORT_I2H_LIP_SCN_OFFLINE: + bfa_cb_rport_scn_offline(bfa); + break; + + case BFI_RPORT_I2H_NO_DEV: + rp = BFA_RPORT_FROM_TAG(bfa, msg.lip_scn->bfa_handle); + bfa_cb_rport_scn_no_dev(rp->rport_drv); + break; + default: bfa_trc(bfa, 
m->mhdr.msg_id); WARN_ON(1); @@ -5348,6 +5619,37 @@ bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw) } /* + * Dport forward declaration + */ + +/* + * BFA DPORT state machine events + */ +enum bfa_dport_sm_event { + BFA_DPORT_SM_ENABLE = 1, /* dport enable event */ + BFA_DPORT_SM_DISABLE = 2, /* dport disable event */ + BFA_DPORT_SM_FWRSP = 3, /* fw enable/disable rsp */ + BFA_DPORT_SM_QRESUME = 4, /* CQ space available */ + BFA_DPORT_SM_HWFAIL = 5, /* IOC h/w failure */ +}; + +static void bfa_dport_sm_disabled(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event); +static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event); +static void bfa_dport_sm_enabling(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event); +static void bfa_dport_sm_enabled(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event); +static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event); +static void bfa_dport_sm_disabling(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event); +static void bfa_dport_qresume(void *cbarg); +static void bfa_dport_req_comp(struct bfa_dport_s *dport, + bfi_diag_dport_rsp_t *msg); + +/* * BFA fcdiag module */ #define BFA_DIAG_QTEST_TOV 1000 /* msec */ @@ -5377,15 +5679,24 @@ bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) { struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); + struct bfa_dport_s *dport = &fcdiag->dport; + fcdiag->bfa = bfa; fcdiag->trcmod = bfa->trcmod; /* The common DIAG attach bfa_diag_attach() will do all memory claim */ + dport->bfa = bfa; + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport); + dport->cbfn = NULL; + dport->cbarg = NULL; } static void bfa_fcdiag_iocdisable(struct bfa_s *bfa) { struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); + struct bfa_dport_s *dport = &fcdiag->dport; + bfa_trc(fcdiag, fcdiag->lb.lock); if (fcdiag->lb.lock) { fcdiag->lb.status = BFA_STATUS_IOC_FAILURE; @@ -5393,6 +5704,8 @@ bfa_fcdiag_iocdisable(struct bfa_s *bfa) fcdiag->lb.lock = 0; bfa_fcdiag_set_busy_status(fcdiag); } + + bfa_sm_send_event(dport, BFA_DPORT_SM_HWFAIL); } static void @@ -5577,6 +5890,9 @@ bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg) case BFI_DIAG_I2H_QTEST: bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg); break; + case BFI_DIAG_I2H_DPORT: + bfa_dport_req_comp(&fcdiag->dport, (bfi_diag_dport_rsp_t *)msg); + break; default: bfa_trc(fcdiag, msg->mhdr.msg_id); WARN_ON(1); @@ -5646,12 +5962,18 @@ bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode, } } + /* + * For CT2, 1G is not supported + */ + if ((speed == BFA_PORT_SPEED_1GBPS) && + (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) { + bfa_trc(fcdiag, speed); + return BFA_STATUS_UNSUPP_SPEED; + } + /* For Mezz card, port speed entered needs to be checked */ if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) { if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) { - if ((speed == BFA_PORT_SPEED_1GBPS) && - (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) - return BFA_STATUS_UNSUPP_SPEED; if (!(speed == BFA_PORT_SPEED_1GBPS || speed == BFA_PORT_SPEED_2GBPS || speed == BFA_PORT_SPEED_4GBPS || @@ -5764,3 +6086,379 @@ bfa_fcdiag_lb_is_running(struct bfa_s *bfa) struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); return fcdiag->lb.lock ? 
BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK; } + +/* + * D-port + */ +static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport, + enum bfi_dport_req req); +static void +bfa_cb_fcdiag_dport(struct bfa_dport_s *dport, bfa_status_t bfa_status) +{ + if (dport->cbfn != NULL) { + dport->cbfn(dport->cbarg, bfa_status); + dport->cbfn = NULL; + dport->cbarg = NULL; + } +} + +static void +bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) +{ + bfa_trc(dport->bfa, event); + + switch (event) { + case BFA_DPORT_SM_ENABLE: + bfa_fcport_dportenable(dport->bfa); + if (bfa_dport_send_req(dport, BFI_DPORT_ENABLE)) + bfa_sm_set_state(dport, bfa_dport_sm_enabling); + else + bfa_sm_set_state(dport, bfa_dport_sm_enabling_qwait); + break; + + case BFA_DPORT_SM_DISABLE: + /* Already disabled */ + break; + + case BFA_DPORT_SM_HWFAIL: + /* ignore */ + break; + + default: + bfa_sm_fault(dport->bfa, event); + } +} + +static void +bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event) +{ + bfa_trc(dport->bfa, event); + + switch (event) { + case BFA_DPORT_SM_QRESUME: + bfa_sm_set_state(dport, bfa_dport_sm_enabling); + bfa_dport_send_req(dport, BFI_DPORT_ENABLE); + break; + + case BFA_DPORT_SM_HWFAIL: + bfa_reqq_wcancel(&dport->reqq_wait); + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED); + break; + + default: + bfa_sm_fault(dport->bfa, event); + } +} + +static void +bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) +{ + bfa_trc(dport->bfa, event); + + switch (event) { + case BFA_DPORT_SM_FWRSP: + bfa_sm_set_state(dport, bfa_dport_sm_enabled); + break; + + case BFA_DPORT_SM_HWFAIL: + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED); + break; + + default: + bfa_sm_fault(dport->bfa, event); + } +} + +static void +bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) +{ + bfa_trc(dport->bfa, event); + + switch (event) { + case BFA_DPORT_SM_ENABLE: + /* Already enabled */ + break; + + case BFA_DPORT_SM_DISABLE: + bfa_fcport_dportdisable(dport->bfa); + if (bfa_dport_send_req(dport, BFI_DPORT_DISABLE)) + bfa_sm_set_state(dport, bfa_dport_sm_disabling); + else + bfa_sm_set_state(dport, bfa_dport_sm_disabling_qwait); + break; + + case BFA_DPORT_SM_HWFAIL: + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + break; + + default: + bfa_sm_fault(dport->bfa, event); + } +} + +static void +bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event) +{ + bfa_trc(dport->bfa, event); + + switch (event) { + case BFA_DPORT_SM_QRESUME: + bfa_sm_set_state(dport, bfa_dport_sm_disabling); + bfa_dport_send_req(dport, BFI_DPORT_DISABLE); + break; + + case BFA_DPORT_SM_HWFAIL: + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + bfa_reqq_wcancel(&dport->reqq_wait); + bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK); + break; + + default: + bfa_sm_fault(dport->bfa, event); + } +} + +static void +bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) +{ + bfa_trc(dport->bfa, event); + + switch (event) { + case BFA_DPORT_SM_FWRSP: + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + break; + + case BFA_DPORT_SM_HWFAIL: + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK); + break; + + default: + bfa_sm_fault(dport->bfa, event); + } +} + + +static bfa_boolean_t +bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req) +{ + struct 
bfi_diag_dport_req_s *m; + + /* + * Increment message tag before queue check, so that responses to old + * requests are discarded. + */ + dport->msgtag++; + + /* + * check for room in queue to send request now + */ + m = bfa_reqq_next(dport->bfa, BFA_REQQ_DIAG); + if (!m) { + bfa_reqq_wait(dport->bfa, BFA_REQQ_PORT, &dport->reqq_wait); + return BFA_FALSE; + } + + bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT, + bfa_fn_lpu(dport->bfa)); + m->req = req; + m->msgtag = dport->msgtag; + + /* + * queue I/O message to firmware + */ + bfa_reqq_produce(dport->bfa, BFA_REQQ_DIAG, m->mh); + + return BFA_TRUE; +} + +static void +bfa_dport_qresume(void *cbarg) +{ + struct bfa_dport_s *dport = cbarg; + + bfa_sm_send_event(dport, BFA_DPORT_SM_QRESUME); +} + +static void +bfa_dport_req_comp(struct bfa_dport_s *dport, bfi_diag_dport_rsp_t *msg) +{ + bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP); + bfa_cb_fcdiag_dport(dport, msg->status); +} + +/* + * Dport enable + * + * @param[in] *bfa - bfa data struct + */ +bfa_status_t +bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg) +{ + struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); + struct bfa_dport_s *dport = &fcdiag->dport; + + /* + * Dport is not support in MEZZ card + */ + if (bfa_mfg_is_mezz(dport->bfa->ioc.attr->card_type)) { + bfa_trc(dport->bfa, BFA_STATUS_PBC); + return BFA_STATUS_CMD_NOTSUPP_MEZZ; + } + + /* + * Check to see if IOC is down + */ + if (!bfa_iocfc_is_operational(bfa)) + return BFA_STATUS_IOC_NON_OP; + + /* if port is PBC disabled, return error */ + if (bfa_fcport_is_pbcdisabled(bfa)) { + bfa_trc(dport->bfa, BFA_STATUS_PBC); + return BFA_STATUS_PBC; + } + + /* + * Check if port mode is FC port + */ + if (bfa_ioc_get_type(&bfa->ioc) != BFA_IOC_TYPE_FC) { + bfa_trc(dport->bfa, bfa_ioc_get_type(&bfa->ioc)); + return BFA_STATUS_CMD_NOTSUPP_CNA; + } + + /* + * Check if port is in LOOP mode + */ + if ((bfa_fcport_get_cfg_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) || + (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)) { + bfa_trc(dport->bfa, 0); + return BFA_STATUS_TOPOLOGY_LOOP; + } + + /* + * Check if port is TRUNK mode + */ + if (bfa_fcport_is_trunk_enabled(bfa)) { + bfa_trc(dport->bfa, 0); + return BFA_STATUS_ERROR_TRUNK_ENABLED; + } + + /* + * Check to see if port is disable or in dport state + */ + if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) && + (bfa_fcport_is_dport(bfa) == BFA_FALSE)) { + bfa_trc(dport->bfa, 0); + return BFA_STATUS_PORT_NOT_DISABLED; + } + + /* + * Check if dport is busy + */ + if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) || + bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) || + bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) || + bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait)) { + return BFA_STATUS_DEVBUSY; + } + + /* + * Check if dport is already enabled + */ + if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) { + bfa_trc(dport->bfa, 0); + return BFA_STATUS_DPORT_ENABLED; + } + + dport->cbfn = cbfn; + dport->cbarg = cbarg; + + bfa_sm_send_event(dport, BFA_DPORT_SM_ENABLE); + return BFA_STATUS_OK; +} + +/* + * Dport disable + * + * @param[in] *bfa - bfa data struct + */ +bfa_status_t +bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg) +{ + struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); + struct bfa_dport_s *dport = &fcdiag->dport; + + if (bfa_ioc_is_disabled(&bfa->ioc)) + return BFA_STATUS_IOC_DISABLED; + + /* if port is PBC disabled, return error */ + if (bfa_fcport_is_pbcdisabled(bfa)) { + bfa_trc(dport->bfa, BFA_STATUS_PBC); + return 
BFA_STATUS_PBC; + } + + /* + * Check to see if port is disable or in dport state + */ + if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) && + (bfa_fcport_is_dport(bfa) == BFA_FALSE)) { + bfa_trc(dport->bfa, 0); + return BFA_STATUS_PORT_NOT_DISABLED; + } + + /* + * Check if dport is busy + */ + if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) || + bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) || + bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) || + bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait)) + return BFA_STATUS_DEVBUSY; + + /* + * Check if dport is already disabled + */ + if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) { + bfa_trc(dport->bfa, 0); + return BFA_STATUS_DPORT_DISABLED; + } + + dport->cbfn = cbfn; + dport->cbarg = cbarg; + + bfa_sm_send_event(dport, BFA_DPORT_SM_DISABLE); + return BFA_STATUS_OK; +} + +/* + * Get D-port state + * + * @param[in] *bfa - bfa data struct + */ + +bfa_status_t +bfa_dport_get_state(struct bfa_s *bfa, enum bfa_dport_state *state) +{ + struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); + struct bfa_dport_s *dport = &fcdiag->dport; + + if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) + *state = BFA_DPORT_ST_ENABLED; + else if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) || + bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait)) + *state = BFA_DPORT_ST_ENABLING; + else if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) + *state = BFA_DPORT_ST_DISABLED; + else if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) || + bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait)) + *state = BFA_DPORT_ST_DISABLING; + else { + bfa_trc(dport->bfa, BFA_STATUS_EINVAL); + return BFA_STATUS_EINVAL; + } + return BFA_STATUS_OK; +} diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h index 1abcf7c5166..8d7fbecfcb2 100644 --- a/drivers/scsi/bfa/bfa_svc.h +++ b/drivers/scsi/bfa/bfa_svc.h @@ -474,8 +474,10 @@ struct bfa_fcport_s { /* supported speeds */ enum bfa_port_speed speed; /* current speed */ enum bfa_port_topology topology; /* current topology */ - u8 myalpa; /* my ALPA in LOOP topology */ u8 rsvd[3]; + u8 myalpa; /* my ALPA in LOOP topology */ + u8 alpabm_valid; /* alpa bitmap valid or not */ + struct fc_alpabm_s alpabm; /* alpa bitmap */ struct bfa_port_cfg_s cfg; /* current port configuration */ bfa_boolean_t use_flash_cfg; /* get port cfg from flash */ struct bfa_qos_attr_s qos_attr; /* QoS Attributes */ @@ -512,6 +514,7 @@ struct bfa_fcport_s { struct bfa_fcport_trunk_s trunk; u16 fcoe_vlan; struct bfa_mem_dma_s fcport_dma; + bfa_boolean_t stats_dma_ready; }; #define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport) @@ -534,6 +537,7 @@ enum bfa_port_speed bfa_fcport_get_speed(struct bfa_s *bfa); bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topo); enum bfa_port_topology bfa_fcport_get_topology(struct bfa_s *bfa); +enum bfa_port_topology bfa_fcport_get_cfg_topology(struct bfa_s *bfa); bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa); bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa); u8 bfa_fcport_get_myalpa(struct bfa_s *bfa); @@ -547,6 +551,9 @@ void bfa_fcport_event_register(struct bfa_s *bfa, void (*event_cbfn) (void *cbarg, enum bfa_port_linkstate event), void *event_cbarg); bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa); +bfa_boolean_t bfa_fcport_is_dport(struct bfa_s *bfa); +bfa_status_t bfa_fcport_set_qos_bw(struct bfa_s *bfa, + struct bfa_qos_bw_s *qos_bw); enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa); void 
bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn); @@ -560,6 +567,8 @@ bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb); bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa); bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa); +void bfa_fcport_dportenable(struct bfa_s *bfa); +void bfa_fcport_dportdisable(struct bfa_s *bfa); bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa); void bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state); @@ -575,6 +584,9 @@ void bfa_cb_rport_offline(void *rport); void bfa_cb_rport_qos_scn_flowid(void *rport, struct bfa_rport_qos_attr_s old_qos_attr, struct bfa_rport_qos_attr_s new_qos_attr); +void bfa_cb_rport_scn_online(struct bfa_s *bfa); +void bfa_cb_rport_scn_offline(struct bfa_s *bfa); +void bfa_cb_rport_scn_no_dev(void *rp); void bfa_cb_rport_qos_scn_prio(void *rport, struct bfa_rport_qos_attr_s old_qos_attr, struct bfa_rport_qos_attr_s new_qos_attr); @@ -697,11 +709,21 @@ struct bfa_fcdiag_lb_s { u32 status; }; +struct bfa_dport_s { + struct bfa_s *bfa; /* Back pointer to BFA */ + bfa_sm_t sm; /* finite state machine */ + u32 msgtag; /* firmware msg tag for reply */ + struct bfa_reqq_wait_s reqq_wait; + bfa_cb_diag_t cbfn; + void *cbarg; +}; + struct bfa_fcdiag_s { struct bfa_s *bfa; /* Back pointer to BFA */ struct bfa_trc_mod_s *trcmod; struct bfa_fcdiag_lb_s lb; struct bfa_fcdiag_qtest_s qtest; + struct bfa_dport_s dport; }; #define BFA_FCDIAG_MOD(__bfa) (&(__bfa)->modules.fcdiag) @@ -717,5 +739,11 @@ bfa_status_t bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 ignore, u32 queue, struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn, void *cbarg); bfa_status_t bfa_fcdiag_lb_is_running(struct bfa_s *bfa); +bfa_status_t bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, + void *cbarg); +bfa_status_t bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, + void *cbarg); +bfa_status_t bfa_dport_get_state(struct bfa_s *bfa, + enum bfa_dport_state *state); #endif /* __BFA_SVC_H__ */ diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index c37494916a1..895b0e516e0 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -63,9 +63,9 @@ int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS; u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size; u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2; -#define BFAD_FW_FILE_CB "cbfw.bin" -#define BFAD_FW_FILE_CT "ctfw.bin" -#define BFAD_FW_FILE_CT2 "ct2fw.bin" +#define BFAD_FW_FILE_CB "cbfw-3.1.0.0.bin" +#define BFAD_FW_FILE_CT "ctfw-3.1.0.0.bin" +#define BFAD_FW_FILE_CT2 "ct2fw-3.1.0.0.bin" static u32 *bfad_load_fwimg(struct pci_dev *pdev); static void bfad_free_fwimg(void); diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c index 0afa39076ce..555e7db94a1 100644 --- a/drivers/scsi/bfa/bfad_bsg.c +++ b/drivers/scsi/bfa/bfad_bsg.c @@ -33,7 +33,7 @@ bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd) /* If IOC is not in disabled state - return */ if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); - iocmd->status = BFA_STATUS_IOC_FAILURE; + iocmd->status = BFA_STATUS_OK; return rc; } @@ -54,6 +54,12 @@ bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd) unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); + if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; + return rc; + } + if (bfad->disable_active) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 
-EBUSY; @@ -101,9 +107,10 @@ bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd) /* set adapter hw path */ strcpy(iocmd->adapter_hwpath, bfad->pci_name); - i = strlen(iocmd->adapter_hwpath) - 1; - while (iocmd->adapter_hwpath[i] != '.') - i--; + for (i = 0; iocmd->adapter_hwpath[i] != ':' && i < BFA_STRING_32; i++) + ; + for (; iocmd->adapter_hwpath[++i] != ':' && i < BFA_STRING_32; ) + ; iocmd->adapter_hwpath[i] = '\0'; iocmd->status = BFA_STATUS_OK; return 0; @@ -880,6 +887,19 @@ out: } int +bfad_iocmd_qos_set_bw(struct bfad_s *bfad, void *pcmd) +{ + struct bfa_bsg_qos_bw_s *iocmd = (struct bfa_bsg_qos_bw_s *)pcmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fcport_set_qos_bw(&bfad->bfa, &iocmd->qos_bw); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +int bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; @@ -888,16 +908,22 @@ bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd) spin_lock_irqsave(&bfad->bfad_lock, flags); - if (cmd == IOCMD_RATELIM_ENABLE) - fcport->cfg.ratelimit = BFA_TRUE; - else if (cmd == IOCMD_RATELIM_DISABLE) - fcport->cfg.ratelimit = BFA_FALSE; + if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && + (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) + iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; + else { + if (cmd == IOCMD_RATELIM_ENABLE) + fcport->cfg.ratelimit = BFA_TRUE; + else if (cmd == IOCMD_RATELIM_DISABLE) + fcport->cfg.ratelimit = BFA_FALSE; - if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN) - fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS; + if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN) + fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS; + + iocmd->status = BFA_STATUS_OK; + } spin_unlock_irqrestore(&bfad->bfad_lock, flags); - iocmd->status = BFA_STATUS_OK; return 0; } @@ -919,8 +945,13 @@ bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd) return 0; } - fcport->cfg.trl_def_speed = iocmd->speed; - iocmd->status = BFA_STATUS_OK; + if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && + (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) + iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; + else { + fcport->cfg.trl_def_speed = iocmd->speed; + iocmd->status = BFA_STATUS_OK; + } spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; @@ -1167,8 +1198,8 @@ bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd) spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk, &iocmd->pcifn_id, iocmd->port, - iocmd->pcifn_class, iocmd->bandwidth, - bfad_hcb_comp, &fcomp); + iocmd->pcifn_class, iocmd->bw_min, + iocmd->bw_max, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; @@ -1211,8 +1242,8 @@ bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd) init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk, - iocmd->pcifn_id, iocmd->bandwidth, - bfad_hcb_comp, &fcomp); + iocmd->pcifn_id, iocmd->bw_min, + iocmd->bw_max, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfa_trc(bfad, iocmd->status); if (iocmd->status != BFA_STATUS_OK) @@ -1736,6 +1767,52 @@ bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd) } int +bfad_iocmd_diag_cfg_dport(struct bfad_s *bfad, unsigned int cmd, void *pcmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; + 
unsigned long flags; + struct bfad_hal_comp fcomp; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + if (cmd == IOCMD_DIAG_DPORT_ENABLE) + iocmd->status = bfa_dport_enable(&bfad->bfa, + bfad_hcb_comp, &fcomp); + else if (cmd == IOCMD_DIAG_DPORT_DISABLE) + iocmd->status = bfa_dport_disable(&bfad->bfa, + bfad_hcb_comp, &fcomp); + else { + bfa_trc(bfad, 0); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return -EINVAL; + } + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (iocmd->status != BFA_STATUS_OK) + bfa_trc(bfad, iocmd->status); + else { + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; + } + + return 0; +} + +int +bfad_iocmd_diag_dport_get_state(struct bfad_s *bfad, void *pcmd) +{ + struct bfa_bsg_diag_dport_get_state_s *iocmd = + (struct bfa_bsg_diag_dport_get_state_s *)pcmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_dport_get_state(&bfad->bfa, &iocmd->state); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +int bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_phy_attr_s *iocmd = @@ -2052,7 +2129,7 @@ bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd) init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa), - BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn), + BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id, &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); @@ -2074,7 +2151,7 @@ bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd) init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), - BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn), + BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id, &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); @@ -2161,22 +2238,31 @@ bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) spin_lock_irqsave(&bfad->bfad_lock, flags); - if (v_cmd == IOCMD_TRUNK_ENABLE) { - trunk->attr.state = BFA_TRUNK_OFFLINE; - bfa_fcport_disable(&bfad->bfa); - fcport->cfg.trunked = BFA_TRUE; - } else if (v_cmd == IOCMD_TRUNK_DISABLE) { - trunk->attr.state = BFA_TRUNK_DISABLED; - bfa_fcport_disable(&bfad->bfa); - fcport->cfg.trunked = BFA_FALSE; - } + if (bfa_fcport_is_dport(&bfad->bfa)) + return BFA_STATUS_DPORT_ERR; - if (!bfa_fcport_is_disabled(&bfad->bfa)) - bfa_fcport_enable(&bfad->bfa); + if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) || + (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) + iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; + else { + if (v_cmd == IOCMD_TRUNK_ENABLE) { + trunk->attr.state = BFA_TRUNK_OFFLINE; + bfa_fcport_disable(&bfad->bfa); + fcport->cfg.trunked = BFA_TRUE; + } else if (v_cmd == IOCMD_TRUNK_DISABLE) { + trunk->attr.state = BFA_TRUNK_DISABLED; + bfa_fcport_disable(&bfad->bfa); + fcport->cfg.trunked = BFA_FALSE; + } + + if (!bfa_fcport_is_disabled(&bfad->bfa)) + bfa_fcport_enable(&bfad->bfa); + + iocmd->status = BFA_STATUS_OK; + } spin_unlock_irqrestore(&bfad->bfad_lock, flags); - iocmd->status = BFA_STATUS_OK; return 0; } @@ -2189,12 +2275,17 @@ bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd) unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); - memcpy((void *)&iocmd->attr, (void *)&trunk->attr, - sizeof(struct bfa_trunk_attr_s)); - iocmd->attr.port_id = 
bfa_lps_get_base_pid(&bfad->bfa); + if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) || + (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) + iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; + else { + memcpy((void *)&iocmd->attr, (void *)&trunk->attr, + sizeof(struct bfa_trunk_attr_s)); + iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa); + iocmd->status = BFA_STATUS_OK; + } spin_unlock_irqrestore(&bfad->bfad_lock, flags); - iocmd->status = BFA_STATUS_OK; return 0; } @@ -2207,14 +2298,22 @@ bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) spin_lock_irqsave(&bfad->bfad_lock, flags); if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) { - if (v_cmd == IOCMD_QOS_ENABLE) - fcport->cfg.qos_enabled = BFA_TRUE; - else if (v_cmd == IOCMD_QOS_DISABLE) - fcport->cfg.qos_enabled = BFA_FALSE; + if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && + (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) + iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; + else { + if (v_cmd == IOCMD_QOS_ENABLE) + fcport->cfg.qos_enabled = BFA_TRUE; + else if (v_cmd == IOCMD_QOS_DISABLE) { + fcport->cfg.qos_enabled = BFA_FALSE; + fcport->cfg.qos_bw.high = BFA_QOS_BW_HIGH; + fcport->cfg.qos_bw.med = BFA_QOS_BW_MED; + fcport->cfg.qos_bw.low = BFA_QOS_BW_LOW; + } + } } spin_unlock_irqrestore(&bfad->bfad_lock, flags); - iocmd->status = BFA_STATUS_OK; return 0; } @@ -2226,11 +2325,21 @@ bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd) unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); - iocmd->attr.state = fcport->qos_attr.state; - iocmd->attr.total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr); + if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && + (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) + iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; + else { + iocmd->attr.state = fcport->qos_attr.state; + iocmd->attr.total_bb_cr = + be32_to_cpu(fcport->qos_attr.total_bb_cr); + iocmd->attr.qos_bw.high = fcport->cfg.qos_bw.high; + iocmd->attr.qos_bw.med = fcport->cfg.qos_bw.med; + iocmd->attr.qos_bw.low = fcport->cfg.qos_bw.low; + iocmd->attr.qos_bw_op = fcport->qos_attr.qos_bw_op; + iocmd->status = BFA_STATUS_OK; + } spin_unlock_irqrestore(&bfad->bfad_lock, flags); - iocmd->status = BFA_STATUS_OK; return 0; } @@ -2274,6 +2383,7 @@ bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd) struct bfad_hal_comp fcomp; unsigned long flags; struct bfa_cb_pending_q_s cb_qe; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); init_completion(&fcomp.comp); bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, @@ -2281,7 +2391,11 @@ bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd) spin_lock_irqsave(&bfad->bfad_lock, flags); WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc)); - iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe); + if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && + (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) + iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; + else + iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) { bfa_trc(bfad, iocmd->status); @@ -2300,6 +2414,7 @@ bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd) struct bfad_hal_comp fcomp; unsigned long flags; struct bfa_cb_pending_q_s cb_qe; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); init_completion(&fcomp.comp); bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, @@ -2307,7 +2422,11 @@ bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd) spin_lock_irqsave(&bfad->bfad_lock, flags); 
WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc)); - iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe); + if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && + (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) + iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; + else + iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) { bfa_trc(bfad, iocmd->status); @@ -2435,6 +2554,139 @@ bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) return 0; } +int +bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_fcpim_throttle_s *iocmd = + (struct bfa_bsg_fcpim_throttle_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fcpim_throttle_get(&bfad->bfa, + (void *)&iocmd->throttle); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +int +bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_fcpim_throttle_s *iocmd = + (struct bfa_bsg_fcpim_throttle_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fcpim_throttle_set(&bfad->bfa, + iocmd->throttle.cfg_value); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +int +bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_tfru_s *iocmd = + (struct bfa_bsg_tfru_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags = 0; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_tfru_read(BFA_FRU(&bfad->bfa), + &iocmd->data, iocmd->len, iocmd->offset, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status == BFA_STATUS_OK) { + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; + } + + return 0; +} + +int +bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_tfru_s *iocmd = + (struct bfa_bsg_tfru_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags = 0; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_tfru_write(BFA_FRU(&bfad->bfa), + &iocmd->data, iocmd->len, iocmd->offset, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status == BFA_STATUS_OK) { + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; + } + + return 0; +} + +int +bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_fruvpd_s *iocmd = + (struct bfa_bsg_fruvpd_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags = 0; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fruvpd_read(BFA_FRU(&bfad->bfa), + &iocmd->data, iocmd->len, iocmd->offset, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status == BFA_STATUS_OK) { + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; + } + + return 0; +} + +int +bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_fruvpd_s *iocmd = + (struct bfa_bsg_fruvpd_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags = 0; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa), + &iocmd->data, iocmd->len, iocmd->offset, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status == BFA_STATUS_OK) { + 
wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; + } + + return 0; +} + +int +bfad_iocmd_fruvpd_get_max_size(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_fruvpd_max_size_s *iocmd = + (struct bfa_bsg_fruvpd_max_size_s *)cmd; + unsigned long flags = 0; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fruvpd_get_max_size(BFA_FRU(&bfad->bfa), + &iocmd->max_size); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + static int bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, unsigned int payload_len) @@ -2660,6 +2912,13 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, case IOCMD_DIAG_LB_STAT: rc = bfad_iocmd_diag_lb_stat(bfad, iocmd); break; + case IOCMD_DIAG_DPORT_ENABLE: + case IOCMD_DIAG_DPORT_DISABLE: + rc = bfad_iocmd_diag_cfg_dport(bfad, cmd, iocmd); + break; + case IOCMD_DIAG_DPORT_GET_STATE: + rc = bfad_iocmd_diag_dport_get_state(bfad, iocmd); + break; case IOCMD_PHY_GET_ATTR: rc = bfad_iocmd_phy_get_attr(bfad, iocmd); break; @@ -2741,6 +3000,9 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, case IOCMD_QOS_RESET_STATS: rc = bfad_iocmd_qos_reset_stats(bfad, iocmd); break; + case IOCMD_QOS_SET_BW: + rc = bfad_iocmd_qos_set_bw(bfad, iocmd); + break; case IOCMD_VF_GET_STATS: rc = bfad_iocmd_vf_get_stats(bfad, iocmd); break; @@ -2759,6 +3021,29 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, case IOCMD_FCPIM_LUNMASK_DELETE: rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd); break; + case IOCMD_FCPIM_THROTTLE_QUERY: + rc = bfad_iocmd_fcpim_throttle_query(bfad, iocmd); + break; + case IOCMD_FCPIM_THROTTLE_SET: + rc = bfad_iocmd_fcpim_throttle_set(bfad, iocmd); + break; + /* TFRU */ + case IOCMD_TFRU_READ: + rc = bfad_iocmd_tfru_read(bfad, iocmd); + break; + case IOCMD_TFRU_WRITE: + rc = bfad_iocmd_tfru_write(bfad, iocmd); + break; + /* FRU */ + case IOCMD_FRUVPD_READ: + rc = bfad_iocmd_fruvpd_read(bfad, iocmd); + break; + case IOCMD_FRUVPD_UPDATE: + rc = bfad_iocmd_fruvpd_update(bfad, iocmd); + break; + case IOCMD_FRUVPD_GET_MAX_SIZE: + rc = bfad_iocmd_fruvpd_get_max_size(bfad, iocmd); + break; default: rc = -EINVAL; break; diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h index 8c569ddb750..15e1fc8e796 100644 --- a/drivers/scsi/bfa/bfad_bsg.h +++ b/drivers/scsi/bfa/bfad_bsg.h @@ -141,6 +141,17 @@ enum { IOCMD_FCPIM_LUNMASK_QUERY, IOCMD_FCPIM_LUNMASK_ADD, IOCMD_FCPIM_LUNMASK_DELETE, + IOCMD_DIAG_DPORT_ENABLE, + IOCMD_DIAG_DPORT_DISABLE, + IOCMD_DIAG_DPORT_GET_STATE, + IOCMD_QOS_SET_BW, + IOCMD_FCPIM_THROTTLE_QUERY, + IOCMD_FCPIM_THROTTLE_SET, + IOCMD_TFRU_READ, + IOCMD_TFRU_WRITE, + IOCMD_FRUVPD_READ, + IOCMD_FRUVPD_UPDATE, + IOCMD_FRUVPD_GET_MAX_SIZE, }; struct bfa_bsg_gen_s { @@ -463,7 +474,8 @@ struct bfa_bsg_pcifn_s { bfa_status_t status; u16 bfad_num; u16 pcifn_id; - u32 bandwidth; + u16 bw_min; + u16 bw_max; u8 port; enum bfi_pcifn_class pcifn_class; u8 rsvd[1]; @@ -613,6 +625,13 @@ struct bfa_bsg_diag_lb_stat_s { u16 rsvd; }; +struct bfa_bsg_diag_dport_get_state_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + enum bfa_dport_state state; +}; + struct bfa_bsg_phy_attr_s { bfa_status_t status; u16 bfad_num; @@ -694,6 +713,13 @@ struct bfa_bsg_qos_vc_attr_s { struct bfa_qos_vc_attr_s attr; }; +struct bfa_bsg_qos_bw_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_qos_bw_s qos_bw; +}; + struct bfa_bsg_vf_stats_s { bfa_status_t status; u16 bfad_num; @@ -722,6 +748,41 @@ struct 
bfa_bsg_fcpim_lunmask_s { struct scsi_lun lun; }; +struct bfa_bsg_fcpim_throttle_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + struct bfa_defs_fcpim_throttle_s throttle; +}; + +#define BFA_TFRU_DATA_SIZE 64 +#define BFA_MAX_FRUVPD_TRANSFER_SIZE 0x1000 + +struct bfa_bsg_tfru_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + u32 offset; + u32 len; + u8 data[BFA_TFRU_DATA_SIZE]; +}; + +struct bfa_bsg_fruvpd_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + u32 offset; + u32 len; + u8 data[BFA_MAX_FRUVPD_TRANSFER_SIZE]; +}; + +struct bfa_bsg_fruvpd_max_size_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + u32 max_size; +}; + struct bfa_bsg_fcpt_s { bfa_status_t status; u16 vf_id; diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h index 1840651ce1d..0c64a04f01f 100644 --- a/drivers/scsi/bfa/bfad_drv.h +++ b/drivers/scsi/bfa/bfad_drv.h @@ -57,7 +57,7 @@ #ifdef BFA_DRIVER_VERSION #define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION #else -#define BFAD_DRIVER_VERSION "3.1.2.0" +#define BFAD_DRIVER_VERSION "3.1.2.1" #endif #define BFAD_PROTO_NAME FCPI_NAME diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h index b2ba0b2e91b..57b146bca18 100644 --- a/drivers/scsi/bfa/bfi.h +++ b/drivers/scsi/bfa/bfi.h @@ -210,7 +210,8 @@ enum bfi_mclass { BFI_MC_PORT = 21, /* Physical port */ BFI_MC_SFP = 22, /* SFP module */ BFI_MC_PHY = 25, /* External PHY message class */ - BFI_MC_MAX = 32 + BFI_MC_FRU = 34, + BFI_MC_MAX = 35 }; #define BFI_IOC_MAX_CQS 4 @@ -288,6 +289,9 @@ struct bfi_ioc_attr_s { char optrom_version[BFA_VERSION_LEN]; struct bfa_mfg_vpd_s vpd; u32 card_type; /* card type */ + u8 mfg_day; /* manufacturing day */ + u8 mfg_month; /* manufacturing month */ + u16 mfg_year; /* manufacturing year */ }; /* @@ -687,7 +691,8 @@ struct bfi_ablk_h2i_pf_req_s { u8 pcifn; u8 port; u16 pers; - u32 bw; + u16 bw_min; /* percent BW @ max speed */ + u16 bw_max; /* percent BW @ max speed */ }; /* BFI_ABLK_H2I_OPTROM_ENABLE, BFI_ABLK_H2I_OPTROM_DISABLE */ @@ -957,6 +962,7 @@ enum bfi_diag_h2i { BFI_DIAG_H2I_TEMPSENSOR = 4, BFI_DIAG_H2I_LEDTEST = 5, BFI_DIAG_H2I_QTEST = 6, + BFI_DIAG_H2I_DPORT = 7, }; enum bfi_diag_i2h { @@ -966,6 +972,7 @@ enum bfi_diag_i2h { BFI_DIAG_I2H_TEMPSENSOR = BFA_I2HM(BFI_DIAG_H2I_TEMPSENSOR), BFI_DIAG_I2H_LEDTEST = BFA_I2HM(BFI_DIAG_H2I_LEDTEST), BFI_DIAG_I2H_QTEST = BFA_I2HM(BFI_DIAG_H2I_QTEST), + BFI_DIAG_I2H_DPORT = BFA_I2HM(BFI_DIAG_H2I_DPORT), }; #define BFI_DIAG_MAX_SGES 2 @@ -1052,6 +1059,23 @@ struct bfi_diag_qtest_req_s { #define bfi_diag_qtest_rsp_t struct bfi_diag_qtest_req_s /* + * D-port test + */ +enum bfi_dport_req { + BFI_DPORT_DISABLE = 0, /* disable dport request */ + BFI_DPORT_ENABLE = 1, /* enable dport request */ +}; + +struct bfi_diag_dport_req_s { + struct bfi_mhdr_s mh; /* 4 bytes */ + u8 req; /* request 1: enable 0: disable */ + u8 status; /* reply status */ + u8 rsvd[2]; + u32 msgtag; /* msgtag for reply */ +}; +#define bfi_diag_dport_rsp_t struct bfi_diag_dport_req_s + +/* * PHY module specific */ enum bfi_phy_h2i_msgs_e { @@ -1147,6 +1171,50 @@ struct bfi_phy_write_rsp_s { u32 length; }; +enum bfi_fru_h2i_msgs { + BFI_FRUVPD_H2I_WRITE_REQ = 1, + BFI_FRUVPD_H2I_READ_REQ = 2, + BFI_TFRU_H2I_WRITE_REQ = 3, + BFI_TFRU_H2I_READ_REQ = 4, +}; + +enum bfi_fru_i2h_msgs { + BFI_FRUVPD_I2H_WRITE_RSP = BFA_I2HM(1), + BFI_FRUVPD_I2H_READ_RSP = BFA_I2HM(2), + BFI_TFRU_I2H_WRITE_RSP = BFA_I2HM(3), + BFI_TFRU_I2H_READ_RSP = BFA_I2HM(4), +}; + +/* + * FRU write request + */ +struct bfi_fru_write_req_s { + struct bfi_mhdr_s 
mh; /* Common msg header */ + u8 last; + u8 rsv[3]; + u32 offset; + u32 length; + struct bfi_alen_s alen; +}; + +/* + * FRU read request + */ +struct bfi_fru_read_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u32 offset; + u32 length; + struct bfi_alen_s alen; +}; + +/* + * FRU response + */ +struct bfi_fru_rsp_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u32 status; + u32 length; +}; #pragma pack() #endif /* __BFI_H__ */ diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h index d4220e13caf..5ae2c167b2c 100644 --- a/drivers/scsi/bfa/bfi_ms.h +++ b/drivers/scsi/bfa/bfi_ms.h @@ -426,6 +426,7 @@ struct bfi_lps_login_req_s { u8 auth_en; u8 lps_role; u8 bb_scn; + u32 vvl_flag; }; struct bfi_lps_login_rsp_s { @@ -499,6 +500,9 @@ enum bfi_rport_i2h_msgs { BFI_RPORT_I2H_CREATE_RSP = BFA_I2HM(1), BFI_RPORT_I2H_DELETE_RSP = BFA_I2HM(2), BFI_RPORT_I2H_QOS_SCN = BFA_I2HM(3), + BFI_RPORT_I2H_LIP_SCN_ONLINE = BFA_I2HM(4), + BFI_RPORT_I2H_LIP_SCN_OFFLINE = BFA_I2HM(5), + BFI_RPORT_I2H_NO_DEV = BFA_I2HM(6), }; struct bfi_rport_create_req_s { @@ -551,6 +555,14 @@ struct bfi_rport_qos_scn_s { struct bfa_rport_qos_attr_s new_qos_attr; /* New QoS Attributes */ }; +struct bfi_rport_lip_scn_s { + struct bfi_mhdr_s mh; /*!< common msg header */ + u16 bfa_handle; /*!< host rport handle */ + u8 status; /*!< scn online status */ + u8 rsvd; + struct bfa_fcport_loop_info_s loop_info; +}; + union bfi_rport_h2i_msg_u { struct bfi_msg_s *msg; struct bfi_rport_create_req_s *create_req; @@ -563,6 +575,7 @@ union bfi_rport_i2h_msg_u { struct bfi_rport_create_rsp_s *create_rsp; struct bfi_rport_delete_rsp_s *delete_rsp; struct bfi_rport_qos_scn_s *qos_scn_evt; + struct bfi_rport_lip_scn_s *lip_scn; }; /* @@ -828,6 +841,7 @@ enum bfi_tskim_status { */ BFI_TSKIM_STS_TIMEOUT = 10, /* TM request timedout */ BFI_TSKIM_STS_ABORTED = 11, /* Aborted on host request */ + BFI_TSKIM_STS_UTAG = 12, /* unknown tag for request */ }; struct bfi_tskim_rsp_s { diff --git a/drivers/scsi/bfa/bfi_reg.h b/drivers/scsi/bfa/bfi_reg.h index ed5f159e186..99133bcf53f 100644 --- a/drivers/scsi/bfa/bfi_reg.h +++ b/drivers/scsi/bfa/bfi_reg.h @@ -338,6 +338,7 @@ enum { #define __A2T_AHB_LOAD 0x00000800 #define __WGN_READY 0x00000400 #define __GLBL_PF_VF_CFG_RDY 0x00000200 +#define CT2_NFC_STS_REG 0x00027410 #define CT2_NFC_CSR_CLR_REG 0x00027420 #define CT2_NFC_CSR_SET_REG 0x00027424 #define __HALT_NFC_CONTROLLER 0x00000002 @@ -355,6 +356,8 @@ enum { (CT2_CSI_MAC0_CONTROL_REG + \ (__n) * (CT2_CSI_MAC1_CONTROL_REG - CT2_CSI_MAC0_CONTROL_REG)) +#define CT2_NFC_FLASH_STS_REG 0x00014834 +#define __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS 0x00000020 /* * Name semaphore registers based on usage */ |
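The D-port support added above (bfa_svc.c / bfa_svc.h) follows the driver's usual function-pointer state-machine style: the current state is the handler itself, events are dispatched by calling it, and a full request queue parks the FSM in a *_qwait state until a BFA_DPORT_SM_QRESUME event retries the send. Below is a minimal standalone sketch of that shape only; the names, the bool "queue" flag and the printf are illustrative stand-ins, not the bfa_sm_* machinery.

/*
 * Minimal sketch of the function-pointer FSM shape used by the D-port code.
 * Everything here is a stand-in; it is not driver code.
 */
#include <stdbool.h>
#include <stdio.h>

enum dport_event { EV_ENABLE, EV_QRESUME, EV_FWRSP, EV_HWFAIL };

struct dport;
typedef void (*dport_state_t)(struct dport *dp, enum dport_event ev);

struct dport {
        dport_state_t sm;        /* current state is the current handler */
        bool queue_has_room;     /* models bfa_reqq_next() succeeding */
};

static void sm_disabled(struct dport *dp, enum dport_event ev);
static void sm_enabling_qwait(struct dport *dp, enum dport_event ev);
static void sm_enabling(struct dport *dp, enum dport_event ev);
static void sm_enabled(struct dport *dp, enum dport_event ev);

static void set_state(struct dport *dp, dport_state_t st) { dp->sm = st; }
static void send_event(struct dport *dp, enum dport_event ev) { dp->sm(dp, ev); }

/* models bfa_dport_send_req(): false means "queue full, wait for QRESUME" */
static bool send_fw_req(struct dport *dp) { return dp->queue_has_room; }

static void sm_disabled(struct dport *dp, enum dport_event ev)
{
        if (ev == EV_ENABLE) {
                if (send_fw_req(dp))
                        set_state(dp, sm_enabling);
                else
                        set_state(dp, sm_enabling_qwait);
        }
        /* DISABLE and HWFAIL are ignored while already disabled */
}

static void sm_enabling_qwait(struct dport *dp, enum dport_event ev)
{
        if (ev == EV_QRESUME) {
                set_state(dp, sm_enabling);
                send_fw_req(dp);
        } else if (ev == EV_HWFAIL) {
                set_state(dp, sm_disabled);
        }
}

static void sm_enabling(struct dport *dp, enum dport_event ev)
{
        if (ev == EV_FWRSP)
                set_state(dp, sm_enabled);
        else if (ev == EV_HWFAIL)
                set_state(dp, sm_disabled);
}

static void sm_enabled(struct dport *dp, enum dport_event ev)
{
        (void)dp; (void)ev;      /* disable side omitted in this sketch */
}

int main(void)
{
        struct dport dp = { .sm = sm_disabled, .queue_has_room = false };

        send_event(&dp, EV_ENABLE);   /* queue full -> enabling_qwait */
        dp.queue_has_room = true;
        send_event(&dp, EV_QRESUME);  /* request finally sent -> enabling */
        send_event(&dp, EV_FWRSP);    /* firmware response -> enabled */

        printf("enabled: %s\n", dp.sm == sm_enabled ? "yes" : "no");
        return 0;
}

Running the sketch walks the same disabled -> enabling_qwait -> enabling -> enabled path the enable flow takes when the request queue is initially full.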
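The enable/disable request itself is a small packed message (BFI_DIAG_H2I_DPORT) whose msgtag is bumped before every send so that a late firmware reply to an older request can be recognized and ignored. A layout sketch follows, assuming the 4-byte bfi_mhdr_s noted in the header comment; the placeholder header fields are not the real bfi_mhdr_s definition, only a stand-in so the arithmetic can be checked standalone.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#pragma pack(1)
struct mhdr_placeholder {       /* assumed 4-byte stand-in for bfi_mhdr_s */
        uint8_t  msg_class;
        uint8_t  msg_id;
        uint16_t token;
};

struct diag_dport_req {         /* mirrors bfi_diag_dport_req_s */
        struct mhdr_placeholder mh;
        uint8_t  req;           /* 1: enable, 0: disable */
        uint8_t  status;        /* reply status */
        uint8_t  rsvd[2];
        uint32_t msgtag;        /* echoed back; stale replies are dropped */
};
#pragma pack()

int main(void)
{
        /* 4-byte header + 1 + 1 + 2 + 4 = 12 bytes on the wire */
        printf("sizeof(diag_dport_req) = %zu\n", sizeof(struct diag_dport_req));
        assert(sizeof(struct diag_dport_req) == 12);
        return 0;
}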
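On the ioctl side, bfad_iocmd_diag_cfg_dport() and the new FRU/TFRU handlers all use the same shape: take bfad_lock, start the asynchronous operation with bfad_hcb_comp as the callback, drop the lock, and only then wait_for_completion() for the final status. Below is a userspace model of that pattern with pthread primitives standing in for the kernel spinlock/completion (build with -pthread); none of these names are driver API.

#include <pthread.h>
#include <stdio.h>

struct hal_comp {                      /* stands in for bfad_hal_comp */
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int done;
        int status;
};

static void comp_init(struct hal_comp *c)
{
        pthread_mutex_init(&c->lock, NULL);
        pthread_cond_init(&c->cond, NULL);
        c->done = 0;
        c->status = -1;
}

static void comp_complete(struct hal_comp *c, int status)
{
        pthread_mutex_lock(&c->lock);
        c->status = status;
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
}

static int comp_wait(struct hal_comp *c)
{
        pthread_mutex_lock(&c->lock);
        while (!c->done)
                pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
        return c->status;
}

/* stands in for the asynchronous firmware/response path */
static void *fw_response(void *arg)
{
        comp_complete(arg, 0);         /* 0 ~ BFA_STATUS_OK */
        return NULL;
}

int main(void)
{
        struct hal_comp fcomp;
        pthread_t fw;

        comp_init(&fcomp);
        /* "issue the request" while holding the driver lock, then drop it ... */
        pthread_create(&fw, NULL, fw_response, &fcomp);
        /* ... and block for the result only after the lock is released */
        printf("status = %d\n", comp_wait(&fcomp));
        pthread_join(fw, NULL);
        return 0;
}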
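bfad_iocmd_ioc_get_info() changes how adapter_hwpath is trimmed: instead of walking backwards to the last '.', the string is now cut after its second ':'. A small sketch of the new loop, using an assumed PCI-name-style input; BFA_STRING_32 is replaced by a local constant, and the sketch keeps the driver's evaluation order, in which the character is read before the bound is tested.

#include <stdio.h>
#include <string.h>

#define HWPATH_MAX 32                  /* stands in for BFA_STRING_32 */

static void trim_hwpath(char *hwpath)
{
        int i;

        for (i = 0; hwpath[i] != ':' && i < HWPATH_MAX; i++)
                ;
        for (; hwpath[++i] != ':' && i < HWPATH_MAX; )
                ;                      /* same check order as the driver */
        hwpath[i] = '\0';
}

int main(void)
{
        char path[HWPATH_MAX];

        strcpy(path, "0000:0a:00.0");  /* assumed PCI-name-style example */
        trim_hwpath(path);
        printf("%s\n", path);          /* prints "0000:0a" */
        return 0;
}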
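Several of the reworked QoS, rate-limit and trunk handlers gained a common guard that fails the request with BFA_STATUS_TOPOLOGY_LOOP when the port is in loop topology: the QoS and rate-limit paths require both the configured and the active topology to be loop, while the trunk paths (and bfa_dport_enable()) reject when either one is. A sketch of just that predicate, with illustrative enum values; only the shape of the check comes from the patch.

#include <stdbool.h>
#include <stdio.h>

enum port_topology { TOPO_NONE, TOPO_P2P, TOPO_LOOP };  /* illustrative */

struct fcport {
        enum port_topology cfg_topology;   /* configured topology */
        enum port_topology topology;       /* currently active topology */
};

static bool qos_op_blocked_by_loop(const struct fcport *p)
{
        /* QoS and rate-limit paths require both to be loop ... */
        return p->cfg_topology == TOPO_LOOP && p->topology == TOPO_LOOP;
}

static bool trunk_op_blocked_by_loop(const struct fcport *p)
{
        /* ... while trunk config (and dport enable) rejects if either is */
        return p->cfg_topology == TOPO_LOOP || p->topology == TOPO_LOOP;
}

int main(void)
{
        struct fcport p = { .cfg_topology = TOPO_LOOP, .topology = TOPO_P2P };

        printf("qos blocked: %d, trunk blocked: %d\n",
               qos_op_blocked_by_loop(&p), trunk_op_blocked_by_loop(&p));
        return 0;
}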