Diffstat (limited to 'drivers/scsi/lpfc')
27 files changed, 11668 insertions, 4253 deletions
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile index 88928f00aa2..e2516ba8ebf 100644 --- a/drivers/scsi/lpfc/Makefile +++ b/drivers/scsi/lpfc/Makefile @@ -1,7 +1,7 @@ #/******************************************************************* # * This file is part of the Emulex Linux Device Driver for * # * Fibre Channel Host Bus Adapters. * -# * Copyright (C) 2004-2011 Emulex. All rights reserved. * +# * Copyright (C) 2004-2012 Emulex. All rights reserved. * # * EMULEX and SLI are trademarks of Emulex. * # * www.emulex.com * # * * @@ -22,6 +22,10 @@ ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage ccflags-$(GCOV) += -O0 +ifdef WARNINGS_BECOME_ERRORS +ccflags-y += -Werror +endif + obj-$(CONFIG_SCSI_LPFC) := lpfc.o lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \ diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 825f9307417..434e9037908 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2011 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -27,6 +27,8 @@ struct lpfc_sli2_slim; +#define ELX_MODEL_NAME_SIZE 80 + #define LPFC_PCI_DEV_LP 0x1 #define LPFC_PCI_DEV_OC 0x2 @@ -44,10 +46,15 @@ struct lpfc_sli2_slim; #define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128 /* sg element count per scsi cmnd for menlo needs nearly twice as for firmware downloads using bsg */ -#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */ + +#define LPFC_MIN_SG_SLI4_BUF_SZ 0x800 /* based on LPFC_DEFAULT_SG_SEG_CNT */ +#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */ +#define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */ #define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */ +#define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */ +#define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */ + #define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */ -#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/ #define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ #define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ #define LPFC_VNAME_LEN 100 /* vport symbolic name length */ @@ -64,8 +71,8 @@ struct lpfc_sli2_slim; * queue depths when there are driver resource error or Firmware * resource error. */ -#define QUEUE_RAMP_DOWN_INTERVAL (1 * HZ) /* 1 Second */ -#define QUEUE_RAMP_UP_INTERVAL (300 * HZ) /* 5 minutes */ +/* 1 Second */ +#define QUEUE_RAMP_DOWN_INTERVAL (msecs_to_jiffies(1000 * 1)) /* Number of exchanges reserved for discovery to complete */ #define LPFC_DISC_IOCB_BUFF_COUNT 20 @@ -73,6 +80,8 @@ struct lpfc_sli2_slim; #define LPFC_HB_MBOX_INTERVAL 5 /* Heart beat interval in seconds. */ #define LPFC_HB_MBOX_TIMEOUT 30 /* Heart beat timeout in seconds. 
*/ +#define LPFC_LOOK_AHEAD_OFF 0 /* Look ahead logic is turned off */ + /* Error Attention event polling interval */ #define LPFC_ERATT_POLL_INTERVAL 5 /* EATT poll interval in seconds */ @@ -93,6 +102,13 @@ struct lpfc_sli2_slim; /* lpfc wait event data ready flag */ #define LPFC_DATA_READY (1<<0) +/* queue dump line buffer size */ +#define LPFC_LBUF_SZ 128 + +/* mailbox system shutdown options */ +#define LPFC_MBX_NO_WAIT 0 +#define LPFC_MBX_WAIT 1 + enum lpfc_polling_flags { ENABLE_FCP_RING_POLLING = 0x1, DISABLE_FCP_RING_INT = 0x2 @@ -403,6 +419,7 @@ struct lpfc_vport { uint32_t cfg_enable_da_id; uint32_t cfg_max_scsicmpl_time; uint32_t cfg_tgt_queue_depth; + uint32_t cfg_first_burst_size; uint32_t dev_loss_tmo_changed; @@ -455,11 +472,13 @@ enum intr_type_t { MSIX, }; +#define LPFC_CT_CTX_MAX 64 struct unsol_rcv_ct_ctx { uint32_t ctxt_id; uint32_t SID; - uint32_t flags; -#define UNSOL_VALID 0x00000001 + uint32_t valid; +#define UNSOL_INVALID 0 +#define UNSOL_VALID 1 uint16_t oxid; uint16_t rxid; }; @@ -534,6 +553,7 @@ struct lpfc_hba { void (*lpfc_scsi_prep_cmnd) (struct lpfc_vport *, struct lpfc_scsi_buf *, struct lpfc_nodelist *); + /* IOCB interface function jump table entries */ int (*__lpfc_sli_issue_iocb) (struct lpfc_hba *, uint32_t, @@ -541,8 +561,6 @@ struct lpfc_hba { void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *, struct lpfc_iocbq *); int (*lpfc_hba_down_post)(struct lpfc_hba *phba); - - IOCB_t * (*lpfc_get_iocb_from_iocbq) (struct lpfc_iocbq *); void (*lpfc_scsi_cmd_iocb_cmpl) @@ -551,10 +569,12 @@ struct lpfc_hba { /* MBOX interface function jump table entries */ int (*lpfc_sli_issue_mbox) (struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); + /* Slow-path IOCB process function jump table entries */ void (*lpfc_sli_handle_slow_ring_event) (struct lpfc_hba *phba, struct lpfc_sli_ring *pring, uint32_t mask); + /* INIT device interface function jump table entries */ int (*lpfc_sli_hbq_to_firmware) (struct lpfc_hba *, uint32_t, struct hbq_dmabuf *); @@ -573,6 +593,10 @@ struct lpfc_hba { int (*lpfc_selective_reset) (struct lpfc_hba *); + int (*lpfc_bg_scsi_prep_dma_buf) + (struct lpfc_hba *, struct lpfc_scsi_buf *); + /* Add new entries here */ + /* SLI4 specific HBA data structure */ struct lpfc_sli4_hba sli4_hba; @@ -615,6 +639,8 @@ struct lpfc_hba { #define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */ #define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */ #define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */ +#define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */ +#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */ uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ struct lpfc_dmabuf slim2p; @@ -652,6 +678,7 @@ struct lpfc_hba { uint32_t lmt; uint32_t fc_topology; /* link topology, from LINK INIT */ + uint32_t fc_topology_changed; /* link topology, from LINK INIT */ struct lpfc_stats fc_stat; @@ -671,6 +698,8 @@ struct lpfc_hba { #define LPFC_FCF_FOV 1 /* Fast fcf failover */ #define LPFC_FCF_PRIORITY 2 /* Priority fcf failover */ uint32_t cfg_fcf_failover_policy; + uint32_t cfg_fcp_io_sched; + uint32_t cfg_fcp2_no_tgt_reset; uint32_t cfg_cr_delay; uint32_t cfg_cr_count; uint32_t cfg_multi_ring_support; @@ -678,10 +707,12 @@ struct lpfc_hba { uint32_t cfg_multi_ring_type; uint32_t cfg_poll; uint32_t cfg_poll_tmo; + uint32_t cfg_task_mgmt_tmo; uint32_t cfg_use_msi; uint32_t cfg_fcp_imax; - uint32_t cfg_fcp_wq_count; - uint32_t cfg_fcp_eq_count; + uint32_t cfg_fcp_cpu_map; + uint32_t cfg_fcp_io_channel; + 
uint32_t cfg_total_seg_cnt; uint32_t cfg_sg_seg_cnt; uint32_t cfg_prot_sg_seg_cnt; uint32_t cfg_sg_dma_buf_size; @@ -690,13 +721,29 @@ struct lpfc_hba { uint32_t cfg_hba_queue_depth; uint32_t cfg_enable_hba_reset; uint32_t cfg_enable_hba_heartbeat; + uint32_t cfg_fof; + uint32_t cfg_EnableXLane; + uint8_t cfg_oas_tgt_wwpn[8]; + uint8_t cfg_oas_vpt_wwpn[8]; + uint32_t cfg_oas_lun_state; +#define OAS_LUN_ENABLE 1 +#define OAS_LUN_DISABLE 0 + uint32_t cfg_oas_lun_status; +#define OAS_LUN_STATUS_EXISTS 0x01 + uint32_t cfg_oas_flags; +#define OAS_FIND_ANY_VPORT 0x01 +#define OAS_FIND_ANY_TARGET 0x02 +#define OAS_LUN_VALID 0x04 + uint32_t cfg_XLanePriority; uint32_t cfg_enable_bg; uint32_t cfg_hostmem_hgp; uint32_t cfg_log_verbose; uint32_t cfg_aer_support; uint32_t cfg_sriov_nr_virtfn; + uint32_t cfg_request_firmware_upgrade; uint32_t cfg_iocb_cnt; uint32_t cfg_suppress_link_up; + uint32_t cfg_rrq_xri_bitmap_sz; #define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */ #define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */ #define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */ @@ -719,7 +766,7 @@ struct lpfc_hba { uint32_t hbq_count; /* Count of configured HBQs */ struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ - uint32_t fcp_qidx; /* next work queue to post work to */ + atomic_t fcp_qidx; /* next work queue to post work to */ unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */ unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */ @@ -729,6 +776,15 @@ struct lpfc_hba { void __iomem *ctrl_regs_memmap_p;/* Kernel memory mapped address for PCI BAR2 */ + void __iomem *pci_bar0_memmap_p; /* Kernel memory mapped address for + PCI BAR0 with dual-ULP support */ + void __iomem *pci_bar2_memmap_p; /* Kernel memory mapped address for + PCI BAR2 with dual-ULP support */ + void __iomem *pci_bar4_memmap_p; /* Kernel memory mapped address for + PCI BAR4 with dual-ULP support */ +#define PCI_64BIT_BAR0 0 +#define PCI_64BIT_BAR2 2 +#define PCI_64BIT_BAR4 4 void __iomem *MBslimaddr; /* virtual address for mbox cmds */ void __iomem *HAregaddr; /* virtual address for host attn reg */ void __iomem *CAregaddr; /* virtual address for chip attn reg */ @@ -772,8 +828,10 @@ struct lpfc_hba { uint64_t bg_reftag_err_cnt; /* fastpath list. 
*/ - spinlock_t scsi_buf_list_lock; - struct list_head lpfc_scsi_buf_list; + spinlock_t scsi_buf_list_get_lock; /* SCSI buf alloc list lock */ + spinlock_t scsi_buf_list_put_lock; /* SCSI buf free list lock */ + struct list_head lpfc_scsi_buf_list_get; + struct list_head lpfc_scsi_buf_list_put; uint32_t total_scsi_bufs; struct list_head lpfc_iocb_list; uint32_t total_iocbq_bufs; @@ -791,6 +849,7 @@ struct lpfc_hba { mempool_t *mbox_mem_pool; mempool_t *nlp_mem_pool; mempool_t *rrq_pool; + mempool_t *active_rrq_pool; struct fc_host_statistics link_stats; enum intr_type_t intr_type; @@ -825,7 +884,6 @@ struct lpfc_hba { atomic_t num_cmd_success; unsigned long last_rsrc_error_time; unsigned long last_ramp_down_time; - unsigned long last_ramp_up_time; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS struct dentry *hba_debugfs_root; atomic_t debugfs_vport_count; @@ -835,9 +893,12 @@ struct lpfc_hba { struct dentry *debug_dumpData; /* BlockGuard BPL */ struct dentry *debug_dumpDif; /* BlockGuard BPL */ struct dentry *debug_InjErrLBA; /* LBA to inject errors at */ + struct dentry *debug_InjErrNPortID; /* NPortID to inject errors at */ + struct dentry *debug_InjErrWWPN; /* WWPN to inject errors at */ struct dentry *debug_writeGuard; /* inject write guard_tag errors */ struct dentry *debug_writeApp; /* inject write app_tag errors */ struct dentry *debug_writeRef; /* inject write ref_tag errors */ + struct dentry *debug_readGuard; /* inject read guard_tag errors */ struct dentry *debug_readApp; /* inject read app_tag errors */ struct dentry *debug_readRef; /* inject read ref_tag errors */ @@ -845,10 +906,13 @@ struct lpfc_hba { uint32_t lpfc_injerr_wgrd_cnt; uint32_t lpfc_injerr_wapp_cnt; uint32_t lpfc_injerr_wref_cnt; + uint32_t lpfc_injerr_rgrd_cnt; uint32_t lpfc_injerr_rapp_cnt; uint32_t lpfc_injerr_rref_cnt; + uint32_t lpfc_injerr_nportid; + struct lpfc_name lpfc_injerr_wwpn; sector_t lpfc_injerr_lba; -#define LPFC_INJERR_LBA_OFF (sector_t)0xffffffffffffffff +#define LPFC_INJERR_LBA_OFF (sector_t)(-1) struct dentry *debug_slow_ring_trc; struct lpfc_debugfs_trc *slow_ring_trc; @@ -901,6 +965,8 @@ struct lpfc_hba { atomic_t fast_event_count; uint32_t fcoe_eventtag; uint32_t fcoe_eventtag_at_fcf_scan; + uint32_t fcoe_cvl_eventtag; + uint32_t fcoe_cvl_eventtag_attn; struct lpfc_fcf fcf; uint8_t fc_map[3]; uint8_t valid_vlan; @@ -909,7 +975,7 @@ struct lpfc_hba { spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */ struct list_head ct_ev_waiters; - struct unsol_rcv_ct_ctx ct_ctx[64]; + struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX]; uint32_t ctx_idx; uint8_t menlo_flag; /* menlo generic flags */ @@ -919,6 +985,9 @@ struct lpfc_hba { atomic_t sdev_cnt; uint8_t fips_spec_rev; uint8_t fips_level; + spinlock_t devicelock; /* lock for luns list */ + mempool_t *device_data_mem_pool; + struct list_head luns; }; static inline struct Scsi_Host * diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index f6697cb0e21..1d7a5c34ee8 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2011 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -353,7 +353,7 @@ lpfc_fwrev_show(struct device *dev, struct device_attribute *attr, struct lpfc_hba *phba = vport->phba; uint32_t if_type; uint8_t sli_family; - char fwrev[32]; + char fwrev[FW_REV_STR_SIZE]; int len; lpfc_decode_firmware_rev(phba, fwrev, 1); @@ -529,6 +529,27 @@ lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr, } /** + * lpfc_oas_supported_show - Return whether or not Optimized Access Storage + * (OAS) is supported. + * @dev: class unused variable. + * @attr: device attribute, not used. + * @buf: on return contains the module description text. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + return snprintf(buf, PAGE_SIZE, "%d\n", + phba->sli4_hba.pc_sli4_params.oas_supported); +} + +/** * lpfc_link_state_store - Transition the link_state on an HBA port * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. @@ -692,7 +713,7 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) */ for (i = 0; i < psli->num_rings; i++) { pring = &psli->ring[i]; - while (pring->txcmplq_cnt) { + while (!list_empty(&pring->txcmplq)) { msleep(10); if (cnt++ > 500) { /* 5 secs */ lpfc_printf_log(phba, @@ -744,10 +765,12 @@ lpfc_selective_reset(struct lpfc_hba *phba) if (!phba->cfg_enable_hba_reset) return -EACCES; - status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); + if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) { + status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); - if (status != 0) - return status; + if (status != 0) + return status; + } init_completion(&online_compl); rc = lpfc_workq_post_event(phba, &status, &online_compl, @@ -814,7 +837,7 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr, * the readyness after performing a firmware reset. * * Returns: - * zero for success, -EPERM when port does not have privilage to perform the + * zero for success, -EPERM when port does not have privilege to perform the * reset, -EIO when port timeout from recovering from the reset. 
* * Note: @@ -831,7 +854,7 @@ lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba) lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, &portstat_reg.word0); - /* verify if privilaged for the request operation */ + /* verify if privileged for the request operation */ if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) && !bf_get(lpfc_sliport_status_err, &portstat_reg)) return -EPERM; @@ -895,10 +918,16 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) pci_disable_sriov(pdev); phba->cfg_sriov_nr_virtfn = 0; } + + if (opcode == LPFC_FW_DUMP) + phba->hba_flag |= HBA_FW_DUMP_OP; + status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); - if (status != 0) + if (status != 0) { + phba->hba_flag &= ~HBA_FW_DUMP_OP; return status; + } /* wait for the device to be quiesced before firmware reset */ msleep(100); @@ -922,11 +951,15 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) rc = lpfc_sli4_pdev_status_reg_wait(phba); if (rc == -EPERM) { - /* no privilage for reset, restore if needed */ - if (before_fc_flag & FC_OFFLINE_MODE) - goto out; + /* no privilege for reset */ + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "3150 No privilege to perform the requested " + "access: x%x\n", reg_val); } else if (rc == -EIO) { /* reset failed, there is nothing more we can do */ + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "3153 Fail to perform the requested " + "access: x%x\n", reg_val); return rc; } @@ -1858,8 +1891,10 @@ lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \ { \ if (val >= minval && val <= maxval) {\ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ - "3053 lpfc_" #attr " changed from %d to %d\n", \ - vport->cfg_##attr, val); \ + "3053 lpfc_" #attr \ + " changed from %d (x%x) to %d (x%x)\n", \ + vport->cfg_##attr, vport->cfg_##attr, \ + val, val); \ vport->cfg_##attr = val;\ return 0;\ }\ @@ -2032,9 +2067,53 @@ static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL); static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO, lpfc_sriov_hw_max_virtfn_show, NULL); static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL); +static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show, + NULL); static char *lpfc_soft_wwn_key = "C99G71SL8032A"; +#define WWN_SZ 8 +/** + * lpfc_wwn_set - Convert string to the 8 byte WWN value. + * @buf: WWN string. + * @cnt: Length of string. + * @wwn: Array to receive converted wwn value. + * + * Returns: + * -EINVAL if the buffer does not contain a valid wwn + * 0 success + **/ +static size_t +lpfc_wwn_set(const char *buf, size_t cnt, char wwn[]) +{ + unsigned int i, j; + + /* Count may include a LF at end of string */ + if (buf[cnt-1] == '\n') + cnt--; + + if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) || + ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x')))) + return -EINVAL; + + memset(wwn, 0, WWN_SZ); + /* Validate and store the new name */ + for (i = 0, j = 0; i < 16; i++) { + if ((*buf >= 'a') && (*buf <= 'f')) + j = ((j << 4) | ((*buf++ - 'a') + 10)); + else if ((*buf >= 'A') && (*buf <= 'F')) + j = ((j << 4) | ((*buf++ - 'A') + 10)); + else if ((*buf >= '0') && (*buf <= '9')) + j = ((j << 4) | (*buf++ - '0')); + else + return -EINVAL; + if (i % 2) { + wwn[i/2] = j & 0xff; + j = 0; + } + } + return 0; +} /** * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid * @dev: class device that is converted into a Scsi_host. 
@@ -2123,9 +2202,9 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; struct completion online_compl; - int stat1=0, stat2=0; - unsigned int i, j, cnt=count; - u8 wwpn[8]; + int stat1 = 0, stat2 = 0; + unsigned int cnt = count; + u8 wwpn[WWN_SZ]; int rc; if (!phba->cfg_enable_hba_reset) @@ -2140,29 +2219,19 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr, if (buf[cnt-1] == '\n') cnt--; - if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) || - ((cnt == 17) && (*buf++ != 'x')) || - ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x')))) + if (!phba->soft_wwn_enable) return -EINVAL; + /* lock setting wwpn, wwnn down */ phba->soft_wwn_enable = 0; - memset(wwpn, 0, sizeof(wwpn)); - - /* Validate and store the new name */ - for (i=0, j=0; i < 16; i++) { - int value; - - value = hex_to_bin(*buf++); - if (value >= 0) - j = (j << 4) | value; - else - return -EINVAL; - if (i % 2) { - wwpn[i/2] = j & 0xff; - j = 0; - } + rc = lpfc_wwn_set(buf, cnt, wwpn); + if (!rc) { + /* not able to set wwpn, unlock it */ + phba->soft_wwn_enable = 1; + return rc; } + phba->cfg_soft_wwpn = wwn_to_u64(wwpn); fc_host_port_name(shost) = phba->cfg_soft_wwpn; if (phba->cfg_soft_wwnn) @@ -2189,7 +2258,7 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr, "reinit adapter - %d\n", stat2); return (stat1 || stat2) ? -EIO : count; } -static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\ +static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR, lpfc_soft_wwpn_show, lpfc_soft_wwpn_store); /** @@ -2226,39 +2295,25 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr, { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - unsigned int i, j, cnt=count; - u8 wwnn[8]; + unsigned int cnt = count; + u8 wwnn[WWN_SZ]; + int rc; /* count may include a LF at end of string */ if (buf[cnt-1] == '\n') cnt--; - if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) || - ((cnt == 17) && (*buf++ != 'x')) || - ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x')))) + if (!phba->soft_wwn_enable) return -EINVAL; - /* - * Allow wwnn to be set many times, as long as the enable is set. - * However, once the wwpn is set, everything locks. - */ - - memset(wwnn, 0, sizeof(wwnn)); - - /* Validate and store the new name */ - for (i=0, j=0; i < 16; i++) { - int value; - - value = hex_to_bin(*buf++); - if (value >= 0) - j = (j << 4) | value; - else - return -EINVAL; - if (i % 2) { - wwnn[i/2] = j & 0xff; - j = 0; - } + rc = lpfc_wwn_set(buf, cnt, wwnn); + if (!rc) { + /* Allow wwnn to be set many times, as long as the enable + * is set. However, once the wwpn is set, everything locks. + */ + return rc; } + phba->cfg_soft_wwnn = wwn_to_u64(wwnn); dev_printk(KERN_NOTICE, &phba->pcidev->dev, @@ -2267,9 +2322,438 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr, return count; } -static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\ +static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR, lpfc_soft_wwnn_show, lpfc_soft_wwnn_store); +/** + * lpfc_oas_tgt_show - Return wwpn of target whose luns maybe enabled for + * Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. 
+ * + * Returns: + * value of count + **/ +static ssize_t +lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + return snprintf(buf, PAGE_SIZE, "0x%llx\n", + wwn_to_u64(phba->cfg_oas_tgt_wwpn)); +} + +/** + * lpfc_oas_tgt_store - Store wwpn of target whose luns maybe enabled for + * Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * @count: Size of the data buffer. + * + * Returns: + * -EINVAL count is invalid, invalid wwpn byte invalid + * -EPERM oas is not supported by hba + * value of count on success + **/ +static ssize_t +lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + unsigned int cnt = count; + uint8_t wwpn[WWN_SZ]; + int rc; + + if (!phba->cfg_fof) + return -EPERM; + + /* count may include a LF at end of string */ + if (buf[cnt-1] == '\n') + cnt--; + + rc = lpfc_wwn_set(buf, cnt, wwpn); + if (rc) + return rc; + + memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t))); + memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t))); + if (wwn_to_u64(wwpn) == 0) + phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET; + else + phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET; + phba->cfg_oas_flags &= ~OAS_LUN_VALID; + phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN; + return count; +} +static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR, + lpfc_oas_tgt_show, lpfc_oas_tgt_store); + +/** + * lpfc_oas_vpt_show - Return wwpn of vport whose targets maybe enabled + * for Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * + * Returns: + * value of count on success + **/ +static ssize_t +lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + return snprintf(buf, PAGE_SIZE, "0x%llx\n", + wwn_to_u64(phba->cfg_oas_vpt_wwpn)); +} + +/** + * lpfc_oas_vpt_store - Store wwpn of vport whose targets maybe enabled + * for Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * @count: Size of the data buffer. 
+ * + * Returns: + * -EINVAL count is invalid, invalid wwpn byte invalid + * -EPERM oas is not supported by hba + * value of count on success + **/ +static ssize_t +lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + unsigned int cnt = count; + uint8_t wwpn[WWN_SZ]; + int rc; + + if (!phba->cfg_fof) + return -EPERM; + + /* count may include a LF at end of string */ + if (buf[cnt-1] == '\n') + cnt--; + + rc = lpfc_wwn_set(buf, cnt, wwpn); + if (rc) + return rc; + + memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t))); + memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t))); + if (wwn_to_u64(wwpn) == 0) + phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT; + else + phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT; + phba->cfg_oas_flags &= ~OAS_LUN_VALID; + phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN; + return count; +} +static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR, + lpfc_oas_vpt_show, lpfc_oas_vpt_store); + +/** + * lpfc_oas_lun_state_show - Return the current state (enabled or disabled) + * of whether luns will be enabled or disabled + * for Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * + * Returns: + * size of formatted string. + **/ +static ssize_t +lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state); +} + +/** + * lpfc_oas_lun_state_store - Store the state (enabled or disabled) + * of whether luns will be enabled or disabled + * for Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * @count: Size of the data buffer. + * + * Returns: + * -EINVAL count is invalid, invalid wwpn byte invalid + * -EPERM oas is not supported by hba + * value of count on success + **/ +static ssize_t +lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + int val = 0; + + if (!phba->cfg_fof) + return -EPERM; + + if (!isdigit(buf[0])) + return -EINVAL; + + if (sscanf(buf, "%i", &val) != 1) + return -EINVAL; + + if ((val != 0) && (val != 1)) + return -EINVAL; + + phba->cfg_oas_lun_state = val; + + return strlen(buf); +} +static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR, + lpfc_oas_lun_state_show, lpfc_oas_lun_state_store); + +/** + * lpfc_oas_lun_status_show - Return the status of the Optimized Access + * Storage (OAS) lun returned by the + * lpfc_oas_lun_show function. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * + * Returns: + * size of formatted string. 
+ **/ +static ssize_t +lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + if (!(phba->cfg_oas_flags & OAS_LUN_VALID)) + return -EFAULT; + + return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status); +} +static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO, + lpfc_oas_lun_status_show, NULL); + + +/** + * lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage + * (OAS) operations. + * @phba: lpfc_hba pointer. + * @ndlp: pointer to fcp target node. + * @lun: the fc lun for setting oas state. + * @oas_state: the oas state to be set to the lun. + * + * Returns: + * SUCCESS : 0 + * -EPERM OAS is not enabled or not supported by this port. + * + */ +static size_t +lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[], + uint8_t tgt_wwpn[], uint64_t lun, uint32_t oas_state) +{ + + int rc = 0; + + if (!phba->cfg_fof) + return -EPERM; + + if (oas_state) { + if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn, + (struct lpfc_name *)tgt_wwpn, lun)) + rc = -ENOMEM; + } else { + lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn, + (struct lpfc_name *)tgt_wwpn, lun); + } + return rc; + +} + +/** + * lpfc_oas_lun_get_next - get the next lun that has been enabled for Optimized + * Access Storage (OAS) operations. + * @phba: lpfc_hba pointer. + * @vpt_wwpn: wwpn of the vport associated with the returned lun + * @tgt_wwpn: wwpn of the target associated with the returned lun + * @lun_status: status of the lun returned lun + * + * Returns the first or next lun enabled for OAS operations for the vport/target + * specified. If a lun is found, its vport wwpn, target wwpn and status is + * returned. If the lun is not found, NOT_OAS_ENABLED_LUN is returned. + * + * Return: + * lun that is OAS enabled for the vport/target + * NOT_OAS_ENABLED_LUN when no oas enabled lun found. + */ +static uint64_t +lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[], + uint8_t tgt_wwpn[], uint32_t *lun_status) +{ + uint64_t found_lun; + + if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn) + return NOT_OAS_ENABLED_LUN; + if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *) + phba->sli4_hba.oas_next_vpt_wwpn, + (struct lpfc_name *) + phba->sli4_hba.oas_next_tgt_wwpn, + &phba->sli4_hba.oas_next_lun, + (struct lpfc_name *)vpt_wwpn, + (struct lpfc_name *)tgt_wwpn, + &found_lun, lun_status)) + return found_lun; + else + return NOT_OAS_ENABLED_LUN; +} + +/** + * lpfc_oas_lun_state_change - enable/disable a lun for OAS operations + * @phba: lpfc_hba pointer. + * @vpt_wwpn: vport wwpn by reference. + * @tgt_wwpn: target wwpn by reference. + * @lun: the fc lun for setting oas state. + * @oas_state: the oas state to be set to the oas_lun. + * + * This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE) + * a lun for OAS operations. + * + * Return: + * SUCCESS: 0 + * -ENOMEM: failed to enable an lun for OAS operations + * -EPERM: OAS is not enabled + */ +static ssize_t +lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[], + uint8_t tgt_wwpn[], uint64_t lun, + uint32_t oas_state) +{ + + int rc; + + rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun, + oas_state); + return rc; +} + +/** + * lpfc_oas_lun_show - Return oas enabled luns from a chosen target + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. 
+ * @buf: buffer for passing information. + * + * This routine returns a lun enabled for OAS each time the function + * is called. + * + * Returns: + * SUCCESS: size of formatted string. + * -EFAULT: target or vport wwpn was not set properly. + * -EPERM: oas is not enabled. + **/ +static ssize_t +lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + uint64_t oas_lun; + int len = 0; + + if (!phba->cfg_fof) + return -EPERM; + + if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0) + if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)) + return -EFAULT; + + if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0) + if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)) + return -EFAULT; + + oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn, + phba->cfg_oas_tgt_wwpn, + &phba->cfg_oas_lun_status); + if (oas_lun != NOT_OAS_ENABLED_LUN) + phba->cfg_oas_flags |= OAS_LUN_VALID; + + len += snprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun); + + return len; +} + +/** + * lpfc_oas_lun_store - Sets the OAS state for lun + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * + * This function sets the OAS state for lun. Before this function is called, + * the vport wwpn, target wwpn, and oas state need to be set. + * + * Returns: + * SUCCESS: size of formatted string. + * -EFAULT: target or vport wwpn was not set properly. + * -EPERM: oas is not enabled. + * size of formatted string. + **/ +static ssize_t +lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + uint64_t scsi_lun; + ssize_t rc; + + if (!phba->cfg_fof) + return -EPERM; + + if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0) + return -EFAULT; + + if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0) + return -EFAULT; + + if (!isdigit(buf[0])) + return -EINVAL; + + if (sscanf(buf, "0x%llx", &scsi_lun) != 1) + return -EINVAL; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3372 Try to set vport 0x%llx target 0x%llx lun:%lld " + "with oas set to %d\n", + wwn_to_u64(phba->cfg_oas_vpt_wwpn), + wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun, + phba->cfg_oas_lun_state); + + rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn, + phba->cfg_oas_tgt_wwpn, scsi_lun, + phba->cfg_oas_lun_state); + + if (rc) + return rc; + + return count; +} +static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR, + lpfc_oas_lun_show, lpfc_oas_lun_store); static int lpfc_poll = 0; module_param(lpfc_poll, int, S_IRUGO); @@ -2298,11 +2782,17 @@ static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL); LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2, "FCF Fast failover=1 Priority failover=2"); -int lpfc_enable_rrq; +int lpfc_enable_rrq = 2; module_param(lpfc_enable_rrq, int, S_IRUGO); MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality"); lpfc_param_show(enable_rrq); -lpfc_param_init(enable_rrq, 0, 0, 1); +/* +# lpfc_enable_rrq: Track XRI/OXID reuse after IO failures +# 0x0 = disabled, XRI/OXID use not tracked. +# 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent. +# 0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent. 
+*/ +lpfc_param_init(enable_rrq, 2, 0, 2); static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL); /* @@ -2571,14 +3061,17 @@ LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff, # lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters # objects that have been registered with the nameserver after login. */ -LPFC_VPORT_ATTR_R(enable_da_id, 0, 0, 1, +LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1, "Deregister nameserver objects before LOGO"); /* # lun_queue_depth: This parameter is used to limit the number of outstanding -# commands per FCP LUN. Value range is [1,128]. Default value is 30. +# commands per FCP LUN. Value range is [1,512]. Default value is 30. +# If this parameter value is greater than 1/8th the maximum number of exchanges +# supported by the HBA port, then the lun queue depth will be reduced to +# 1/8th the maximum number of exchanges. */ -LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 128, +LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 512, "Max number of FCP commands we can queue to a specific LUN"); /* @@ -2586,7 +3079,7 @@ LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 128, # commands per target port. Value range is [10,65535]. Default value is 65535. */ LPFC_VPORT_ATTR_R(tgt_queue_depth, 65535, 10, 65535, - "Max number of FCP commands we can queue to a specific target port"); + "Max number of FCP commands we can queue to a specific target port"); /* # hba_queue_depth: This parameter is used to limit the number of outstanding @@ -2791,6 +3284,8 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr, lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "3054 lpfc_topology changed from %d to %d\n", prev_val, val); + if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4) + phba->fc_topology_changed = 1; err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); if (err) { phba->cfg_topology = prev_val; @@ -3613,6 +4108,326 @@ lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val) static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR, lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store); +/** + * lpfc_request_firmware_store - Request for Linux generic firmware upgrade + * + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: containing the string the number of vfs to be enabled. + * @count: unused variable. + * + * Description: + * + * Returns: + * length of the buf on success if val is in range the intended mode + * is supported. + * -EINVAL if val out of range or intended mode is not supported. + **/ +static ssize_t +lpfc_request_firmware_upgrade_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + int val = 0, rc = -EINVAL; + + /* Sanity check on user data */ + if (!isdigit(buf[0])) + return -EINVAL; + if (sscanf(buf, "%i", &val) != 1) + return -EINVAL; + if (val != 1) + return -EINVAL; + + rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE); + if (rc) + rc = -EPERM; + else + rc = strlen(buf); + return rc; +} + +static int lpfc_req_fw_upgrade; +module_param(lpfc_req_fw_upgrade, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(lpfc_req_fw_upgrade, "Enable Linux generic firmware upgrade"); +lpfc_param_show(request_firmware_upgrade) + +/** + * lpfc_request_firmware_upgrade_init - Enable initial linux generic fw upgrade + * @phba: lpfc_hba pointer. + * @val: 0 or 1. 
+ * + * Description: + * Set the initial Linux generic firmware upgrade enable or disable flag. + * + * Returns: + * zero if val saved. + * -EINVAL val out of range + **/ +static int +lpfc_request_firmware_upgrade_init(struct lpfc_hba *phba, int val) +{ + if (val >= 0 && val <= 1) { + phba->cfg_request_firmware_upgrade = val; + return 0; + } + return -EINVAL; +} +static DEVICE_ATTR(lpfc_req_fw_upgrade, S_IRUGO | S_IWUSR, + lpfc_request_firmware_upgrade_show, + lpfc_request_firmware_upgrade_store); + +/** + * lpfc_fcp_imax_store + * + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: string with the number of fast-path FCP interrupts per second. + * @count: unused variable. + * + * Description: + * If val is in a valid range [636,651042], then set the adapter's + * maximum number of fast-path FCP interrupts per second. + * + * Returns: + * length of the buf on success if val is in range the intended mode + * is supported. + * -EINVAL if val out of range or intended mode is not supported. + **/ +static ssize_t +lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + int val = 0, i; + + /* fcp_imax is only valid for SLI4 */ + if (phba->sli_rev != LPFC_SLI_REV4) + return -EINVAL; + + /* Sanity check on user data */ + if (!isdigit(buf[0])) + return -EINVAL; + if (sscanf(buf, "%i", &val) != 1) + return -EINVAL; + + /* + * Value range for the HBA is [5000,5000000] + * The value for each EQ depends on how many EQs are configured. + */ + if (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX) + return -EINVAL; + + phba->cfg_fcp_imax = (uint32_t)val; + for (i = 0; i < phba->cfg_fcp_io_channel; i += LPFC_MAX_EQ_DELAY) + lpfc_modify_fcp_eq_delay(phba, i); + + return strlen(buf); +} + +/* +# lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second +# for the HBA. +# +# Value range is [5,000 to 5,000,000]. Default value is 50,000. +*/ +static int lpfc_fcp_imax = LPFC_DEF_IMAX; +module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(lpfc_fcp_imax, + "Set the maximum number of FCP interrupts per second per HBA"); +lpfc_param_show(fcp_imax) + +/** + * lpfc_fcp_imax_init - Set the initial sr-iov virtual function enable + * @phba: lpfc_hba pointer. + * @val: link speed value. + * + * Description: + * If val is in a valid range [636,651042], then initialize the adapter's + * maximum number of fast-path FCP interrupts per second. + * + * Returns: + * zero if val saved. + * -EINVAL val out of range + **/ +static int +lpfc_fcp_imax_init(struct lpfc_hba *phba, int val) +{ + if (phba->sli_rev != LPFC_SLI_REV4) { + phba->cfg_fcp_imax = 0; + return 0; + } + + if (val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) { + phba->cfg_fcp_imax = val; + return 0; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3016 fcp_imax: %d out of range, using default\n", val); + phba->cfg_fcp_imax = LPFC_DEF_IMAX; + + return 0; +} + +static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR, + lpfc_fcp_imax_show, lpfc_fcp_imax_store); + +/** + * lpfc_state_show - Display current driver CPU affinity + * @dev: class converted to a Scsi_host structure. + * @attr: device attribute, not used. + * @buf: on return contains text describing the state of the link. + * + * Returns: size of formatted string. 
+ **/ +static ssize_t +lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_vector_map_info *cpup; + int len = 0; + + if ((phba->sli_rev != LPFC_SLI_REV4) || + (phba->intr_type != MSIX)) + return len; + + switch (phba->cfg_fcp_cpu_map) { + case 0: + len += snprintf(buf + len, PAGE_SIZE-len, + "fcp_cpu_map: No mapping (%d)\n", + phba->cfg_fcp_cpu_map); + return len; + case 1: + len += snprintf(buf + len, PAGE_SIZE-len, + "fcp_cpu_map: HBA centric mapping (%d): " + "%d online CPUs\n", + phba->cfg_fcp_cpu_map, + phba->sli4_hba.num_online_cpu); + break; + case 2: + len += snprintf(buf + len, PAGE_SIZE-len, + "fcp_cpu_map: Driver centric mapping (%d): " + "%d online CPUs\n", + phba->cfg_fcp_cpu_map, + phba->sli4_hba.num_online_cpu); + break; + } + + while (phba->sli4_hba.curr_disp_cpu < phba->sli4_hba.num_present_cpu) { + cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu]; + + /* margin should fit in this and the truncated message */ + if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) + len += snprintf(buf + len, PAGE_SIZE-len, + "CPU %02d io_chan %02d " + "physid %d coreid %d\n", + phba->sli4_hba.curr_disp_cpu, + cpup->channel_id, cpup->phys_id, + cpup->core_id); + else + len += snprintf(buf + len, PAGE_SIZE-len, + "CPU %02d io_chan %02d " + "physid %d coreid %d IRQ %d\n", + phba->sli4_hba.curr_disp_cpu, + cpup->channel_id, cpup->phys_id, + cpup->core_id, cpup->irq); + + phba->sli4_hba.curr_disp_cpu++; + + /* display max number of CPUs keeping some margin */ + if (phba->sli4_hba.curr_disp_cpu < + phba->sli4_hba.num_present_cpu && + (len >= (PAGE_SIZE - 64))) { + len += snprintf(buf + len, PAGE_SIZE-len, "more...\n"); + break; + } + } + + if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_present_cpu) + phba->sli4_hba.curr_disp_cpu = 0; + + return len; +} + +/** + * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: one or more lpfc_polling_flags values. + * @count: not used. + * + * Returns: + * -EINVAL - Not implemented yet. + **/ +static ssize_t +lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int status = -EINVAL; + return status; +} + +/* +# lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors +# for the HBA. +# +# Value range is [0 to 2]. Default value is LPFC_DRIVER_CPU_MAP (2). +# 0 - Do not affinitze IRQ vectors +# 1 - Affintize HBA vectors with respect to each HBA +# (start with CPU0 for each HBA) +# 2 - Affintize HBA vectors with respect to the entire driver +# (round robin thru all CPUs across all HBAs) +*/ +static int lpfc_fcp_cpu_map = LPFC_DRIVER_CPU_MAP; +module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(lpfc_fcp_cpu_map, + "Defines how to map CPUs to IRQ vectors per HBA"); + +/** + * lpfc_fcp_cpu_map_init - Set the initial sr-iov virtual function enable + * @phba: lpfc_hba pointer. + * @val: link speed value. + * + * Description: + * If val is in a valid range [0-2], then affinitze the adapter's + * MSIX vectors. + * + * Returns: + * zero if val saved. 
+ * -EINVAL val out of range + **/ +static int +lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val) +{ + if (phba->sli_rev != LPFC_SLI_REV4) { + phba->cfg_fcp_cpu_map = 0; + return 0; + } + + if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) { + phba->cfg_fcp_cpu_map = val; + return 0; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3326 fcp_cpu_map: %d out of range, using default\n", + val); + phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP; + + return 0; +} + +static DEVICE_ATTR(lpfc_fcp_cpu_map, S_IRUGO | S_IWUSR, + lpfc_fcp_cpu_map_show, lpfc_fcp_cpu_map_store); + /* # lpfc_fcp_class: Determines FC class to use for the FCP protocol. # Value range is [2,3]. Default value is 3. @@ -3628,6 +4443,14 @@ LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1, "Use ADISC on rediscovery to authenticate FCP devices"); /* +# lpfc_first_burst_size: First burst size to use on the NPorts +# that support first burst. +# Value range is [0,65536]. Default value is 0. +*/ +LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536, + "First burst size for Targets that support first burst"); + +/* # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue # depth. Default value is 0. When the value of this parameter is zero the # SCSI command completion time is not used for controlling I/O queue depth. When @@ -3676,6 +4499,29 @@ static DEVICE_ATTR(lpfc_max_scsicmpl_time, S_IRUGO | S_IWUSR, LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support"); /* +# lpfc_fcp_io_sched: Determine scheduling algrithmn for issuing FCP cmds +# range is [0,1]. Default value is 0. +# For [0], FCP commands are issued to Work Queues ina round robin fashion. +# For [1], FCP commands are issued to a Work Queue associated with the +# current CPU. +# It would be set to 1 by the driver if it's able to set up cpu affinity +# for FCP I/Os through Work Queue associated with the current CPU. Otherwise, +# roundrobin scheduling of FCP I/Os through WQs will be used. +*/ +LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algorithm for " + "issuing commands [0] - Round Robin, [1] - Current CPU"); + +/* +# lpfc_fcp2_no_tgt_reset: Determine bus reset behavior +# range is [0,1]. Default value is 0. +# For [0], bus reset issues target reset to ALL devices +# For [1], bus reset issues target reset to non-FCP2 devices +*/ +LPFC_ATTR_RW(fcp2_no_tgt_reset, 0, 0, 1, "Determine bus reset behavior for " + "FCP2 devices [0] - issue tgt reset, [1] - no tgt reset"); + + +/* # lpfc_cr_delay & lpfc_cr_count: Default values for I/O colaesing # cr_delay (msec) or cr_count outstanding commands. cr_delay can take # value [0,63]. cr_count can take value [1,255]. Default value of cr_delay @@ -3729,11 +4575,28 @@ LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands " "during discovery"); /* -# lpfc_max_luns: maximum allowed LUN. +# lpfc_max_luns: maximum allowed LUN ID. This is the highest LUN ID that +# will be scanned by the SCSI midlayer when sequential scanning is +# used; and is also the highest LUN ID allowed when the SCSI midlayer +# parses REPORT_LUN responses. The lpfc driver has no LUN count or +# LUN ID limit, but the SCSI midlayer requires this field for the uses +# above. The lpfc driver limits the default value to 255 for two reasons. +# As it bounds the sequential scan loop, scanning for thousands of luns +# on a target can take minutes of wall clock time. Additionally, +# there are FC targets, such as JBODs, that only recognize 8-bits of +# LUN ID. 
When they receive a value greater than 8 bits, they chop off +# the high order bits. In other words, they see LUN IDs 0, 256, 512, +# and so on all as LUN ID 0. This causes the linux kernel, which sees +# valid responses at each of the LUN IDs, to believe there are multiple +# devices present, when in fact, there is only 1. +# A customer that is aware of their target behaviors, and the results as +# indicated above, is welcome to increase the lpfc_max_luns value. +# As mentioned, this value is not used by the lpfc driver, only the +# SCSI midlayer. # Value range is [0,65535]. Default value is 255. # NOTE: The SCSI layer might probe all allowed LUN on some old targets. */ -LPFC_VPORT_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN"); +LPFC_VPORT_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN ID"); /* # lpfc_poll_tmo: .Milliseconds driver will wait between polling FCP ring. @@ -3743,6 +4606,12 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255, "Milliseconds driver will wait between polling FCP ring"); /* +# lpfc_task_mgmt_tmo: Maximum time to wait for task management commands +# to complete in seconds. Value range is [5,180], default value is 60. +*/ +LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180, + "Maximum time to wait for task management commands to complete"); +/* # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that # support this feature # 0 = MSI disabled @@ -3754,28 +4623,13 @@ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or " "MSI-X (2), if possible"); /* -# lpfc_fcp_imax: Set the maximum number of fast-path FCP interrupts per second -# -# Value range is [636,651042]. Default value is 10000. -*/ -LPFC_ATTR_R(fcp_imax, LPFC_FP_DEF_IMAX, LPFC_MIM_IMAX, LPFC_DMULT_CONST, - "Set the maximum number of fast-path FCP interrupts per second"); - -/* -# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues +# lpfc_fcp_io_channel: Set the number of FCP EQ/CQ/WQ IO channels # -# Value range is [1,31]. Default value is 4. +# Value range is [1,7]. Default value is 4. */ -LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX, - "Set the number of fast-path FCP work queues, if possible"); - -/* -# lpfc_fcp_eq_count: Set the number of fast-path FCP event queues -# -# Value range is [1,7]. Default value is 1. -*/ -LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX, - "Set the number of fast-path FCP event queues, if possible"); +LPFC_ATTR_R(fcp_io_channel, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN, + LPFC_FCP_IO_CHAN_MAX, + "Set the number of FCP I/O channels"); /* # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. @@ -3794,6 +4648,21 @@ LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver."); LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat."); /* +# lpfc_EnableXLane: Enable Express Lane Feature +# 0x0 Express Lane Feature disabled +# 0x1 Express Lane Feature enabled +# Value range is [0,1]. Default value is 0. +*/ +LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature."); + +/* +# lpfc_XLanePriority: Define CS_CTL priority for Express Lane Feature +# 0x0 - 0x7f = CS_CTL field in FC header (high 7 bits) +# Value range is [0x0,0x7f]. 
Default value is 0 +*/ +LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature."); + +/* # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF) # 0 = BlockGuard disabled (default) # 1 = BlockGuard enabled @@ -3802,12 +4671,28 @@ LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat."); LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); /* +# lpfc_fcp_look_ahead: Look ahead for completions in FCP start routine +# 0 = disabled (default) +# 1 = enabled +# Value range is [0,1]. Default value is 0. +# +# This feature in under investigation and may be supported in the future. +*/ +unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF; + +/* # lpfc_prot_mask: i # - Bit mask of host protection capabilities used to register with the # SCSI mid-layer # - Only meaningful if BG is turned on (lpfc_enable_bg=1). # - Allows you to ultimately specify which profiles to use # - Default will result in registering capabilities for all profiles. +# - SHOST_DIF_TYPE1_PROTECTION 1 +# HBA supports T10 DIF Type 1: HBA to Target Type 1 Protection +# - SHOST_DIX_TYPE0_PROTECTION 8 +# HBA supports DIX Type 0: Host to HBA protection only +# - SHOST_DIX_TYPE1_PROTECTION 16 +# HBA supports DIX Type 1: Host to HBA Type 1 protection # */ unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION | @@ -3820,7 +4705,7 @@ MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask"); /* # lpfc_prot_guard: i # - Bit mask of protection guard types to register with the SCSI mid-layer -# - Guard types are currently either 1) IP checksum 2) T10-DIF CRC +# - Guard types are currently either 1) T10-DIF CRC 2) IP checksum # - Allows you to ultimately specify which profiles to use # - Default will result in registering capabilities for all guard types # @@ -3851,16 +4736,23 @@ MODULE_PARM_DESC(lpfc_delay_discovery, /* * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count - * This value can be set to values between 64 and 256. The default value is + * This value can be set to values between 64 and 4096. The default value is * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE). + * Because of the additional overhead involved in setting up T10-DIF, + * this parameter will be limited to 128 if BlockGuard is enabled under SLI4 + * and will be limited to 512 if BlockGuard is enabled under SLI3. */ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count"); -LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_PROT_SG_SEG_CNT, - LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_MAX_PROT_SG_SEG_CNT, - "Max Protection Scatter Gather Segment Count"); +/* + * This parameter will be depricated, the driver cannot limit the + * protection data s/g list. 
+ */ +LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, + LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT, + "Max Protection Scatter Gather Segment Count"); struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_bg_info, @@ -3891,10 +4783,13 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_devloss_tmo, &dev_attr_lpfc_fcp_class, &dev_attr_lpfc_use_adisc, + &dev_attr_lpfc_first_burst_size, &dev_attr_lpfc_ack0, &dev_attr_lpfc_topology, &dev_attr_lpfc_scan_down, &dev_attr_lpfc_link_speed, + &dev_attr_lpfc_fcp_io_sched, + &dev_attr_lpfc_fcp2_no_tgt_reset, &dev_attr_lpfc_cr_delay, &dev_attr_lpfc_cr_count, &dev_attr_lpfc_multi_ring_support, @@ -3917,16 +4812,24 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_issue_reset, &dev_attr_lpfc_poll, &dev_attr_lpfc_poll_tmo, + &dev_attr_lpfc_task_mgmt_tmo, &dev_attr_lpfc_use_msi, &dev_attr_lpfc_fcp_imax, - &dev_attr_lpfc_fcp_wq_count, - &dev_attr_lpfc_fcp_eq_count, + &dev_attr_lpfc_fcp_cpu_map, + &dev_attr_lpfc_fcp_io_channel, &dev_attr_lpfc_enable_bg, &dev_attr_lpfc_soft_wwnn, &dev_attr_lpfc_soft_wwpn, &dev_attr_lpfc_soft_wwn_enable, &dev_attr_lpfc_enable_hba_reset, &dev_attr_lpfc_enable_hba_heartbeat, + &dev_attr_lpfc_EnableXLane, + &dev_attr_lpfc_XLanePriority, + &dev_attr_lpfc_xlane_lun, + &dev_attr_lpfc_xlane_tgt, + &dev_attr_lpfc_xlane_vpt, + &dev_attr_lpfc_xlane_lun_state, + &dev_attr_lpfc_xlane_lun_status, &dev_attr_lpfc_sg_seg_cnt, &dev_attr_lpfc_max_scsicmpl_time, &dev_attr_lpfc_stat_data_ctrl, @@ -3934,6 +4837,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_aer_support, &dev_attr_lpfc_aer_state_cleanup, &dev_attr_lpfc_sriov_nr_virtfn, + &dev_attr_lpfc_req_fw_upgrade, &dev_attr_lpfc_suppress_link_up, &dev_attr_lpfc_iocb_cnt, &dev_attr_iocb_hw, @@ -3944,6 +4848,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_dss, &dev_attr_lpfc_sriov_hw_max_virtfn, &dev_attr_protocol, + &dev_attr_lpfc_xlane_supported, NULL, }; @@ -3962,6 +4867,7 @@ struct device_attribute *lpfc_vport_attrs[] = { &dev_attr_lpfc_restrict_login, &dev_attr_lpfc_fcp_class, &dev_attr_lpfc_use_adisc, + &dev_attr_lpfc_first_burst_size, &dev_attr_lpfc_fdmi_on, &dev_attr_lpfc_max_luns, &dev_attr_nport_evt_cnt, @@ -4883,6 +5789,8 @@ struct fc_function_template lpfc_vport_transport_functions = { void lpfc_get_cfgparam(struct lpfc_hba *phba) { + lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched); + lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset); lpfc_cr_delay_init(phba, lpfc_cr_delay); lpfc_cr_count_init(phba, lpfc_cr_count); lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support); @@ -4892,20 +5800,30 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_topology_init(phba, lpfc_topology); lpfc_link_speed_init(phba, lpfc_link_speed); lpfc_poll_tmo_init(phba, lpfc_poll_tmo); + lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo); lpfc_enable_npiv_init(phba, lpfc_enable_npiv); lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy); lpfc_enable_rrq_init(phba, lpfc_enable_rrq); lpfc_use_msi_init(phba, lpfc_use_msi); lpfc_fcp_imax_init(phba, lpfc_fcp_imax); - lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count); - lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count); + lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map); + lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel); lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); + lpfc_EnableXLane_init(phba, lpfc_EnableXLane); + if (phba->sli_rev != LPFC_SLI_REV4) + phba->cfg_EnableXLane = 0; + 
lpfc_XLanePriority_init(phba, lpfc_XLanePriority); + memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t))); + memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t))); + phba->cfg_oas_lun_state = 0; + phba->cfg_oas_lun_status = 0; + phba->cfg_oas_flags = 0; lpfc_enable_bg_init(phba, lpfc_enable_bg); if (phba->sli_rev == LPFC_SLI_REV4) phba->cfg_poll = 0; else - phba->cfg_poll = lpfc_poll; + phba->cfg_poll = lpfc_poll; phba->cfg_soft_wwnn = 0L; phba->cfg_soft_wwpn = 0L; lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); @@ -4914,6 +5832,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); lpfc_aer_support_init(phba, lpfc_aer_support); lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn); + lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade); lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up); lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt); phba->cfg_enable_dss = 1; @@ -4936,6 +5855,7 @@ lpfc_get_vport_cfgparam(struct lpfc_vport *vport) lpfc_restrict_login_init(vport, lpfc_restrict_login); lpfc_fcp_class_init(vport, lpfc_fcp_class); lpfc_use_adisc_init(vport, lpfc_use_adisc); + lpfc_first_burst_size_init(vport, lpfc_first_burst_size); lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time); lpfc_fdmi_on_init(vport, lpfc_fdmi_on); lpfc_discovery_threads_init(vport, lpfc_discovery_threads); diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 56a86baece5..5b5c825d957 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2009-2011 Emulex. All rights reserved. * + * Copyright (C) 2009-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * * @@ -64,18 +64,14 @@ struct lpfc_bsg_event { struct list_head events_to_get; struct list_head events_to_see; - /* job waiting for this event to finish */ - struct fc_bsg_job *set_job; + /* driver data associated with the job */ + void *dd_data; }; struct lpfc_bsg_iocb { struct lpfc_iocbq *cmdiocbq; - struct lpfc_iocbq *rspiocbq; - struct lpfc_dmabuf *bmp; + struct lpfc_dmabuf *rmp; struct lpfc_nodelist *ndlp; - - /* job waiting for this iocb to finish */ - struct fc_bsg_job *set_job; }; struct lpfc_bsg_mbox { @@ -86,20 +82,13 @@ struct lpfc_bsg_mbox { uint32_t mbOffset; /* from app */ uint32_t inExtWLen; /* from app */ uint32_t outExtWLen; /* from app */ - - /* job waiting for this mbox command to finish */ - struct fc_bsg_job *set_job; }; #define MENLO_DID 0x0000FC0E struct lpfc_bsg_menlo { struct lpfc_iocbq *cmdiocbq; - struct lpfc_iocbq *rspiocbq; - struct lpfc_dmabuf *bmp; - - /* job waiting for this iocb to finish */ - struct fc_bsg_job *set_job; + struct lpfc_dmabuf *rmp; }; #define TYPE_EVT 1 @@ -108,6 +97,7 @@ struct lpfc_bsg_menlo { #define TYPE_MENLO 4 struct bsg_job_data { uint32_t type; + struct fc_bsg_job *set_job; /* job waiting for this iocb to finish */ union { struct lpfc_bsg_event *evt; struct lpfc_bsg_iocb iocb; @@ -141,6 +131,149 @@ struct lpfc_dmabufext { uint32_t flag; }; +static void +lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist) +{ + struct lpfc_dmabuf *mlast, *next_mlast; + + if (mlist) { + list_for_each_entry_safe(mlast, next_mlast, &mlist->list, + list) { + lpfc_mbuf_free(phba, mlast->virt, mlast->phys); + list_del(&mlast->list); + kfree(mlast); + } + lpfc_mbuf_free(phba, mlist->virt, mlist->phys); + kfree(mlist); + } + return; +} + +static struct lpfc_dmabuf * +lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size, + int outbound_buffers, struct ulp_bde64 *bpl, + int *bpl_entries) +{ + struct lpfc_dmabuf *mlist = NULL; + struct lpfc_dmabuf *mp; + unsigned int bytes_left = size; + + /* Verify we can support the size specified */ + if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE))) + return NULL; + + /* Determine the number of dma buffers to allocate */ + *bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 : + size/LPFC_BPL_SIZE); + + /* Allocate dma buffer and place in BPL passed */ + while (bytes_left) { + /* Allocate dma buffer */ + mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!mp) { + if (mlist) + lpfc_free_bsg_buffers(phba, mlist); + return NULL; + } + + INIT_LIST_HEAD(&mp->list); + mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys)); + + if (!mp->virt) { + kfree(mp); + if (mlist) + lpfc_free_bsg_buffers(phba, mlist); + return NULL; + } + + /* Queue it to a linked list */ + if (!mlist) + mlist = mp; + else + list_add_tail(&mp->list, &mlist->list); + + /* Add buffer to buffer pointer list */ + if (outbound_buffers) + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; + else + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; + bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys)); + bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys)); + bpl->tus.f.bdeSize = (uint16_t) + (bytes_left >= LPFC_BPL_SIZE ? 
LPFC_BPL_SIZE : + bytes_left); + bytes_left -= bpl->tus.f.bdeSize; + bpl->tus.w = le32_to_cpu(bpl->tus.w); + bpl++; + } + return mlist; +} + +static unsigned int +lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers, + struct fc_bsg_buffer *bsg_buffers, + unsigned int bytes_to_transfer, int to_buffers) +{ + + struct lpfc_dmabuf *mp; + unsigned int transfer_bytes, bytes_copied = 0; + unsigned int sg_offset, dma_offset; + unsigned char *dma_address, *sg_address; + LIST_HEAD(temp_list); + struct sg_mapping_iter miter; + unsigned long flags; + unsigned int sg_flags = SG_MITER_ATOMIC; + bool sg_valid; + + list_splice_init(&dma_buffers->list, &temp_list); + list_add(&dma_buffers->list, &temp_list); + sg_offset = 0; + if (to_buffers) + sg_flags |= SG_MITER_FROM_SG; + else + sg_flags |= SG_MITER_TO_SG; + sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt, + sg_flags); + local_irq_save(flags); + sg_valid = sg_miter_next(&miter); + list_for_each_entry(mp, &temp_list, list) { + dma_offset = 0; + while (bytes_to_transfer && sg_valid && + (dma_offset < LPFC_BPL_SIZE)) { + dma_address = mp->virt + dma_offset; + if (sg_offset) { + /* Continue previous partial transfer of sg */ + sg_address = miter.addr + sg_offset; + transfer_bytes = miter.length - sg_offset; + } else { + sg_address = miter.addr; + transfer_bytes = miter.length; + } + if (bytes_to_transfer < transfer_bytes) + transfer_bytes = bytes_to_transfer; + if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset)) + transfer_bytes = LPFC_BPL_SIZE - dma_offset; + if (to_buffers) + memcpy(dma_address, sg_address, transfer_bytes); + else + memcpy(sg_address, dma_address, transfer_bytes); + dma_offset += transfer_bytes; + sg_offset += transfer_bytes; + bytes_to_transfer -= transfer_bytes; + bytes_copied += transfer_bytes; + if (sg_offset >= miter.length) { + sg_offset = 0; + sg_valid = sg_miter_next(&miter); + } + } + } + sg_miter_stop(&miter); + local_irq_restore(flags); + list_del_init(&dma_buffers->list); + list_splice(&temp_list, &dma_buffers->list); + return bytes_copied; +} + /** * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler * @phba: Pointer to HBA context object. 
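Editorial note: the lpfc_bsg_copy_data() routine added above replaces the old pci_map_sg() approach. Instead of DMA-mapping the BSG scatter/gather list, the payload is copied between the sg list and the driver's own mbuf chain with the sg_mapping_iter API. Below is a minimal sketch of that iterator pattern, reduced to copying an sg list into a single linear buffer; sg_to_linear_sketch() is a hypothetical helper, and the real routine additionally disables interrupts around the walk and spreads the data across multiple LPFC_BPL_SIZE buffers.

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

static size_t sg_to_linear_sketch(struct scatterlist *sgl, unsigned int nents,
				  void *dst, size_t dst_len)
{
	struct sg_mapping_iter miter;
	char *out = dst;
	size_t copied = 0;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	while (copied < dst_len && sg_miter_next(&miter)) {
		size_t len = min(miter.length, dst_len - copied);

		/* miter.addr is a kmap'd window onto the current sg segment */
		memcpy(out + copied, miter.addr, len);
		copied += len;
	}
	sg_miter_stop(&miter);
	return copied;
}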
@@ -166,62 +299,77 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba, struct bsg_job_data *dd_data; struct fc_bsg_job *job; IOCB_t *rsp; - struct lpfc_dmabuf *bmp; + struct lpfc_dmabuf *bmp, *cmp, *rmp; struct lpfc_nodelist *ndlp; struct lpfc_bsg_iocb *iocb; unsigned long flags; + unsigned int rsp_size; int rc = 0; + dd_data = cmdiocbq->context1; + + /* Determine if job has been aborted */ spin_lock_irqsave(&phba->ct_ev_lock, flags); - dd_data = cmdiocbq->context2; - if (!dd_data) { - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - lpfc_sli_release_iocbq(phba, cmdiocbq); - return; + job = dd_data->set_job; + if (job) { + /* Prevent timeout handling from trying to abort job */ + job->dd_data = NULL; } + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - iocb = &dd_data->context_un.iocb; - job = iocb->set_job; - job->dd_data = NULL; /* so timeout handler does not reply */ + /* Close the timeout handler abort window */ + spin_lock_irqsave(&phba->hbalock, flags); + cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING; + spin_unlock_irqrestore(&phba->hbalock, flags); - bmp = iocb->bmp; + iocb = &dd_data->context_un.iocb; + ndlp = iocb->ndlp; + rmp = iocb->rmp; + cmp = cmdiocbq->context2; + bmp = cmdiocbq->context3; rsp = &rspiocbq->iocb; - ndlp = cmdiocbq->context1; - pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, - job->request_payload.sg_cnt, DMA_TO_DEVICE); - pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, - job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + /* Copy the completed data or set the error status */ - if (rsp->ulpStatus) { - if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { - switch (rsp->un.ulpWord[4] & 0xff) { - case IOERR_SEQUENCE_TIMEOUT: - rc = -ETIMEDOUT; - break; - case IOERR_INVALID_RPI: - rc = -EFAULT; - break; - default: + if (job) { + if (rsp->ulpStatus) { + if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { + switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) { + case IOERR_SEQUENCE_TIMEOUT: + rc = -ETIMEDOUT; + break; + case IOERR_INVALID_RPI: + rc = -EFAULT; + break; + default: + rc = -EACCES; + break; + } + } else { rc = -EACCES; - break; } - } else - rc = -EACCES; - } else - job->reply->reply_payload_rcv_len = - rsp->un.genreq64.bdl.bdeSize; + } else { + rsp_size = rsp->un.genreq64.bdl.bdeSize; + job->reply->reply_payload_rcv_len = + lpfc_bsg_copy_data(rmp, &job->reply_payload, + rsp_size, 0); + } + } + lpfc_free_bsg_buffers(phba, cmp); + lpfc_free_bsg_buffers(phba, rmp); lpfc_mbuf_free(phba, bmp->virt, bmp->phys); + kfree(bmp); lpfc_sli_release_iocbq(phba, cmdiocbq); lpfc_nlp_put(ndlp); - kfree(bmp); kfree(dd_data); - /* make error code available to userspace */ - job->reply->result = rc; - /* complete the job back to userspace */ - job->job_done(job); - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + /* Complete the job if the job is still active */ + + if (job) { + job->reply->result = rc; + job->job_done(job); + } return; } @@ -240,13 +388,11 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) uint32_t timeout; struct lpfc_iocbq *cmdiocbq = NULL; IOCB_t *cmd; - struct lpfc_dmabuf *bmp = NULL; + struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL; int request_nseg; int reply_nseg; - struct scatterlist *sgel = NULL; - int numbde; - dma_addr_t busaddr; struct bsg_job_data *dd_data; + unsigned long flags; uint32_t creg_val; int rc = 0; int iocb_stat; @@ -268,54 +414,50 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) goto no_ndlp; } - bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); - if (!bmp) { - rc = -ENOMEM; - goto free_ndlp; - } - if 
(ndlp->nlp_flag & NLP_ELS_SND_MASK) { rc = -ENODEV; - goto free_bmp; + goto free_ndlp; } cmdiocbq = lpfc_sli_get_iocbq(phba); if (!cmdiocbq) { rc = -ENOMEM; - goto free_bmp; + goto free_ndlp; } cmd = &cmdiocbq->iocb; + + bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!bmp) { + rc = -ENOMEM; + goto free_cmdiocbq; + } bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); if (!bmp->virt) { rc = -ENOMEM; - goto free_cmdiocbq; + goto free_bmp; } INIT_LIST_HEAD(&bmp->list); + bpl = (struct ulp_bde64 *) bmp->virt; - request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, - job->request_payload.sg_cnt, DMA_TO_DEVICE); - for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { - busaddr = sg_dma_address(sgel); - bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; - bpl->tus.f.bdeSize = sg_dma_len(sgel); - bpl->tus.w = cpu_to_le32(bpl->tus.w); - bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); - bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); - bpl++; + request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64); + cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len, + 1, bpl, &request_nseg); + if (!cmp) { + rc = -ENOMEM; + goto free_bmp; } + lpfc_bsg_copy_data(cmp, &job->request_payload, + job->request_payload.payload_len, 1); - reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list, - job->reply_payload.sg_cnt, DMA_FROM_DEVICE); - for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) { - busaddr = sg_dma_address(sgel); - bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; - bpl->tus.f.bdeSize = sg_dma_len(sgel); - bpl->tus.w = cpu_to_le32(bpl->tus.w); - bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); - bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); - bpl++; + bpl += request_nseg; + reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg; + rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0, + bpl, &reply_nseg); + if (!rmp) { + rc = -ENOMEM; + goto free_cmp; } cmd->un.genreq64.bdl.ulpIoTag32 = 0; @@ -343,17 +485,21 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) cmd->ulpTimeout = timeout; cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp; - cmdiocbq->context1 = ndlp; - cmdiocbq->context2 = dd_data; + cmdiocbq->context1 = dd_data; + cmdiocbq->context2 = cmp; + cmdiocbq->context3 = bmp; + cmdiocbq->context_un.ndlp = ndlp; dd_data->type = TYPE_IOCB; + dd_data->set_job = job; dd_data->context_un.iocb.cmdiocbq = cmdiocbq; - dd_data->context_un.iocb.set_job = job; - dd_data->context_un.iocb.bmp = bmp; + dd_data->context_un.iocb.ndlp = ndlp; + dd_data->context_un.iocb.rmp = rmp; + job->dd_data = dd_data; if (phba->cfg_poll & DISABLE_FCP_RING_INT) { if (lpfc_readl(phba->HCregaddr, &creg_val)) { rc = -EIO ; - goto free_cmdiocbq; + goto free_rmp; } creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); @@ -361,26 +507,35 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) } iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); - if (iocb_stat == IOCB_SUCCESS) + + if (iocb_stat == IOCB_SUCCESS) { + spin_lock_irqsave(&phba->hbalock, flags); + /* make sure the I/O had not been completed yet */ + if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) { + /* open up abort window to timeout handler */ + cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING; + } + spin_unlock_irqrestore(&phba->hbalock, flags); return 0; /* done for now */ - else if (iocb_stat == IOCB_BUSY) + } else if (iocb_stat == IOCB_BUSY) { rc = -EAGAIN; - else + } else { rc = -EIO; - + } /* iocb failed so cleanup */ - 
pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, - job->request_payload.sg_cnt, DMA_TO_DEVICE); - pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, - job->reply_payload.sg_cnt, DMA_FROM_DEVICE); - - lpfc_mbuf_free(phba, bmp->virt, bmp->phys); + job->dd_data = NULL; -free_cmdiocbq: - lpfc_sli_release_iocbq(phba, cmdiocbq); +free_rmp: + lpfc_free_bsg_buffers(phba, rmp); +free_cmp: + lpfc_free_bsg_buffers(phba, cmp); free_bmp: + if (bmp->virt) + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); kfree(bmp); +free_cmdiocbq: + lpfc_sli_release_iocbq(phba, cmdiocbq); free_ndlp: lpfc_nlp_put(ndlp); no_ndlp: @@ -418,67 +573,73 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba, struct fc_bsg_job *job; IOCB_t *rsp; struct lpfc_nodelist *ndlp; - struct lpfc_dmabuf *pbuflist = NULL; + struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL; struct fc_bsg_ctels_reply *els_reply; uint8_t *rjt_data; unsigned long flags; + unsigned int rsp_size; int rc = 0; - spin_lock_irqsave(&phba->ct_ev_lock, flags); dd_data = cmdiocbq->context1; - /* normal completion and timeout crossed paths, already done */ - if (!dd_data) { - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - return; + ndlp = dd_data->context_un.iocb.ndlp; + cmdiocbq->context1 = ndlp; + + /* Determine if job has been aborted */ + spin_lock_irqsave(&phba->ct_ev_lock, flags); + job = dd_data->set_job; + if (job) { + /* Prevent timeout handling from trying to abort job */ + job->dd_data = NULL; } + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - cmdiocbq->iocb_flag |= LPFC_IO_WAKE; - if (cmdiocbq->context2 && rspiocbq) - memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, - &rspiocbq->iocb, sizeof(IOCB_t)); + /* Close the timeout handler abort window */ + spin_lock_irqsave(&phba->hbalock, flags); + cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING; + spin_unlock_irqrestore(&phba->hbalock, flags); - job = dd_data->context_un.iocb.set_job; - cmdiocbq = dd_data->context_un.iocb.cmdiocbq; - rspiocbq = dd_data->context_un.iocb.rspiocbq; rsp = &rspiocbq->iocb; - ndlp = dd_data->context_un.iocb.ndlp; + pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2; + prsp = (struct lpfc_dmabuf *)pcmd->list.next; - pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, - job->request_payload.sg_cnt, DMA_TO_DEVICE); - pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, - job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + /* Copy the completed job data or determine the job status if job is + * still active + */ - if (job->reply->result == -EAGAIN) - rc = -EAGAIN; - else if (rsp->ulpStatus == IOSTAT_SUCCESS) - job->reply->reply_payload_rcv_len = - rsp->un.elsreq64.bdl.bdeSize; - else if (rsp->ulpStatus == IOSTAT_LS_RJT) { - job->reply->reply_payload_rcv_len = - sizeof(struct fc_bsg_ctels_reply); - /* LS_RJT data returned in word 4 */ - rjt_data = (uint8_t *)&rsp->un.ulpWord[4]; - els_reply = &job->reply->reply_data.ctels_reply; - els_reply->status = FC_CTELS_STATUS_REJECT; - els_reply->rjt_data.action = rjt_data[3]; - els_reply->rjt_data.reason_code = rjt_data[2]; - els_reply->rjt_data.reason_explanation = rjt_data[1]; - els_reply->rjt_data.vendor_unique = rjt_data[0]; - } else - rc = -EIO; + if (job) { + if (rsp->ulpStatus == IOSTAT_SUCCESS) { + rsp_size = rsp->un.elsreq64.bdl.bdeSize; + job->reply->reply_payload_rcv_len = + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + prsp->virt, + rsp_size); + } else if (rsp->ulpStatus == IOSTAT_LS_RJT) { + job->reply->reply_payload_rcv_len = + sizeof(struct fc_bsg_ctels_reply); + /* LS_RJT data 
returned in word 4 */ + rjt_data = (uint8_t *)&rsp->un.ulpWord[4]; + els_reply = &job->reply->reply_data.ctels_reply; + els_reply->status = FC_CTELS_STATUS_REJECT; + els_reply->rjt_data.action = rjt_data[3]; + els_reply->rjt_data.reason_code = rjt_data[2]; + els_reply->rjt_data.reason_explanation = rjt_data[1]; + els_reply->rjt_data.vendor_unique = rjt_data[0]; + } else { + rc = -EIO; + } + } - pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3; - lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys); - lpfc_sli_release_iocbq(phba, rspiocbq); - lpfc_sli_release_iocbq(phba, cmdiocbq); lpfc_nlp_put(ndlp); + lpfc_els_free_iocb(phba, cmdiocbq); kfree(dd_data); - /* make error code available to userspace */ - job->reply->result = rc; - job->dd_data = NULL; - /* complete the job back to userspace */ - job->job_done(job); - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + /* Complete the job if the job is still active */ + + if (job) { + job->reply->result = rc; + job->job_done(job); + } return; } @@ -496,26 +657,25 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) uint32_t elscmd; uint32_t cmdsize; uint32_t rspsize; - struct lpfc_iocbq *rspiocbq; struct lpfc_iocbq *cmdiocbq; - IOCB_t *rsp; uint16_t rpi = 0; - struct lpfc_dmabuf *pcmd; - struct lpfc_dmabuf *prsp; - struct lpfc_dmabuf *pbuflist = NULL; - struct ulp_bde64 *bpl; - int request_nseg; - int reply_nseg; - struct scatterlist *sgel = NULL; - int numbde; - dma_addr_t busaddr; struct bsg_job_data *dd_data; + unsigned long flags; uint32_t creg_val; int rc = 0; /* in case no data is transferred */ job->reply->reply_payload_rcv_len = 0; + /* verify the els command is not greater than the + * maximum ELS transfer size. + */ + + if (job->request_payload.payload_len > FCELSSIZE) { + rc = -EINVAL; + goto no_dd_data; + } + /* allocate our bsg tracking structure */ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); if (!dd_data) { @@ -525,84 +685,51 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) goto no_dd_data; } - if (!lpfc_nlp_get(ndlp)) { - rc = -ENODEV; - goto free_dd_data; - } - elscmd = job->request->rqst_data.r_els.els_code; cmdsize = job->request_payload.payload_len; rspsize = job->reply_payload.payload_len; - rspiocbq = lpfc_sli_get_iocbq(phba); - if (!rspiocbq) { - lpfc_nlp_put(ndlp); - rc = -ENOMEM; + + if (!lpfc_nlp_get(ndlp)) { + rc = -ENODEV; goto free_dd_data; } - rsp = &rspiocbq->iocb; - rpi = ndlp->nlp_rpi; + /* We will use the allocated dma buffers by prep els iocb for command + * and response to ensure if the job times out and the request is freed, + * we won't be dma into memory that is no longer allocated to for the + * request. 
+ */ cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, elscmd); if (!cmdiocbq) { rc = -EIO; - goto free_rspiocbq; + goto release_ndlp; } - /* prep els iocb set context1 to the ndlp, context2 to the command - * dmabuf, context3 holds the data dmabuf - */ - pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2; - prsp = (struct lpfc_dmabuf *) pcmd->list.next; - lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); - kfree(pcmd); - lpfc_mbuf_free(phba, prsp->virt, prsp->phys); - kfree(prsp); - cmdiocbq->context2 = NULL; - - pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3; - bpl = (struct ulp_bde64 *) pbuflist->virt; - - request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, - job->request_payload.sg_cnt, DMA_TO_DEVICE); - for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { - busaddr = sg_dma_address(sgel); - bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; - bpl->tus.f.bdeSize = sg_dma_len(sgel); - bpl->tus.w = cpu_to_le32(bpl->tus.w); - bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); - bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); - bpl++; - } + rpi = ndlp->nlp_rpi; - reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list, - job->reply_payload.sg_cnt, DMA_FROM_DEVICE); - for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) { - busaddr = sg_dma_address(sgel); - bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; - bpl->tus.f.bdeSize = sg_dma_len(sgel); - bpl->tus.w = cpu_to_le32(bpl->tus.w); - bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); - bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); - bpl++; - } - cmdiocbq->iocb.un.elsreq64.bdl.bdeSize = - (request_nseg + reply_nseg) * sizeof(struct ulp_bde64); - cmdiocbq->iocb.ulpContext = rpi; - cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; - cmdiocbq->context1 = NULL; - cmdiocbq->context2 = NULL; + /* Transfer the request payload to allocated command dma buffer */ - cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp; + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt, + cmdsize); + + if (phba->sli_rev == LPFC_SLI_REV4) + cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi]; + else + cmdiocbq->iocb.ulpContext = rpi; + cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; cmdiocbq->context1 = dd_data; - cmdiocbq->context2 = rspiocbq; + cmdiocbq->context_un.ndlp = ndlp; + cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp; dd_data->type = TYPE_IOCB; + dd_data->set_job = job; dd_data->context_un.iocb.cmdiocbq = cmdiocbq; - dd_data->context_un.iocb.rspiocbq = rspiocbq; - dd_data->context_un.iocb.set_job = job; - dd_data->context_un.iocb.bmp = NULL; dd_data->context_un.iocb.ndlp = ndlp; + dd_data->context_un.iocb.rmp = NULL; + job->dd_data = dd_data; if (phba->cfg_poll & DISABLE_FCP_RING_INT) { if (lpfc_readl(phba->HCregaddr, &creg_val)) { @@ -613,27 +740,33 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ } + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); - lpfc_nlp_put(ndlp); - if (rc == IOCB_SUCCESS) + + if (rc == IOCB_SUCCESS) { + spin_lock_irqsave(&phba->hbalock, flags); + /* make sure the I/O had not been completed/released */ + if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) { + /* open up abort window to timeout handler */ + cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING; + } + spin_unlock_irqrestore(&phba->hbalock, flags); return 0; /* done for now */ - else if (rc == IOCB_BUSY) + } else if (rc == IOCB_BUSY) { rc = -EAGAIN; - else + } else { rc = -EIO; + } 
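Editorial note: the block just above is one half of the abort-window handshake this patch introduces for every BSG-issued IOCB. The submit path sets LPFC_IO_CMD_OUTSTANDING only after lpfc_sli_issue_iocb() succeeds, and only while LPFC_IO_LIBDFC is still present, so a command that already completed and released its iocbq never gets the flag; each completion handler (see lpfc_bsg_send_mgmt_cmd_cmp and lpfc_bsg_rport_els_cmp earlier in this diff) claims the job and clears the flag again before freeing anything, which lets lpfc_bsg_timeout() tell an outstanding command from one that already finished. The two hypothetical helpers below condense that idiom, assuming the flags, locks, and calls shown in the surrounding hunks; they are a sketch, not code from the patch.

/* Submit side: open the abort window only if the command is still ours */
static int lpfc_bsg_issue_and_arm_sketch(struct lpfc_hba *phba,
					 struct lpfc_iocbq *cmdiocbq)
{
	unsigned long flags;
	int rc;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
	if (rc == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* skip if the I/O already completed and released the iocbq */
		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)
			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0;	/* the completion handler finishes the job */
	}
	return (rc == IOCB_BUSY) ? -EAGAIN : -EIO;
}

/* Completion side: claim the job and close the abort window */
static struct fc_bsg_job *
lpfc_bsg_cmpl_claim_job_sketch(struct lpfc_hba *phba,
			       struct lpfc_iocbq *cmdiocbq,
			       struct bsg_job_data *dd_data)
{
	struct fc_bsg_job *job;
	unsigned long flags;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;		/* NULL if the timeout handler ran first */
	if (job)
		job->dd_data = NULL;	/* keep the timeout path from aborting us */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	return job;	/* NULL means the request timed out or was aborted */
}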
-linkdown_err: - pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, - job->request_payload.sg_cnt, DMA_TO_DEVICE); - pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, - job->reply_payload.sg_cnt, DMA_FROM_DEVICE); - - lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys); + /* iocb failed so cleanup */ + job->dd_data = NULL; - lpfc_sli_release_iocbq(phba, cmdiocbq); +linkdown_err: + cmdiocbq->context1 = ndlp; + lpfc_els_free_iocb(phba, cmdiocbq); -free_rspiocbq: - lpfc_sli_release_iocbq(phba, rspiocbq); +release_ndlp: + lpfc_nlp_put(ndlp); free_dd_data: kfree(dd_data); @@ -676,6 +809,7 @@ lpfc_bsg_event_free(struct kref *kref) kfree(ed); } + kfree(evt->dd_data); kfree(evt); } @@ -719,6 +853,7 @@ lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id) evt->req_id = ev_req_id; evt->reg_id = ev_reg_id; evt->wait_time_stamp = jiffies; + evt->dd_data = NULL; init_waitqueue_head(&evt->wq); kref_init(&evt->kref); return evt; @@ -786,6 +921,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_hbq_entry *hbqe; struct lpfc_sli_ct_request *ct_req; struct fc_bsg_job *job = NULL; + struct bsg_job_data *dd_data = NULL; unsigned long flags; int size = 0; @@ -951,9 +1087,9 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, spin_lock_irqsave(&phba->ct_ev_lock, flags); if (phba->sli_rev == LPFC_SLI_REV4) { evt_dat->immed_dat = phba->ctx_idx; - phba->ctx_idx = (phba->ctx_idx + 1) % 64; + phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX; /* Provide warning for over-run of the ct_ctx array */ - if (phba->ct_ctx[evt_dat->immed_dat].flags & + if (phba->ct_ctx[evt_dat->immed_dat].valid == UNSOL_VALID) lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, "2717 CT context array entry " @@ -969,7 +1105,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, piocbq->iocb.unsli3.rcvsli3.ox_id; phba->ct_ctx[evt_dat->immed_dat].SID = piocbq->iocb.un.rcvels.remoteID; - phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID; + phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID; } else evt_dat->immed_dat = piocbq->iocb.ulpContext; @@ -982,10 +1118,11 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } list_move(evt->events_to_see.prev, &evt->events_to_get); - lpfc_bsg_event_unref(evt); - job = evt->set_job; - evt->set_job = NULL; + dd_data = (struct bsg_job_data *)evt->dd_data; + job = dd_data->set_job; + dd_data->set_job = NULL; + lpfc_bsg_event_unref(evt); if (job) { job->reply->reply_payload_rcv_len = size; /* make error code available to userspace */ @@ -1009,6 +1146,47 @@ error_ct_unsol_exit: } /** + * lpfc_bsg_ct_unsol_abort - handler ct abort to management plane + * @phba: Pointer to HBA context object. + * @dmabuf: pointer to a dmabuf that describes the FC sequence + * + * This function handles abort to the CT command toward management plane + * for SLI4 port. + * + * If the pending context of a CT command to management plane present, clears + * such context and returns 1 for handled; otherwise, it returns 0 indicating + * no context exists. 
+ **/ +int +lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf) +{ + struct fc_frame_header fc_hdr; + struct fc_frame_header *fc_hdr_ptr = &fc_hdr; + int ctx_idx, handled = 0; + uint16_t oxid, rxid; + uint32_t sid; + + memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); + sid = sli4_sid_from_fc_hdr(fc_hdr_ptr); + oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id); + rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id); + + for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) { + if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID) + continue; + if (phba->ct_ctx[ctx_idx].rxid != rxid) + continue; + if (phba->ct_ctx[ctx_idx].oxid != oxid) + continue; + if (phba->ct_ctx[ctx_idx].SID != sid) + continue; + phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID; + handled = 1; + } + return handled; +} + +/** * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command * @job: SET_EVENT fc_bsg_job **/ @@ -1033,14 +1211,6 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job) goto job_error; } - dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); - if (dd_data == NULL) { - lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, - "2734 Failed allocation of dd_data\n"); - rc = -ENOMEM; - goto job_error; - } - event_req = (struct set_ct_event *) job->request->rqst_data.h_vendor.vendor_cmd; ev_mask = ((uint32_t)(unsigned long)event_req->type_mask & @@ -1050,6 +1220,7 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job) if (evt->reg_id == event_req->ev_reg_id) { lpfc_bsg_event_ref(evt); evt->wait_time_stamp = jiffies; + dd_data = (struct bsg_job_data *)evt->dd_data; break; } } @@ -1057,6 +1228,13 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job) if (&evt->node == &phba->ct_ev_waiters) { /* no event waiting struct yet - first call */ + dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); + if (dd_data == NULL) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2734 Failed allocation of dd_data\n"); + rc = -ENOMEM; + goto job_error; + } evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id, event_req->ev_req_id); if (!evt) { @@ -1066,7 +1244,10 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job) rc = -ENOMEM; goto job_error; } - + dd_data->type = TYPE_EVT; + dd_data->set_job = NULL; + dd_data->context_un.evt = evt; + evt->dd_data = (void *)dd_data; spin_lock_irqsave(&phba->ct_ev_lock, flags); list_add(&evt->node, &phba->ct_ev_waiters); lpfc_bsg_event_ref(evt); @@ -1076,9 +1257,7 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job) spin_lock_irqsave(&phba->ct_ev_lock, flags); evt->waiting = 1; - dd_data->type = TYPE_EVT; - dd_data->context_un.evt = evt; - evt->set_job = job; /* for unsolicited command */ + dd_data->set_job = job; /* for unsolicited command */ job->dd_data = dd_data; /* for fc transport timeout callback*/ spin_unlock_irqrestore(&phba->ct_ev_lock, flags); return 0; /* call job done later */ @@ -1102,7 +1281,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job) struct lpfc_hba *phba = vport->phba; struct get_ct_event *event_req; struct get_ct_event_reply *event_reply; - struct lpfc_bsg_event *evt; + struct lpfc_bsg_event *evt, *evt_next; struct event_data *evt_dat = NULL; unsigned long flags; uint32_t rc = 0; @@ -1122,7 +1301,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job) event_reply = (struct get_ct_event_reply *) job->reply->reply_data.vendor_reply.vendor_rsp; spin_lock_irqsave(&phba->ct_ev_lock, flags); - list_for_each_entry(evt, &phba->ct_ev_waiters, node) { + list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) { if (evt->reg_id == event_req->ev_reg_id) { if 
(list_empty(&evt->events_to_get)) break; @@ -1207,57 +1386,69 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba, struct bsg_job_data *dd_data; struct fc_bsg_job *job; IOCB_t *rsp; - struct lpfc_dmabuf *bmp; + struct lpfc_dmabuf *bmp, *cmp; struct lpfc_nodelist *ndlp; unsigned long flags; int rc = 0; + dd_data = cmdiocbq->context1; + + /* Determine if job has been aborted */ spin_lock_irqsave(&phba->ct_ev_lock, flags); - dd_data = cmdiocbq->context2; - /* normal completion and timeout crossed paths, already done */ - if (!dd_data) { - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - return; + job = dd_data->set_job; + if (job) { + /* Prevent timeout handling from trying to abort job */ + job->dd_data = NULL; } + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + /* Close the timeout handler abort window */ + spin_lock_irqsave(&phba->hbalock, flags); + cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING; + spin_unlock_irqrestore(&phba->hbalock, flags); - job = dd_data->context_un.iocb.set_job; - bmp = dd_data->context_un.iocb.bmp; - rsp = &rspiocbq->iocb; ndlp = dd_data->context_un.iocb.ndlp; + cmp = cmdiocbq->context2; + bmp = cmdiocbq->context3; + rsp = &rspiocbq->iocb; - pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, - job->request_payload.sg_cnt, DMA_TO_DEVICE); + /* Copy the completed job data or set the error status */ - if (rsp->ulpStatus) { - if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { - switch (rsp->un.ulpWord[4] & 0xff) { - case IOERR_SEQUENCE_TIMEOUT: - rc = -ETIMEDOUT; - break; - case IOERR_INVALID_RPI: - rc = -EFAULT; - break; - default: + if (job) { + if (rsp->ulpStatus) { + if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { + switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) { + case IOERR_SEQUENCE_TIMEOUT: + rc = -ETIMEDOUT; + break; + case IOERR_INVALID_RPI: + rc = -EFAULT; + break; + default: + rc = -EACCES; + break; + } + } else { rc = -EACCES; - break; } - } else - rc = -EACCES; - } else - job->reply->reply_payload_rcv_len = - rsp->un.genreq64.bdl.bdeSize; + } else { + job->reply->reply_payload_rcv_len = 0; + } + } + lpfc_free_bsg_buffers(phba, cmp); lpfc_mbuf_free(phba, bmp->virt, bmp->phys); + kfree(bmp); lpfc_sli_release_iocbq(phba, cmdiocbq); lpfc_nlp_put(ndlp); - kfree(bmp); kfree(dd_data); - /* make error code available to userspace */ - job->reply->result = rc; - job->dd_data = NULL; - /* complete the job back to userspace */ - job->job_done(job); - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + /* Complete the job if the job is still active */ + + if (job) { + job->reply->result = rc; + job->job_done(job); + } return; } @@ -1271,13 +1462,15 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba, **/ static int lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, - struct lpfc_dmabuf *bmp, int num_entry) + struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp, + int num_entry) { IOCB_t *icmd; struct lpfc_iocbq *ctiocb = NULL; int rc = 0; struct lpfc_nodelist *ndlp = NULL; struct bsg_job_data *dd_data; + unsigned long flags; uint32_t creg_val; /* allocate our bsg tracking structure */ @@ -1314,7 +1507,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, icmd->ulpClass = CLASS3; if (phba->sli_rev == LPFC_SLI_REV4) { /* Do not issue unsol response if oxid not marked as valid */ - if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) { + if (phba->ct_ctx[tag].valid != UNSOL_VALID) { rc = IOCB_ERROR; goto issue_ct_rsp_exit; } @@ -1332,7 +1525,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, 
/* Check if the ndlp is active */ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { - rc = -IOCB_ERROR; + rc = IOCB_ERROR; goto issue_ct_rsp_exit; } @@ -1340,7 +1533,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, * we respond */ if (!lpfc_nlp_get(ndlp)) { - rc = -IOCB_ERROR; + rc = IOCB_ERROR; goto issue_ct_rsp_exit; } @@ -1348,7 +1541,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; /* The exchange is done, mark the entry as invalid */ - phba->ct_ctx[tag].flags &= ~UNSOL_VALID; + phba->ct_ctx[tag].valid = UNSOL_INVALID; } else icmd->ulpContext = (ushort) tag; @@ -1362,17 +1555,18 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, ctiocb->iocb_cmpl = NULL; ctiocb->iocb_flag |= LPFC_IO_LIBDFC; ctiocb->vport = phba->pport; + ctiocb->context1 = dd_data; + ctiocb->context2 = cmp; ctiocb->context3 = bmp; - + ctiocb->context_un.ndlp = ndlp; ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp; - ctiocb->context2 = dd_data; - ctiocb->context1 = ndlp; + dd_data->type = TYPE_IOCB; + dd_data->set_job = job; dd_data->context_un.iocb.cmdiocbq = ctiocb; - dd_data->context_un.iocb.rspiocbq = NULL; - dd_data->context_un.iocb.set_job = job; - dd_data->context_un.iocb.bmp = bmp; dd_data->context_un.iocb.ndlp = ndlp; + dd_data->context_un.iocb.rmp = NULL; + job->dd_data = dd_data; if (phba->cfg_poll & DISABLE_FCP_RING_INT) { if (lpfc_readl(phba->HCregaddr, &creg_val)) { @@ -1386,8 +1580,19 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); - if (rc == IOCB_SUCCESS) + if (rc == IOCB_SUCCESS) { + spin_lock_irqsave(&phba->hbalock, flags); + /* make sure the I/O had not been completed/released */ + if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) { + /* open up abort window to timeout handler */ + ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING; + } + spin_unlock_irqrestore(&phba->hbalock, flags); return 0; /* done for now */ + } + + /* iocb failed so cleanup */ + job->dd_data = NULL; issue_ct_rsp_exit: lpfc_sli_release_iocbq(phba, ctiocb); @@ -1409,11 +1614,8 @@ lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job) struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *) job->request->rqst_data.h_vendor.vendor_cmd; struct ulp_bde64 *bpl; - struct lpfc_dmabuf *bmp = NULL; - struct scatterlist *sgel = NULL; - int request_nseg; - int numbde; - dma_addr_t busaddr; + struct lpfc_dmabuf *bmp = NULL, *cmp = NULL; + int bpl_entries; uint32_t tag = mgmt_resp->tag; unsigned long reqbfrcnt = (unsigned long)job->request_payload.payload_len; @@ -1441,30 +1643,28 @@ lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job) INIT_LIST_HEAD(&bmp->list); bpl = (struct ulp_bde64 *) bmp->virt; - request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, - job->request_payload.sg_cnt, DMA_TO_DEVICE); - for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { - busaddr = sg_dma_address(sgel); - bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; - bpl->tus.f.bdeSize = sg_dma_len(sgel); - bpl->tus.w = cpu_to_le32(bpl->tus.w); - bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); - bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); - bpl++; + bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64)); + cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len, + 1, bpl, &bpl_entries); + if (!cmp) { + rc = -ENOMEM; + goto send_mgmt_rsp_free_bmp; } + lpfc_bsg_copy_data(cmp, &job->request_payload, + job->request_payload.payload_len, 1); 
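Editorial note: lpfc_bsg_send_mgmt_rsp() and lpfc_issue_ct_rsp() above now stage the request payload through lpfc_alloc_bsg_buffers() rather than DMA-mapping the caller's sg list, so the BPL entry count is simply a ceiling division of the payload length by the per-mbuf buffer size. The helper below restates that sizing rule on its own; the function name is hypothetical and LPFC_BPL_SIZE is assumed from the driver headers.

/* Returns the BPL entry count needed for payload_len, or -1 if it cannot fit */
static int lpfc_bsg_bpl_entries_sketch(unsigned int payload_len, int max_entries)
{
	if (!payload_len ||
	    payload_len > (unsigned int)max_entries * LPFC_BPL_SIZE)
		return -1;

	/* one LPFC_BPL_SIZE mbuf (and one ulp_bde64 entry) per chunk */
	return DIV_ROUND_UP(payload_len, LPFC_BPL_SIZE);
}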
- rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg); + rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries); if (rc == IOCB_SUCCESS) return 0; /* done for now */ - /* TBD need to handle a timeout */ - pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, - job->request_payload.sg_cnt, DMA_TO_DEVICE); rc = -EACCES; - lpfc_mbuf_free(phba, bmp->virt, bmp->phys); + + lpfc_free_bsg_buffers(phba, cmp); send_mgmt_rsp_free_bmp: + if (bmp->virt) + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); kfree(bmp); send_mgmt_rsp_exit: /* make error code available to userspace */ @@ -1514,7 +1714,7 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba) scsi_block_requests(shost); } - while (pring->txcmplq_cnt) { + while (!list_empty(&pring->txcmplq)) { if (i++ > 500) /* wait up to 5 seconds */ break; msleep(10); @@ -1710,6 +1910,8 @@ lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag) phba->sli4_hba.lnk_info.lnk_no); link_diag_state = &pmboxq->u.mqe.un.link_diag_state; + bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req, + LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE); bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req, phba->sli4_hba.lnk_info.lnk_no); bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req, @@ -1768,7 +1970,7 @@ lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba) bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp); bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req, - LPFC_DIAG_LOOPBACK_TYPE_SERDES); + LPFC_DIAG_LOOPBACK_TYPE_INTERNAL); mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO); if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) { @@ -2345,7 +2547,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, struct lpfc_sli_ct_request *ctreq = NULL; int ret_val = 0; int time_left; - int iocb_stat = 0; + int iocb_stat = IOCB_SUCCESS; unsigned long flags; *txxri = 0; @@ -2421,12 +2623,13 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; cmdiocbq->vport = phba->pport; + cmdiocbq->iocb_cmpl = NULL; iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq, (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT); - if (iocb_stat) { + if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) { ret_val = -EIO; goto err_get_xri_exit; } @@ -2436,7 +2639,8 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, evt->wait_time_stamp = jiffies; time_left = wait_event_interruptible_timeout( evt->wq, !list_empty(&evt->events_to_see), - ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); + msecs_to_jiffies(1000 * + ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); if (list_empty(&evt->events_to_see)) ret_val = (time_left) ? -EINTR : -ETIMEDOUT; else { @@ -2473,7 +2677,7 @@ err_get_xri_exit: * @phba: Pointer to HBA context object * * This function allocates BSG_MBOX_SIZE (4KB) page size dma buffer and. - * retruns the pointer to the buffer. + * returns the pointer to the buffer. 
**/ static struct lpfc_dmabuf * lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba) @@ -2809,7 +3013,7 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job) uint8_t *ptr = NULL, *rx_databuf = NULL; int rc = 0; int time_left; - int iocb_stat; + int iocb_stat = IOCB_SUCCESS; unsigned long flags; void *dataout = NULL; uint32_t total_mem; @@ -2995,12 +3199,14 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job) } cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; cmdiocbq->vport = phba->pport; + cmdiocbq->iocb_cmpl = NULL; iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq, (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT); - if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) && - (rsp->ulpStatus != IOCB_SUCCESS))) { + if ((iocb_stat != IOCB_SUCCESS) || + ((phba->sli_rev < LPFC_SLI_REV4) && + (rsp->ulpStatus != IOSTAT_SUCCESS))) { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "3126 Failed loopback test issue iocb: " "iocb_stat:x%x\n", iocb_stat); @@ -3011,7 +3217,8 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job) evt->waiting = 1; time_left = wait_event_interruptible_timeout( evt->wq, !list_empty(&evt->events_to_see), - ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); + msecs_to_jiffies(1000 * + ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); evt->waiting = 0; if (list_empty(&evt->events_to_see)) { rc = (time_left) ? -EINTR : -ETIMEDOUT; @@ -3054,7 +3261,7 @@ err_loopback_test_exit: lpfc_bsg_event_unref(evt); /* delete */ spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - if (cmdiocbq != NULL) + if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT)) lpfc_sli_release_iocbq(phba, cmdiocbq); if (rspiocbq != NULL) @@ -3146,13 +3353,7 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) unsigned long flags; uint8_t *pmb, *pmb_buf; - spin_lock_irqsave(&phba->ct_ev_lock, flags); dd_data = pmboxq->context1; - /* job already timed out? */ - if (!dd_data) { - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - return; - } /* * The outgoing buffer is readily referred from the dma buffer, @@ -3162,29 +3363,33 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb; memcpy(pmb_buf, pmb, sizeof(MAILBOX_t)); - job = dd_data->context_un.mbox.set_job; + /* Determine if job has been aborted */ + + spin_lock_irqsave(&phba->ct_ev_lock, flags); + job = dd_data->set_job; + if (job) { + /* Prevent timeout handling from trying to abort job */ + job->dd_data = NULL; + } + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + /* Copy the mailbox data to the job if it is still active */ + if (job) { size = job->reply_payload.payload_len; job->reply->reply_payload_rcv_len = sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, pmb_buf, size); - /* need to hold the lock until we set job->dd_data to NULL - * to hold off the timeout handler returning to the mid-layer - * while we are still processing the job. 
- */ - job->dd_data = NULL; - dd_data->context_un.mbox.set_job = NULL; - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - } else { - dd_data->context_un.mbox.set_job = NULL; - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); } + dd_data->set_job = NULL; mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool); lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers); kfree(dd_data); + /* Complete the job if the job is still active */ + if (job) { job->reply->result = 0; job->job_done(job); @@ -3239,6 +3444,7 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba, case MBX_DOWN_LOAD: case MBX_UPDATE_CFG: case MBX_KILL_BOARD: + case MBX_READ_TOPOLOGY: case MBX_LOAD_AREA: case MBX_LOAD_EXP_ROM: case MBX_BEACON: @@ -3269,7 +3475,6 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba, } break; case MBX_READ_SPARM64: - case MBX_READ_TOPOLOGY: case MBX_REG_LOGIN: case MBX_REG_LOGIN64: case MBX_CONFIG_PORT: @@ -3330,19 +3535,22 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) struct lpfc_sli_config_mbox *sli_cfg_mbx; uint8_t *pmbx; - spin_lock_irqsave(&phba->ct_ev_lock, flags); dd_data = pmboxq->context1; - /* has the job already timed out? */ - if (!dd_data) { - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - job = NULL; - goto job_done_out; + + /* Determine if job has been aborted */ + spin_lock_irqsave(&phba->ct_ev_lock, flags); + job = dd_data->set_job; + if (job) { + /* Prevent timeout handling from trying to abort job */ + job->dd_data = NULL; } + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); /* * The outgoing buffer is readily referred from the dma buffer, * just need to get header part from mailboxq structure. */ + pmb = (uint8_t *)&pmboxq->u.mb; pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb; /* Copy the byte swapped response mailbox back to the user */ @@ -3359,21 +3567,18 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len); } - job = dd_data->context_un.mbox.set_job; + /* Complete the job if the job is still active */ + if (job) { size = job->reply_payload.payload_len; job->reply->reply_payload_rcv_len = sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, pmb_buf, size); + /* result for successful */ job->reply->result = 0; - job->dd_data = NULL; - /* need to hold the lock util we set job->dd_data to NULL - * to hold off the timeout handler from midlayer to take - * any action. 
- */ - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2937 SLI_CONFIG ext-buffer maibox command " "(x%x/x%x) complete bsg job done, bsize:%d\n", @@ -3384,20 +3589,18 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) phba->mbox_ext_buf_ctx.mboxType, dma_ebuf, sta_pos_addr, phba->mbox_ext_buf_ctx.mbx_dmabuf, 0); - } else - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - -job_done_out: - if (!job) + } else { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2938 SLI_CONFIG ext-buffer maibox " "command (x%x/x%x) failure, rc:x%x\n", phba->mbox_ext_buf_ctx.nembType, phba->mbox_ext_buf_ctx.mboxType, rc); + } + + /* state change */ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE; kfree(dd_data); - return job; } @@ -3414,8 +3617,10 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) { struct fc_bsg_job *job; + job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); + /* handle the BSG job with mailbox command */ - if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS) + if (!job) pmboxq->u.mb.mbxStatus = MBXERR_ERROR; lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, @@ -3423,15 +3628,13 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) "complete, ctxState:x%x, mbxStatus:x%x\n", phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); - job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); - if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1) lpfc_bsg_mbox_ext_session_reset(phba); /* free base driver mailbox structure memory */ mempool_free(pmboxq, phba->mbox_mem_pool); - /* complete the bsg job if we have it */ + /* if the job is still active, call job done */ if (job) job->job_done(job); @@ -3451,8 +3654,10 @@ lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) { struct fc_bsg_job *job; + job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); + /* handle the BSG job with the mailbox command */ - if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS) + if (!job) pmboxq->u.mb.mbxStatus = MBXERR_ERROR; lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, @@ -3460,13 +3665,11 @@ lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) "complete, ctxState:x%x, mbxStatus:x%x\n", phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); - job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); - /* free all memory, including dma buffers */ mempool_free(pmboxq, phba->mbox_mem_pool); lpfc_bsg_mbox_ext_session_reset(phba); - /* complete the bsg job if we have it */ + /* if the job is still active, call job done */ if (job) job->job_done(job); @@ -3712,9 +3915,9 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, /* context fields to callback function */ pmboxq->context1 = dd_data; dd_data->type = TYPE_MBOX; + dd_data->set_job = job; dd_data->context_un.mbox.pmboxq = pmboxq; dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx; - dd_data->context_un.mbox.set_job = job; job->dd_data = dd_data; /* state change */ @@ -3881,14 +4084,14 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, /* context fields to callback function */ pmboxq->context1 = dd_data; dd_data->type = TYPE_MBOX; + dd_data->set_job = job; dd_data->context_un.mbox.pmboxq = pmboxq; dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx; - dd_data->context_un.mbox.set_job = job; job->dd_data = dd_data; /* state change */ - phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT; + phba->mbox_ext_buf_ctx.state = 
LPFC_BSG_MBOX_PORT; rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) { lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, @@ -3904,6 +4107,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, } /* wait for additoinal external buffers */ + job->reply->result = 0; job->job_done(job); return SLI_CONFIG_HANDLED; @@ -3949,6 +4153,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, if (subsys == SLI_CONFIG_SUBSYS_FCOE) { switch (opcode) { case FCOE_OPCODE_READ_FCF: + case FCOE_OPCODE_GET_DPORT_RESULTS: lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2957 Handled SLI_CONFIG " "subsys_fcoe, opcode:x%x\n", @@ -3957,6 +4162,8 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, nemb_mse, dmabuf); break; case FCOE_OPCODE_ADD_FCF: + case FCOE_OPCODE_SET_DPORT_MODE: + case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE: lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2958 Handled SLI_CONFIG " "subsys_fcoe, opcode:x%x\n", @@ -3975,9 +4182,10 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, } else if (subsys == SLI_CONFIG_SUBSYS_COMN) { switch (opcode) { case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES: + case COMN_OPCODE_GET_CNTL_ATTRIBUTES: lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "3106 Handled SLI_CONFIG " - "subsys_fcoe, opcode:x%x\n", + "subsys_comn, opcode:x%x\n", opcode); rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job, nemb_mse, dmabuf); @@ -3985,7 +4193,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, default: lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "3107 Reject SLI_CONFIG " - "subsys_fcoe, opcode:x%x\n", + "subsys_comn, opcode:x%x\n", opcode); rc = -EPERM; break; @@ -4220,9 +4428,9 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job, /* context fields to callback function */ pmboxq->context1 = dd_data; dd_data->type = TYPE_MBOX; + dd_data->set_job = job; dd_data->context_un.mbox.pmboxq = pmboxq; dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf; - dd_data->context_un.mbox.set_job = job; job->dd_data = dd_data; /* state change */ @@ -4407,7 +4615,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, uint8_t *from; uint32_t size; - /* in case no data is transferred */ job->reply->reply_payload_rcv_len = 0; @@ -4556,7 +4763,12 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, + sizeof(MAILBOX_t)); } } else if (phba->sli_rev == LPFC_SLI_REV4) { - if (pmb->mbxCommand == MBX_DUMP_MEMORY) { + /* Let type 4 (well known data) through because the data is + * returned in varwords[4-8] + * otherwise check the recieve length and fetch the buffer addr + */ + if ((pmb->mbxCommand == MBX_DUMP_MEMORY) && + (pmb->un.varDmp.type != DMP_WELL_KNOWN)) { /* rebuild the command for sli4 using our own buffers * like we do for biu diags */ @@ -4628,9 +4840,9 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, /* setup context field to pass wait_queue pointer to wake function */ pmboxq->context1 = dd_data; dd_data->type = TYPE_MBOX; + dd_data->set_job = job; dd_data->context_un.mbox.pmboxq = pmboxq; dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx; - dd_data->context_un.mbox.set_job = job; dd_data->context_un.mbox.ext = ext; dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset; dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen; @@ -4688,7 +4900,7 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job) if (job->request_len < sizeof(struct fc_bsg_request) + sizeof(struct 
dfc_mbox_req)) { lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, - "2737 Mix-and-match backward compability " + "2737 Mix-and-match backward compatibility " "between MBOX_REQ old size:%d and " "new request size:%d\n", (int)(job->request_len - @@ -4744,75 +4956,79 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba, struct bsg_job_data *dd_data; struct fc_bsg_job *job; IOCB_t *rsp; - struct lpfc_dmabuf *bmp; + struct lpfc_dmabuf *bmp, *cmp, *rmp; struct lpfc_bsg_menlo *menlo; unsigned long flags; struct menlo_response *menlo_resp; + unsigned int rsp_size; int rc = 0; - spin_lock_irqsave(&phba->ct_ev_lock, flags); dd_data = cmdiocbq->context1; - if (!dd_data) { - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - return; - } - + cmp = cmdiocbq->context2; + bmp = cmdiocbq->context3; menlo = &dd_data->context_un.menlo; - job = menlo->set_job; - job->dd_data = NULL; /* so timeout handler does not reply */ - - spin_lock(&phba->hbalock); - cmdiocbq->iocb_flag |= LPFC_IO_WAKE; - if (cmdiocbq->context2 && rspiocbq) - memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, - &rspiocbq->iocb, sizeof(IOCB_t)); - spin_unlock(&phba->hbalock); - - bmp = menlo->bmp; - rspiocbq = menlo->rspiocbq; + rmp = menlo->rmp; rsp = &rspiocbq->iocb; - pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, - job->request_payload.sg_cnt, DMA_TO_DEVICE); - pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, - job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + /* Determine if job has been aborted */ + spin_lock_irqsave(&phba->ct_ev_lock, flags); + job = dd_data->set_job; + if (job) { + /* Prevent timeout handling from trying to abort job */ + job->dd_data = NULL; + } + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - /* always return the xri, this would be used in the case - * of a menlo download to allow the data to be sent as a continuation - * of the exchange. - */ - menlo_resp = (struct menlo_response *) - job->reply->reply_data.vendor_reply.vendor_rsp; - menlo_resp->xri = rsp->ulpContext; - if (rsp->ulpStatus) { - if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { - switch (rsp->un.ulpWord[4] & 0xff) { - case IOERR_SEQUENCE_TIMEOUT: - rc = -ETIMEDOUT; - break; - case IOERR_INVALID_RPI: - rc = -EFAULT; - break; - default: + /* Copy the job data or set the failing status for the job */ + + if (job) { + /* always return the xri, this would be used in the case + * of a menlo download to allow the data to be sent as a + * continuation of the exchange. 
+ */ + + menlo_resp = (struct menlo_response *) + job->reply->reply_data.vendor_reply.vendor_rsp; + menlo_resp->xri = rsp->ulpContext; + if (rsp->ulpStatus) { + if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { + switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) { + case IOERR_SEQUENCE_TIMEOUT: + rc = -ETIMEDOUT; + break; + case IOERR_INVALID_RPI: + rc = -EFAULT; + break; + default: + rc = -EACCES; + break; + } + } else { rc = -EACCES; - break; } - } else - rc = -EACCES; - } else - job->reply->reply_payload_rcv_len = - rsp->un.genreq64.bdl.bdeSize; + } else { + rsp_size = rsp->un.genreq64.bdl.bdeSize; + job->reply->reply_payload_rcv_len = + lpfc_bsg_copy_data(rmp, &job->reply_payload, + rsp_size, 0); + } + + } - lpfc_mbuf_free(phba, bmp->virt, bmp->phys); - lpfc_sli_release_iocbq(phba, rspiocbq); lpfc_sli_release_iocbq(phba, cmdiocbq); + lpfc_free_bsg_buffers(phba, cmp); + lpfc_free_bsg_buffers(phba, rmp); + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); kfree(bmp); kfree(dd_data); - /* make error code available to userspace */ - job->reply->result = rc; - /* complete the job back to userspace */ - job->job_done(job); - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + /* Complete the job if active */ + + if (job) { + job->reply->result = rc; + job->job_done(job); + } + return; } @@ -4830,17 +5046,14 @@ lpfc_menlo_cmd(struct fc_bsg_job *job) { struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; struct lpfc_hba *phba = vport->phba; - struct lpfc_iocbq *cmdiocbq, *rspiocbq; - IOCB_t *cmd, *rsp; + struct lpfc_iocbq *cmdiocbq; + IOCB_t *cmd; int rc = 0; struct menlo_command *menlo_cmd; struct menlo_response *menlo_resp; - struct lpfc_dmabuf *bmp = NULL; + struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL; int request_nseg; int reply_nseg; - struct scatterlist *sgel = NULL; - int numbde; - dma_addr_t busaddr; struct bsg_job_data *dd_data; struct ulp_bde64 *bpl = NULL; @@ -4895,50 +5108,38 @@ lpfc_menlo_cmd(struct fc_bsg_job *job) goto free_dd; } - cmdiocbq = lpfc_sli_get_iocbq(phba); - if (!cmdiocbq) { + bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); + if (!bmp->virt) { rc = -ENOMEM; goto free_bmp; } - rspiocbq = lpfc_sli_get_iocbq(phba); - if (!rspiocbq) { - rc = -ENOMEM; - goto free_cmdiocbq; - } - - rsp = &rspiocbq->iocb; + INIT_LIST_HEAD(&bmp->list); - bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); - if (!bmp->virt) { + bpl = (struct ulp_bde64 *)bmp->virt; + request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64); + cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len, + 1, bpl, &request_nseg); + if (!cmp) { rc = -ENOMEM; - goto free_rspiocbq; + goto free_bmp; } + lpfc_bsg_copy_data(cmp, &job->request_payload, + job->request_payload.payload_len, 1); - INIT_LIST_HEAD(&bmp->list); - bpl = (struct ulp_bde64 *) bmp->virt; - request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, - job->request_payload.sg_cnt, DMA_TO_DEVICE); - for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { - busaddr = sg_dma_address(sgel); - bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; - bpl->tus.f.bdeSize = sg_dma_len(sgel); - bpl->tus.w = cpu_to_le32(bpl->tus.w); - bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); - bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); - bpl++; + bpl += request_nseg; + reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg; + rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0, + bpl, &reply_nseg); + if (!rmp) { + rc = -ENOMEM; + goto free_cmp; } - reply_nseg = pci_map_sg(phba->pcidev, 
job->reply_payload.sg_list, - job->reply_payload.sg_cnt, DMA_FROM_DEVICE); - for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) { - busaddr = sg_dma_address(sgel); - bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; - bpl->tus.f.bdeSize = sg_dma_len(sgel); - bpl->tus.w = cpu_to_le32(bpl->tus.w); - bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); - bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); - bpl++; + cmdiocbq = lpfc_sli_get_iocbq(phba); + if (!cmdiocbq) { + rc = -ENOMEM; + goto free_rmp; } cmd = &cmdiocbq->iocb; @@ -4960,11 +5161,10 @@ lpfc_menlo_cmd(struct fc_bsg_job *job) cmdiocbq->vport = phba->pport; /* We want the firmware to timeout before we do */ cmd->ulpTimeout = MENLO_TIMEOUT - 5; - cmdiocbq->context3 = bmp; - cmdiocbq->context2 = rspiocbq; cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp; cmdiocbq->context1 = dd_data; - cmdiocbq->context2 = rspiocbq; + cmdiocbq->context2 = cmp; + cmdiocbq->context3 = bmp; if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) { cmd->ulpCommand = CMD_GEN_REQUEST64_CR; cmd->ulpPU = MENLO_PU; /* 3 */ @@ -4978,29 +5178,25 @@ lpfc_menlo_cmd(struct fc_bsg_job *job) } dd_data->type = TYPE_MENLO; + dd_data->set_job = job; dd_data->context_un.menlo.cmdiocbq = cmdiocbq; - dd_data->context_un.menlo.rspiocbq = rspiocbq; - dd_data->context_un.menlo.set_job = job; - dd_data->context_un.menlo.bmp = bmp; + dd_data->context_un.menlo.rmp = rmp; + job->dd_data = dd_data; rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, MENLO_TIMEOUT - 5); if (rc == IOCB_SUCCESS) return 0; /* done for now */ - /* iocb failed so cleanup */ - pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, - job->request_payload.sg_cnt, DMA_TO_DEVICE); - pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, - job->reply_payload.sg_cnt, DMA_FROM_DEVICE); - - lpfc_mbuf_free(phba, bmp->virt, bmp->phys); - -free_rspiocbq: - lpfc_sli_release_iocbq(phba, rspiocbq); -free_cmdiocbq: lpfc_sli_release_iocbq(phba, cmdiocbq); + +free_rmp: + lpfc_free_bsg_buffers(phba, rmp); +free_cmp: + lpfc_free_bsg_buffers(phba, cmp); free_bmp: + if (bmp->virt) + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); kfree(bmp); free_dd: kfree(dd_data); @@ -5109,70 +5305,99 @@ lpfc_bsg_timeout(struct fc_bsg_job *job) struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *cmdiocb; - struct lpfc_bsg_event *evt; - struct lpfc_bsg_iocb *iocb; - struct lpfc_bsg_mbox *mbox; - struct lpfc_bsg_menlo *menlo; struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; struct bsg_job_data *dd_data; unsigned long flags; + int rc = 0; + LIST_HEAD(completions); + struct lpfc_iocbq *check_iocb, *next_iocb; + + /* if job's driver data is NULL, the command completed or is in the + * the process of completing. In this case, return status to request + * so the timeout is retried. This avoids double completion issues + * and the request will be pulled off the timer queue when the + * command's completion handler executes. Otherwise, prevent the + * command's completion handler from executing the job done callback + * and continue processing to abort the outstanding the command. 
+ */ spin_lock_irqsave(&phba->ct_ev_lock, flags); dd_data = (struct bsg_job_data *)job->dd_data; - /* timeout and completion crossed paths if no dd_data */ - if (!dd_data) { + if (dd_data) { + dd_data->set_job = NULL; + job->dd_data = NULL; + } else { spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - return 0; + return -EAGAIN; } switch (dd_data->type) { case TYPE_IOCB: - iocb = &dd_data->context_un.iocb; - cmdiocb = iocb->cmdiocbq; - /* hint to completion handler that the job timed out */ - job->reply->result = -EAGAIN; + /* Check to see if IOCB was issued to the port or not. If not, + * remove it from the txq queue and call cancel iocbs. + * Otherwise, call abort iotag + */ + cmdiocb = dd_data->context_un.iocb.cmdiocbq; spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - /* this will call our completion handler */ - spin_lock_irq(&phba->hbalock); - lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); - spin_unlock_irq(&phba->hbalock); + + spin_lock_irqsave(&phba->hbalock, flags); + /* make sure the I/O abort window is still open */ + if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) { + spin_unlock_irqrestore(&phba->hbalock, flags); + return -EAGAIN; + } + list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, + list) { + if (check_iocb == cmdiocb) { + list_move_tail(&check_iocb->list, &completions); + break; + } + } + if (list_empty(&completions)) + lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); + spin_unlock_irqrestore(&phba->hbalock, flags); + if (!list_empty(&completions)) { + lpfc_sli_cancel_iocbs(phba, &completions, + IOSTAT_LOCAL_REJECT, + IOERR_SLI_ABORTED); + } break; + case TYPE_EVT: - evt = dd_data->context_un.evt; - /* this event has no job anymore */ - evt->set_job = NULL; - job->dd_data = NULL; - job->reply->reply_payload_rcv_len = 0; - /* Return -EAGAIN which is our way of signallying the - * app to retry. - */ - job->reply->result = -EAGAIN; spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - job->job_done(job); break; + case TYPE_MBOX: - mbox = &dd_data->context_un.mbox; - /* this mbox has no job anymore */ - mbox->set_job = NULL; - job->dd_data = NULL; - job->reply->reply_payload_rcv_len = 0; - job->reply->result = -EAGAIN; - /* the mbox completion handler can now be run */ - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - job->job_done(job); + /* Update the ext buf ctx state if needed */ + if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT) phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS; + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); break; case TYPE_MENLO: - menlo = &dd_data->context_un.menlo; - cmdiocb = menlo->cmdiocbq; - /* hint to completion handler that the job timed out */ - job->reply->result = -EAGAIN; + /* Check to see if IOCB was issued to the port or not. If not, + * remove it from the txq queue and call cancel iocbs. + * Otherwise, call abort iotag. 
+ */ + cmdiocb = dd_data->context_un.menlo.cmdiocbq; spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - /* this will call our completion handler */ - spin_lock_irq(&phba->hbalock); - lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); - spin_unlock_irq(&phba->hbalock); + + spin_lock_irqsave(&phba->hbalock, flags); + list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, + list) { + if (check_iocb == cmdiocb) { + list_move_tail(&check_iocb->list, &completions); + break; + } + } + if (list_empty(&completions)) + lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); + spin_unlock_irqrestore(&phba->hbalock, flags); + if (!list_empty(&completions)) { + lpfc_sli_cancel_iocbs(phba, &completions, + IOSTAT_LOCAL_REJECT, + IOERR_SLI_ABORTED); + } break; default: spin_unlock_irqrestore(&phba->ct_ev_lock, flags); @@ -5183,5 +5408,5 @@ lpfc_bsg_timeout(struct fc_bsg_job *job) * otherwise an error message will be displayed on the console * so always return success (zero) */ - return 0; + return rc; } diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h index edfe61fc52b..928ef609f36 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.h +++ b/drivers/scsi/lpfc/lpfc_bsg.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2010 Emulex. All rights reserved. * + * Copyright (C) 2010-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -231,6 +231,8 @@ struct lpfc_sli_config_emb0_subsys { #define SLI_CONFIG_SUBSYS_FCOE 0x0C #define FCOE_OPCODE_READ_FCF 0x08 #define FCOE_OPCODE_ADD_FCF 0x09 +#define FCOE_OPCODE_SET_DPORT_MODE 0x27 +#define FCOE_OPCODE_GET_DPORT_RESULTS 0x28 }; struct lpfc_sli_config_emb1_subsys { @@ -249,6 +251,7 @@ struct lpfc_sli_config_emb1_subsys { #define COMN_OPCODE_READ_OBJECT_LIST 0xAD #define COMN_OPCODE_DELETE_OBJECT 0xAE #define COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES 0x79 +#define COMN_OPCODE_GET_CNTL_ATTRIBUTES 0x20 uint32_t timeout; uint32_t request_length; uint32_t word9; diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 26924b7a6cd..db5604f01a1 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2011 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * * @@ -106,6 +106,7 @@ void lpfc_cleanup_discovery_resources(struct lpfc_vport *); void lpfc_cleanup(struct lpfc_vport *); void lpfc_disc_timeout(unsigned long); +int lpfc_unregister_fcf_prep(struct lpfc_hba *); struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t); struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t); void lpfc_worker_wake_up(struct lpfc_hba *); @@ -164,8 +165,7 @@ void lpfc_hb_timeout_handler(struct lpfc_hba *); void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, struct lpfc_iocbq *); -void lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, - struct lpfc_iocbq *); +int lpfc_ct_handle_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *); int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int); void lpfc_fdmi_tmo(unsigned long); @@ -183,10 +183,15 @@ int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int); void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int); int lpfc_online(struct lpfc_hba *); void lpfc_unblock_mgmt_io(struct lpfc_hba *); -void lpfc_offline_prep(struct lpfc_hba *); +void lpfc_offline_prep(struct lpfc_hba *, int); void lpfc_offline(struct lpfc_hba *); void lpfc_reset_hba(struct lpfc_hba *); +int lpfc_fof_queue_create(struct lpfc_hba *); +int lpfc_fof_queue_setup(struct lpfc_hba *); +int lpfc_fof_queue_destroy(struct lpfc_hba *); +irqreturn_t lpfc_sli4_fof_intr_handler(int, void *); + int lpfc_sli_setup(struct lpfc_hba *); int lpfc_sli_queue_setup(struct lpfc_hba *); @@ -196,8 +201,7 @@ irqreturn_t lpfc_sli_intr_handler(int, void *); irqreturn_t lpfc_sli_sp_intr_handler(int, void *); irqreturn_t lpfc_sli_fp_intr_handler(int, void *); irqreturn_t lpfc_sli4_intr_handler(int, void *); -irqreturn_t lpfc_sli4_sp_intr_handler(int, void *); -irqreturn_t lpfc_sli4_fp_intr_handler(int, void *); +irqreturn_t lpfc_sli4_hba_intr_handler(int, void *); void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *); @@ -243,6 +247,7 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t); void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *); int lpfc_mem_alloc(struct lpfc_hba *, int align); +int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *); void lpfc_mem_free(struct lpfc_hba *); void lpfc_mem_free_all(struct lpfc_hba *); void lpfc_stop_vport_timers(struct lpfc_vport *); @@ -254,6 +259,7 @@ int lpfc_sli_handle_fast_ring_event(struct lpfc_hba *, struct lpfc_sli_ring *, uint32_t); +struct lpfc_iocbq *__lpfc_sli_get_iocbq(struct lpfc_hba *); struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *); void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *); uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *); @@ -272,7 +278,7 @@ int lpfc_sli_host_down(struct lpfc_vport *); int lpfc_sli_hba_down(struct lpfc_hba *); int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); int lpfc_sli_handle_mb_event(struct lpfc_hba *); -void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *); +void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *, int); int lpfc_sli_check_eratt(struct lpfc_hba *); void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, struct lpfc_sli_ring *, uint32_t); @@ -283,6 +289,7 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); void lpfc_sli_bemem_bcopy(void *, void *, uint32_t); void lpfc_sli_abort_iocb_ring(struct 
lpfc_hba *, struct lpfc_sli_ring *); +void lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba); void lpfc_sli_hba_iocb_abort(struct lpfc_hba *); void lpfc_sli_flush_fcp_rings(struct lpfc_hba *); int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *, @@ -304,6 +311,9 @@ int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *, int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd); int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t, uint64_t, lpfc_ctx_cmd); +int +lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *, + uint16_t, uint64_t, lpfc_ctx_cmd); void lpfc_mbox_timeout(unsigned long); void lpfc_mbox_timeout_handler(struct lpfc_hba *); @@ -390,6 +400,7 @@ extern spinlock_t pgcnt_lock; extern unsigned int pgcnt; extern unsigned int lpfc_prot_mask; extern unsigned char lpfc_prot_guard; +extern unsigned int lpfc_fcp_look_ahead; /* Interface exported by fabric iocb scheduler */ void lpfc_fabric_abort_nport(struct lpfc_nodelist *); @@ -398,7 +409,6 @@ void lpfc_fabric_block_timeout(unsigned long); void lpfc_unblock_fabric_iocbs(struct lpfc_hba *); void lpfc_rampdown_queue_depth(struct lpfc_hba *); void lpfc_ramp_down_queue_handler(struct lpfc_hba *); -void lpfc_ramp_up_queue_handler(struct lpfc_hba *); void lpfc_scsi_dev_block(struct lpfc_hba *); void @@ -426,6 +436,7 @@ int lpfc_bsg_request(struct fc_bsg_job *); int lpfc_bsg_timeout(struct fc_bsg_job *); int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, struct lpfc_iocbq *); +int lpfc_bsg_ct_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *); void __lpfc_sli_ringtx_put(struct lpfc_hba *, struct lpfc_sli_ring *, struct lpfc_iocbq *); struct lpfc_iocbq *lpfc_sli_ringtx_get(struct lpfc_hba *, @@ -456,9 +467,33 @@ int lpfc_sli4_queue_create(struct lpfc_hba *); void lpfc_sli4_queue_destroy(struct lpfc_hba *); void lpfc_sli4_abts_err_handler(struct lpfc_hba *, struct lpfc_nodelist *, struct sli4_wcqe_xri_aborted *); +void lpfc_sli_abts_recover_port(struct lpfc_vport *, + struct lpfc_nodelist *); int lpfc_hba_init_link_fc_topology(struct lpfc_hba *, uint32_t, uint32_t); int lpfc_issue_reg_vfi(struct lpfc_vport *); int lpfc_issue_unreg_vfi(struct lpfc_vport *); int lpfc_selective_reset(struct lpfc_hba *); -int lpfc_sli4_read_config(struct lpfc_hba *phba); -int lpfc_scsi_buf_update(struct lpfc_hba *phba); +int lpfc_sli4_read_config(struct lpfc_hba *); +void lpfc_sli4_node_prep(struct lpfc_hba *); +int lpfc_sli4_xri_sgl_update(struct lpfc_hba *); +void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *); +uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *); +int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t); +void lpfc_sli4_offline_eratt(struct lpfc_hba *); + +struct lpfc_device_data *lpfc_create_device_data(struct lpfc_hba *, + struct lpfc_name *, + struct lpfc_name *, + uint64_t, bool); +void lpfc_delete_device_data(struct lpfc_hba *, struct lpfc_device_data*); +struct lpfc_device_data *__lpfc_get_device_data(struct lpfc_hba *, + struct list_head *list, + struct lpfc_name *, + struct lpfc_name *, uint64_t); +bool lpfc_enable_oas_lun(struct lpfc_hba *, struct lpfc_name *, + struct lpfc_name *, uint64_t); +bool lpfc_disable_oas_lun(struct lpfc_hba *, struct lpfc_name *, + struct lpfc_name *, uint64_t); +bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *, + struct lpfc_name *, uint64_t *, struct lpfc_name *, + struct lpfc_name *, uint64_t *, uint32_t *); diff --git a/drivers/scsi/lpfc/lpfc_ct.c 
b/drivers/scsi/lpfc/lpfc_ct.c index 707081d0a22..da61d8dc044 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2010 Emulex. All rights reserved. * + * Copyright (C) 2004-2013 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -104,7 +104,8 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) { lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); } else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) && - ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) { + ((icmd->un.ulpWord[4] & IOERR_PARAM_MASK) == + IOERR_RCV_BUFFER_WAITING)) { /* Not enough posted buffers; Try posting more buffers */ phba->fc_stat.NoRcvBuf++; if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) @@ -163,37 +164,24 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } /** - * lpfc_sli4_ct_abort_unsol_event - Default handle for sli4 unsol abort + * lpfc_ct_handle_unsol_abort - ct upper level protocol abort handler * @phba: Pointer to HBA context object. - * @pring: Pointer to the driver internal I/O ring. - * @piocbq: Pointer to the IOCBQ. + * @dmabuf: pointer to a dmabuf that describes the FC sequence * - * This function serves as the default handler for the sli4 unsolicited - * abort event. It shall be invoked when there is no application interface - * registered unsolicited abort handler. This handler does nothing but - * just simply releases the dma buffer used by the unsol abort event. + * This function serves as the upper level protocol abort handler for CT + * protocol. + * + * Return 1 if abort has been handled, 0 otherwise. 
**/ -void -lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *phba, - struct lpfc_sli_ring *pring, - struct lpfc_iocbq *piocbq) +int +lpfc_ct_handle_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf) { - IOCB_t *icmd = &piocbq->iocb; - struct lpfc_dmabuf *bdeBuf; - uint32_t size; + int handled; - /* Forward abort event to any process registered to receive ct event */ - if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0) - return; + /* CT upper level goes through BSG */ + handled = lpfc_bsg_ct_unsol_abort(phba, dmabuf); - /* If there is no BDE associated with IOCB, there is nothing to do */ - if (icmd->ulpBdeCount == 0) - return; - bdeBuf = piocbq->context2; - piocbq->context2 = NULL; - size = icmd->un.cont64[0].tus.f.bdeSize; - lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size); - lpfc_in_buf_free(phba, bdeBuf); + return handled; } static void @@ -292,7 +280,7 @@ lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb) buf_ptr = (struct lpfc_dmabuf *) ctiocb->context3; lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); kfree(buf_ptr); - ctiocb->context1 = NULL; + ctiocb->context3 = NULL; } lpfc_sli_release_iocbq(phba, ctiocb); return 0; @@ -633,7 +621,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check for retry */ if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT || - irsp->un.ulpWord[4] != IOERR_NO_RESOURCES) + (irsp->un.ulpWord[4] & IOERR_PARAM_MASK) != + IOERR_NO_RESOURCES) vport->fc_ns_retry++; /* CT command is being retried */ @@ -783,7 +772,9 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (cmdiocb->retry < LPFC_MAX_NS_RETRY) { retry = 1; if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { - switch (irsp->un.ulpWord[4]) { + switch ((irsp->un.ulpWord[4] & + IOERR_PARAM_MASK)) { + case IOERR_NO_RESOURCES: /* We don't increment the retry * count for this case. 
@@ -904,12 +895,14 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (irsp->ulpStatus) { lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, - "0268 NS cmd %x Error (%d %d)\n", + "0268 NS cmd x%x Error (x%x x%x)\n", cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4]); if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && - ((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) || - (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) + (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == + IOERR_SLI_DOWN) || + ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == + IOERR_SLI_ABORTED))) goto out; retry = cmdiocb->retry; @@ -1076,7 +1069,7 @@ int lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol, size_t size) { - char fwrev[16]; + char fwrev[FW_REV_STR_SIZE]; int n; lpfc_decode_firmware_rev(vport->phba, fwrev, 0); @@ -1818,7 +1811,8 @@ lpfc_fdmi_timeout_handler(struct lpfc_vport *vport) if (init_utsname()->nodename[0] != '\0') lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); else - mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60); + mod_timer(&vport->fc_fdmitmo, jiffies + + msecs_to_jiffies(1000 * 60)); } return; } @@ -1834,7 +1828,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag) uint8_t *fwname; if (phba->sli_rev == LPFC_SLI_REV4) - sprintf(fwrevision, "%s", vp->rev.opFwName); + snprintf(fwrevision, FW_REV_STR_SIZE, "%s", vp->rev.opFwName); else if (vp->rev.rBit) { if (psli->sli_flag & LPFC_SLI_ACTIVE) rev = vp->rev.sli2FwRev; diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 3587a3fe8fc..b0aedce3f54 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2007-2011 Emulex. All rights reserved. * + * Copyright (C) 2007-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * * @@ -490,9 +490,11 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) len += snprintf(buf+len, size-len, "Ring %d: CMD GetInx:%d (Max:%d Next:%d " "Local:%d flg:x%x) RSP PutInx:%d Max:%d\n", - i, pgpp->cmdGetInx, pring->numCiocb, - pring->next_cmdidx, pring->local_getidx, - pring->flag, pgpp->rspPutInx, pring->numRiocb); + i, pgpp->cmdGetInx, pring->sli.sli3.numCiocb, + pring->sli.sli3.next_cmdidx, + pring->sli.sli3.local_getidx, + pring->flag, pgpp->rspPutInx, + pring->sli.sli3.numRiocb); } if (phba->sli_rev <= LPFC_SLI_REV3) { @@ -557,6 +559,9 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) case NLP_STE_PRLI_ISSUE: statep = "PRLI "; break; + case NLP_STE_LOGO_ISSUE: + statep = "LOGO "; + break; case NLP_STE_UNMAPPED_NODE: statep = "UNMAP "; break; @@ -581,8 +586,13 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ", *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7)); - len += snprintf(buf+len, size-len, "RPI:%03d flag:x%08x ", - ndlp->nlp_rpi, ndlp->nlp_flag); + if (ndlp->nlp_flag & NLP_RPI_REGISTERED) + len += snprintf(buf+len, size-len, "RPI:%03d ", + ndlp->nlp_rpi); + else + len += snprintf(buf+len, size-len, "RPI:none "); + len += snprintf(buf+len, size-len, "flag:x%08x ", + ndlp->nlp_flag); if (!ndlp->nlp_type) len += snprintf(buf+len, size-len, "UNKNOWN_TYPE "); if (ndlp->nlp_type & NLP_FC_NODE) @@ -997,36 +1007,41 @@ lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf, return nbytes; } -static int -lpfc_debugfs_dif_err_open(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - return 0; -} - static ssize_t lpfc_debugfs_dif_err_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct dentry *dent = file->f_dentry; struct lpfc_hba *phba = file->private_data; - char cbuf[16]; + char cbuf[32]; + uint64_t tmp = 0; int cnt = 0; if (dent == phba->debug_writeGuard) - cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wgrd_cnt); + cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt); else if (dent == phba->debug_writeApp) - cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wapp_cnt); + cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt); else if (dent == phba->debug_writeRef) - cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wref_cnt); + cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt); + else if (dent == phba->debug_readGuard) + cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt); else if (dent == phba->debug_readApp) - cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rapp_cnt); + cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt); else if (dent == phba->debug_readRef) - cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rref_cnt); - else if (dent == phba->debug_InjErrLBA) - cnt = snprintf(cbuf, 16, "0x%lx\n", - (unsigned long) phba->lpfc_injerr_lba); - else + cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt); + else if (dent == phba->debug_InjErrNPortID) + cnt = snprintf(cbuf, 32, "0x%06x\n", phba->lpfc_injerr_nportid); + else if (dent == phba->debug_InjErrWWPN) { + memcpy(&tmp, &phba->lpfc_injerr_wwpn, sizeof(struct lpfc_name)); + tmp = cpu_to_be64(tmp); + cnt = snprintf(cbuf, 32, "0x%016llx\n", tmp); + } else if (dent == phba->debug_InjErrLBA) { + if (phba->lpfc_injerr_lba == (sector_t)(-1)) + cnt = snprintf(cbuf, 32, "off\n"); + else + cnt = snprintf(cbuf, 32, 
"0x%llx\n", + (uint64_t) phba->lpfc_injerr_lba); + } else lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0547 Unknown debugfs error injection entry\n"); @@ -1040,7 +1055,7 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf, struct dentry *dent = file->f_dentry; struct lpfc_hba *phba = file->private_data; char dstbuf[32]; - unsigned long tmp; + uint64_t tmp = 0; int size; memset(dstbuf, 0, 32); @@ -1048,7 +1063,12 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf, if (copy_from_user(dstbuf, buf, size)) return 0; - if (strict_strtoul(dstbuf, 0, &tmp)) + if (dent == phba->debug_InjErrLBA) { + if ((buf[0] == 'o') && (buf[1] == 'f') && (buf[2] == 'f')) + tmp = (uint64_t)(-1); + } + + if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp))) return 0; if (dent == phba->debug_writeGuard) @@ -1057,13 +1077,20 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf, phba->lpfc_injerr_wapp_cnt = (uint32_t)tmp; else if (dent == phba->debug_writeRef) phba->lpfc_injerr_wref_cnt = (uint32_t)tmp; + else if (dent == phba->debug_readGuard) + phba->lpfc_injerr_rgrd_cnt = (uint32_t)tmp; else if (dent == phba->debug_readApp) phba->lpfc_injerr_rapp_cnt = (uint32_t)tmp; else if (dent == phba->debug_readRef) phba->lpfc_injerr_rref_cnt = (uint32_t)tmp; else if (dent == phba->debug_InjErrLBA) phba->lpfc_injerr_lba = (sector_t)tmp; - else + else if (dent == phba->debug_InjErrNPortID) + phba->lpfc_injerr_nportid = (uint32_t)(tmp & Mask_DID); + else if (dent == phba->debug_InjErrWWPN) { + tmp = cpu_to_be64(tmp); + memcpy(&phba->lpfc_injerr_wwpn, &tmp, sizeof(struct lpfc_name)); + } else lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0548 Unknown debugfs error injection entry\n"); @@ -1138,22 +1165,8 @@ out: static loff_t lpfc_debugfs_lseek(struct file *file, loff_t off, int whence) { - struct lpfc_debug *debug; - loff_t pos = -1; - - debug = file->private_data; - - switch (whence) { - case 0: - pos = off; - break; - case 1: - pos = file->f_pos + off; - break; - case 2: - pos = debug->len - off; - } - return (pos < 0 || pos > debug->len) ? 
-EINVAL : (file->f_pos = pos); + struct lpfc_debug *debug = file->private_data; + return fixed_size_llseek(file, off, whence, debug->len); } /** @@ -1982,207 +1995,396 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, { struct lpfc_debug *debug = file->private_data; struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; - int len = 0, fcp_qidx; + int len = 0; char *pbuffer; + int x, cnt; + int max_cnt; + struct lpfc_queue *qp = NULL; + if (!debug->buffer) debug->buffer = kmalloc(LPFC_QUE_INFO_GET_BUF_SIZE, GFP_KERNEL); if (!debug->buffer) return 0; pbuffer = debug->buffer; + max_cnt = LPFC_QUE_INFO_GET_BUF_SIZE - 128; if (*ppos) return 0; - /* Get slow-path event queue information */ - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "Slow-path EQ information:\n"); - if (phba->sli4_hba.sp_eq) { - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tEQID[%02d], " - "QE-COUNT[%04d], QE-SIZE[%04d], " - "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", - phba->sli4_hba.sp_eq->queue_id, - phba->sli4_hba.sp_eq->entry_count, - phba->sli4_hba.sp_eq->entry_size, - phba->sli4_hba.sp_eq->host_index, - phba->sli4_hba.sp_eq->hba_index); - } + spin_lock_irq(&phba->hbalock); + + /* Fast-path event queue */ + if (phba->sli4_hba.hba_eq && phba->cfg_fcp_io_channel) { + cnt = phba->cfg_fcp_io_channel; + + for (x = 0; x < cnt; x++) { + + /* Fast-path EQ */ + qp = phba->sli4_hba.hba_eq[x]; + if (!qp) + goto proc_cq; + + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\nHBA EQ info: " + "EQ-STAT[max:x%x noE:x%x " + "bs:x%x proc:x%llx]\n", + qp->q_cnt_1, qp->q_cnt_2, + qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); + + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "EQID[%02d], " + "QE-CNT[%04d], QE-SIZE[%04d], " + "HOST-IDX[%04d], PORT-IDX[%04d]", + qp->queue_id, + qp->entry_count, + qp->entry_size, + qp->host_index, + qp->hba_index); + + + /* Reset max counter */ + qp->EQ_max_eqe = 0; + + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); + if (len >= max_cnt) + goto too_big; +proc_cq: + /* Fast-path FCP CQ */ + qp = phba->sli4_hba.fcp_cq[x]; + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tFCP CQ info: "); + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "AssocEQID[%02d]: " + "CQ STAT[max:x%x relw:x%x " + "xabt:x%x wq:x%llx]\n", + qp->assoc_qid, + qp->q_cnt_1, qp->q_cnt_2, + qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tCQID[%02d], " + "QE-CNT[%04d], QE-SIZE[%04d], " + "HOST-IDX[%04d], PORT-IDX[%04d]", + qp->queue_id, qp->entry_count, + qp->entry_size, qp->host_index, + qp->hba_index); + + + /* Reset max counter */ + qp->CQ_max_cqe = 0; + + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); + if (len >= max_cnt) + goto too_big; + + /* Fast-path FCP WQ */ + qp = phba->sli4_hba.fcp_wq[x]; - /* Get fast-path event queue information */ - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "Fast-path EQ information:\n"); - if (phba->sli4_hba.fp_eq) { - for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; - fcp_qidx++) { - if (phba->sli4_hba.fp_eq[fcp_qidx]) { + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tFCP WQ info: "); + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "AssocCQID[%02d]: " + "WQ-STAT[oflow:x%x posted:x%llx]\n", + qp->assoc_qid, + qp->q_cnt_1, (unsigned long long)qp->q_cnt_4); + len += snprintf(pbuffer+len, + 
LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tWQID[%02d], " + "QE-CNT[%04d], QE-SIZE[%04d], " + "HOST-IDX[%04d], PORT-IDX[%04d]", + qp->queue_id, + qp->entry_count, + qp->entry_size, + qp->host_index, + qp->hba_index); + + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); + if (len >= max_cnt) + goto too_big; + + if (x) + continue; + + /* Only EQ 0 has slow path CQs configured */ + + /* Slow-path mailbox CQ */ + qp = phba->sli4_hba.mbx_cq; + if (qp) { + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tMBX CQ info: "); + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "AssocEQID[%02d]: " + "CQ-STAT[mbox:x%x relw:x%x " + "xabt:x%x wq:x%llx]\n", + qp->assoc_qid, + qp->q_cnt_1, qp->q_cnt_2, + qp->q_cnt_3, + (unsigned long long)qp->q_cnt_4); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tEQID[%02d], " - "QE-COUNT[%04d], QE-SIZE[%04d], " - "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", - phba->sli4_hba.fp_eq[fcp_qidx]->queue_id, - phba->sli4_hba.fp_eq[fcp_qidx]->entry_count, - phba->sli4_hba.fp_eq[fcp_qidx]->entry_size, - phba->sli4_hba.fp_eq[fcp_qidx]->host_index, - phba->sli4_hba.fp_eq[fcp_qidx]->hba_index); + "\tCQID[%02d], " + "QE-CNT[%04d], QE-SIZE[%04d], " + "HOST-IDX[%04d], PORT-IDX[%04d]", + qp->queue_id, qp->entry_count, + qp->entry_size, qp->host_index, + qp->hba_index); + + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); + if (len >= max_cnt) + goto too_big; } - } - } - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); - - /* Get mailbox complete queue information */ - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "Slow-path MBX CQ information:\n"); - if (phba->sli4_hba.mbx_cq) { - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "Associated EQID[%02d]:\n", - phba->sli4_hba.mbx_cq->assoc_qid); - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tCQID[%02d], " - "QE-COUNT[%04d], QE-SIZE[%04d], " - "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", - phba->sli4_hba.mbx_cq->queue_id, - phba->sli4_hba.mbx_cq->entry_count, - phba->sli4_hba.mbx_cq->entry_size, - phba->sli4_hba.mbx_cq->host_index, - phba->sli4_hba.mbx_cq->hba_index); - } - /* Get slow-path complete queue information */ - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "Slow-path ELS CQ information:\n"); - if (phba->sli4_hba.els_cq) { - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "Associated EQID[%02d]:\n", - phba->sli4_hba.els_cq->assoc_qid); - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tCQID [%02d], " - "QE-COUNT[%04d], QE-SIZE[%04d], " - "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", - phba->sli4_hba.els_cq->queue_id, - phba->sli4_hba.els_cq->entry_count, - phba->sli4_hba.els_cq->entry_size, - phba->sli4_hba.els_cq->host_index, - phba->sli4_hba.els_cq->hba_index); - } + /* Slow-path MBOX MQ */ + qp = phba->sli4_hba.mbx_wq; + if (qp) { + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tMBX MQ info: "); + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "AssocCQID[%02d]:\n", + phba->sli4_hba.mbx_wq->assoc_qid); + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tWQID[%02d], " + "QE-CNT[%04d], QE-SIZE[%04d], " + "HOST-IDX[%04d], PORT-IDX[%04d]", + qp->queue_id, qp->entry_count, + qp->entry_size, qp->host_index, + qp->hba_index); + + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); + if (len >= max_cnt) + goto too_big; + } - /* Get fast-path complete queue 
information */ - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "Fast-path FCP CQ information:\n"); - fcp_qidx = 0; - if (phba->sli4_hba.fcp_cq) { - do { - if (phba->sli4_hba.fcp_cq[fcp_qidx]) { + /* Slow-path ELS response CQ */ + qp = phba->sli4_hba.els_cq; + if (qp) { len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "Associated EQID[%02d]:\n", - phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid); + "\tELS CQ info: "); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tCQID[%02d], " - "QE-COUNT[%04d], QE-SIZE[%04d], " - "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", - phba->sli4_hba.fcp_cq[fcp_qidx]->queue_id, - phba->sli4_hba.fcp_cq[fcp_qidx]->entry_count, - phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size, - phba->sli4_hba.fcp_cq[fcp_qidx]->host_index, - phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index); + "AssocEQID[%02d]: " + "CQ-STAT[max:x%x relw:x%x " + "xabt:x%x wq:x%llx]\n", + qp->assoc_qid, + qp->q_cnt_1, qp->q_cnt_2, + qp->q_cnt_3, + (unsigned long long)qp->q_cnt_4); + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tCQID [%02d], " + "QE-CNT[%04d], QE-SIZE[%04d], " + "HOST-IDX[%04d], PORT-IDX[%04d]", + qp->queue_id, qp->entry_count, + qp->entry_size, qp->host_index, + qp->hba_index); + + /* Reset max counter */ + qp->CQ_max_cqe = 0; + + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); + if (len >= max_cnt) + goto too_big; + } + + /* Slow-path ELS WQ */ + qp = phba->sli4_hba.els_wq; + if (qp) { + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tELS WQ info: "); + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "AssocCQID[%02d]: " + " WQ-STAT[oflow:x%x " + "posted:x%llx]\n", + qp->assoc_qid, + qp->q_cnt_1, + (unsigned long long)qp->q_cnt_4); + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tWQID[%02d], " + "QE-CNT[%04d], QE-SIZE[%04d], " + "HOST-IDX[%04d], PORT-IDX[%04d]", + qp->queue_id, qp->entry_count, + qp->entry_size, qp->host_index, + qp->hba_index); + + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); + if (len >= max_cnt) + goto too_big; } - } while (++fcp_qidx < phba->cfg_fcp_eq_count); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); - } - /* Get mailbox queue information */ - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "Slow-path MBX MQ information:\n"); - if (phba->sli4_hba.mbx_wq) { - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "Associated CQID[%02d]:\n", - phba->sli4_hba.mbx_wq->assoc_qid); - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tWQID[%02d], " - "QE-COUNT[%04d], QE-SIZE[%04d], " - "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", - phba->sli4_hba.mbx_wq->queue_id, - phba->sli4_hba.mbx_wq->entry_count, - phba->sli4_hba.mbx_wq->entry_size, - phba->sli4_hba.mbx_wq->host_index, - phba->sli4_hba.mbx_wq->hba_index); + if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) { + /* Slow-path RQ header */ + qp = phba->sli4_hba.hdr_rq; + + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tRQ info: "); + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "AssocCQID[%02d]: " + "RQ-STAT[nopost:x%x nobuf:x%x " + "trunc:x%x rcv:x%llx]\n", + qp->assoc_qid, + qp->q_cnt_1, qp->q_cnt_2, + qp->q_cnt_3, + (unsigned long long)qp->q_cnt_4); + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tHQID[%02d], " + "QE-CNT[%04d], QE-SIZE[%04d], " + "HOST-IDX[%04d], PORT-IDX[%04d]\n", + qp->queue_id, + qp->entry_count, + 
qp->entry_size, + qp->host_index, + qp->hba_index); + + /* Slow-path RQ data */ + qp = phba->sli4_hba.dat_rq; + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tDQID[%02d], " + "QE-CNT[%04d], QE-SIZE[%04d], " + "HOST-IDX[%04d], PORT-IDX[%04d]\n", + qp->queue_id, + qp->entry_count, + qp->entry_size, + qp->host_index, + qp->hba_index); + + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); + } + } } - /* Get slow-path work queue information */ - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "Slow-path ELS WQ information:\n"); - if (phba->sli4_hba.els_wq) { - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "Associated CQID[%02d]:\n", - phba->sli4_hba.els_wq->assoc_qid); - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tWQID[%02d], " - "QE-COUNT[%04d], QE-SIZE[%04d], " - "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", - phba->sli4_hba.els_wq->queue_id, - phba->sli4_hba.els_wq->entry_count, - phba->sli4_hba.els_wq->entry_size, - phba->sli4_hba.els_wq->host_index, - phba->sli4_hba.els_wq->hba_index); + if (phba->cfg_fof) { + /* FOF EQ */ + qp = phba->sli4_hba.fof_eq; + if (!qp) + goto out; + + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\nFOF EQ info: " + "EQ-STAT[max:x%x noE:x%x " + "bs:x%x proc:x%llx]\n", + qp->q_cnt_1, qp->q_cnt_2, + qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); + + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "EQID[%02d], " + "QE-CNT[%04d], QE-SIZE[%04d], " + "HOST-IDX[%04d], PORT-IDX[%04d]", + qp->queue_id, + qp->entry_count, + qp->entry_size, + qp->host_index, + qp->hba_index); + + /* Reset max counter */ + qp->EQ_max_eqe = 0; + + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); + if (len >= max_cnt) + goto too_big; } - /* Get fast-path work queue information */ - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "Fast-path FCP WQ information:\n"); - if (phba->sli4_hba.fcp_wq) { - for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; - fcp_qidx++) { - if (!phba->sli4_hba.fcp_wq[fcp_qidx]) - continue; + if (phba->cfg_fof) { + + /* OAS CQ */ + qp = phba->sli4_hba.oas_cq; + if (qp) { len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "Associated CQID[%02d]:\n", - phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid); + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tOAS CQ info: "); len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tWQID[%02d], " - "QE-COUNT[%04d], WQE-SIZE[%04d], " - "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", - phba->sli4_hba.fcp_wq[fcp_qidx]->queue_id, - phba->sli4_hba.fcp_wq[fcp_qidx]->entry_count, - phba->sli4_hba.fcp_wq[fcp_qidx]->entry_size, - phba->sli4_hba.fcp_wq[fcp_qidx]->host_index, - phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index); + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "AssocEQID[%02d]: " + "CQ STAT[max:x%x relw:x%x " + "xabt:x%x wq:x%llx]\n", + qp->assoc_qid, + qp->q_cnt_1, qp->q_cnt_2, + qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tCQID[%02d], " + "QE-CNT[%04d], QE-SIZE[%04d], " + "HOST-IDX[%04d], PORT-IDX[%04d]", + qp->queue_id, qp->entry_count, + qp->entry_size, qp->host_index, + qp->hba_index); + + /* Reset max counter */ + qp->CQ_max_cqe = 0; + + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); + if (len >= max_cnt) + goto too_big; } - len += snprintf(pbuffer+len, + + /* OAS WQ */ + qp = phba->sli4_hba.oas_wq; + if (qp) { + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + 
"\t\tOAS WQ info: "); + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "AssocCQID[%02d]: " + "WQ-STAT[oflow:x%x posted:x%llx]\n", + qp->assoc_qid, + qp->q_cnt_1, (unsigned long long)qp->q_cnt_4); + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tWQID[%02d], " + "QE-CNT[%04d], QE-SIZE[%04d], " + "HOST-IDX[%04d], PORT-IDX[%04d]", + qp->queue_id, + qp->entry_count, + qp->entry_size, + qp->host_index, + qp->hba_index); + + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); + if (len >= max_cnt) + goto too_big; + } } +out: + spin_unlock_irq(&phba->hbalock); + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); - /* Get receive queue information */ - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "Slow-path RQ information:\n"); - if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) { - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "Associated CQID[%02d]:\n", - phba->sli4_hba.hdr_rq->assoc_qid); - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tHQID[%02d], " - "QE-COUNT[%04d], QE-SIZE[%04d], " - "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", - phba->sli4_hba.hdr_rq->queue_id, - phba->sli4_hba.hdr_rq->entry_count, - phba->sli4_hba.hdr_rq->entry_size, - phba->sli4_hba.hdr_rq->host_index, - phba->sli4_hba.hdr_rq->hba_index); - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tDQID[%02d], " - "QE-COUNT[%04d], QE-SIZE[%04d], " - "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", - phba->sli4_hba.dat_rq->queue_id, - phba->sli4_hba.dat_rq->entry_count, - phba->sli4_hba.dat_rq->entry_size, - phba->sli4_hba.dat_rq->host_index, - phba->sli4_hba.dat_rq->hba_index); - } +too_big: + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, "Truncated ...\n"); + spin_unlock_irq(&phba->hbalock); return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); } @@ -2391,31 +2593,21 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, switch (quetp) { case LPFC_IDIAG_EQ: - /* Slow-path event queue */ - if (phba->sli4_hba.sp_eq && - phba->sli4_hba.sp_eq->queue_id == queid) { - /* Sanity check */ - rc = lpfc_idiag_que_param_check( - phba->sli4_hba.sp_eq, index, count); - if (rc) - goto error_out; - idiag.ptr_private = phba->sli4_hba.sp_eq; - goto pass_check; - } - /* Fast-path event queue */ - if (phba->sli4_hba.fp_eq) { - for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) { - if (phba->sli4_hba.fp_eq[qidx] && - phba->sli4_hba.fp_eq[qidx]->queue_id == + /* HBA event queue */ + if (phba->sli4_hba.hba_eq) { + for (qidx = 0; qidx < phba->cfg_fcp_io_channel; + qidx++) { + if (phba->sli4_hba.hba_eq[qidx] && + phba->sli4_hba.hba_eq[qidx]->queue_id == queid) { /* Sanity check */ rc = lpfc_idiag_que_param_check( - phba->sli4_hba.fp_eq[qidx], + phba->sli4_hba.hba_eq[qidx], index, count); if (rc) goto error_out; idiag.ptr_private = - phba->sli4_hba.fp_eq[qidx]; + phba->sli4_hba.hba_eq[qidx]; goto pass_check; } } @@ -2462,7 +2654,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, phba->sli4_hba.fcp_cq[qidx]; goto pass_check; } - } while (++qidx < phba->cfg_fcp_eq_count); + } while (++qidx < phba->cfg_fcp_io_channel); } goto error_out; break; @@ -2494,7 +2686,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, } /* FCP work queue */ if (phba->sli4_hba.fcp_wq) { - for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) { + for (qidx = 0; qidx < phba->cfg_fcp_io_channel; + qidx++) { if (!phba->sli4_hba.fcp_wq[qidx]) continue; if 
(phba->sli4_hba.fcp_wq[qidx]->queue_id == @@ -3517,7 +3710,7 @@ static const struct file_operations lpfc_debugfs_op_dumpDif = { #undef lpfc_debugfs_op_dif_err static const struct file_operations lpfc_debugfs_op_dif_err = { .owner = THIS_MODULE, - .open = lpfc_debugfs_dif_err_open, + .open = simple_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_debugfs_dif_err_read, .write = lpfc_debugfs_dif_err_write, @@ -3832,6 +4025,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) struct lpfc_hba *phba = vport->phba; char name[64]; uint32_t num, i; + bool pport_setup = false; if (!lpfc_debugfs_enable) return; @@ -3852,6 +4046,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) /* Setup funcX directory for specific HBA PCI function */ snprintf(name, sizeof(name), "fn%d", phba->brd_no); if (!phba->hba_debugfs_root) { + pport_setup = true; phba->hba_debugfs_root = debugfs_create_dir(name, lpfc_debugfs_root); if (!phba->hba_debugfs_root) { @@ -3906,7 +4101,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) goto debug_failed; } } else - phba->debug_dumpHBASlim = NULL; + phba->debug_dumpHostSlim = NULL; /* Setup dumpData */ snprintf(name, sizeof(name), "dumpData"); @@ -3945,6 +4140,28 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) } phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; + snprintf(name, sizeof(name), "InjErrNPortID"); + phba->debug_InjErrNPortID = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dif_err); + if (!phba->debug_InjErrNPortID) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0809 Cannot create debugfs InjErrNPortID\n"); + goto debug_failed; + } + + snprintf(name, sizeof(name), "InjErrWWPN"); + phba->debug_InjErrWWPN = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dif_err); + if (!phba->debug_InjErrWWPN) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0810 Cannot create debugfs InjErrWWPN\n"); + goto debug_failed; + } + snprintf(name, sizeof(name), "writeGuardInjErr"); phba->debug_writeGuard = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, @@ -3978,6 +4195,17 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) goto debug_failed; } + snprintf(name, sizeof(name), "readGuardInjErr"); + phba->debug_readGuard = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dif_err); + if (!phba->debug_readGuard) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0808 Cannot create debugfs readGuard\n"); + goto debug_failed; + } + snprintf(name, sizeof(name), "readAppInjErr"); phba->debug_readApp = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, @@ -4111,6 +4339,14 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) } /* + * The following section is for additional directories/files for the + * physical port. 
+ */ + + if (!pport_setup) + goto debug_failed; + + /* * iDiag debugfs root entry points for SLI4 device only */ if (phba->sli_rev < LPFC_SLI_REV4) @@ -4306,6 +4542,14 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */ phba->debug_InjErrLBA = NULL; } + if (phba->debug_InjErrNPortID) { /* InjErrNPortID */ + debugfs_remove(phba->debug_InjErrNPortID); + phba->debug_InjErrNPortID = NULL; + } + if (phba->debug_InjErrWWPN) { + debugfs_remove(phba->debug_InjErrWWPN); /* InjErrWWPN */ + phba->debug_InjErrWWPN = NULL; + } if (phba->debug_writeGuard) { debugfs_remove(phba->debug_writeGuard); /* writeGuard */ phba->debug_writeGuard = NULL; @@ -4318,6 +4562,10 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) debugfs_remove(phba->debug_writeRef); /* writeRef */ phba->debug_writeRef = NULL; } + if (phba->debug_readGuard) { + debugfs_remove(phba->debug_readGuard); /* readGuard */ + phba->debug_readGuard = NULL; + } if (phba->debug_readApp) { debugfs_remove(phba->debug_readApp); /* readApp */ phba->debug_readApp = NULL; @@ -4404,3 +4652,47 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) #endif return; } + +/* + * Driver debug utility routines outside of debugfs. The debug utility + * routines implemented here are intended to be used in the instrumented + * debug driver for debugging host or port issues. + */ + +/** + * lpfc_debug_dump_all_queues - dump all the queues with a hba + * @phba: Pointer to HBA context object. + * + * This function dumps entries of all the queues associated with the @phba. + **/ +void +lpfc_debug_dump_all_queues(struct lpfc_hba *phba) +{ + int fcp_wqidx; + + /* + * Dump Work Queues (WQs) + */ + lpfc_debug_dump_mbx_wq(phba); + lpfc_debug_dump_els_wq(phba); + + for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) + lpfc_debug_dump_fcp_wq(phba, fcp_wqidx); + + lpfc_debug_dump_hdr_rq(phba); + lpfc_debug_dump_dat_rq(phba); + /* + * Dump Complete Queues (CQs) + */ + lpfc_debug_dump_mbx_cq(phba); + lpfc_debug_dump_els_cq(phba); + + for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) + lpfc_debug_dump_fcp_cq(phba, fcp_wqidx); + + /* + * Dump Event Queues (EQs) + */ + for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) + lpfc_debug_dump_hba_eq(phba, fcp_wqidx); +} diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h index f83bd944edd..8b2b6a3bfc2 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.h +++ b/drivers/scsi/lpfc/lpfc_debugfs.h @@ -36,6 +36,9 @@ /* dumpHostSlim output buffer size */ #define LPFC_DUMPHOSTSLIM_SIZE 4096 +/* dumpSLIqinfo output buffer size */ +#define LPFC_DUMPSLIQINFO_SIZE 4096 + /* hbqinfo output buffer size */ #define LPFC_HBQINFO_SIZE 8192 @@ -267,3 +270,402 @@ struct lpfc_idiag { #define LPFC_DISC_TRC_DISCOVERY 0xef /* common mask for general * discovery */ #endif /* H_LPFC_DEBUG_FS */ + + +/* + * Driver debug utility routines outside of debugfs. The debug utility + * routines implemented here are intended to be used in the instrumented + * debug driver for debugging host or port issues. + */ + +/** + * lpfc_debug_dump_qe - dump a specific entry from a queue + * @q: Pointer to the queue descriptor. + * @idx: Index to the entry on the queue. + * + * This function dumps an entry indexed by @idx from a queue specified by the + * queue descriptor @q.
+ **/ +static inline void +lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx) +{ + char line_buf[LPFC_LBUF_SZ]; + int i, esize, qe_word_cnt, len; + uint32_t *pword; + + /* sanity checks */ + if (!q) + return; + if (idx >= q->entry_count) + return; + + esize = q->entry_size; + qe_word_cnt = esize / sizeof(uint32_t); + pword = q->qe[idx].address; + + len = 0; + len += snprintf(line_buf+len, LPFC_LBUF_SZ-len, "QE[%04d]: ", idx); + if (qe_word_cnt > 8) + printk(KERN_ERR "%s\n", line_buf); + + for (i = 0; i < qe_word_cnt; i++) { + if (!(i % 8)) { + if (i != 0) + printk(KERN_ERR "%s\n", line_buf); + if (qe_word_cnt > 8) { + len = 0; + memset(line_buf, 0, LPFC_LBUF_SZ); + len += snprintf(line_buf+len, LPFC_LBUF_SZ-len, + "%03d: ", i); + } + } + len += snprintf(line_buf+len, LPFC_LBUF_SZ-len, "%08x ", + ((uint32_t)*pword) & 0xffffffff); + pword++; + } + if (qe_word_cnt <= 8 || (i - 1) % 8) + printk(KERN_ERR "%s\n", line_buf); +} + +/** + * lpfc_debug_dump_q - dump all entries from an specific queue + * @q: Pointer to the queue descriptor. + * + * This function dumps all entries from a queue specified by the queue + * descriptor @q. + **/ +static inline void +lpfc_debug_dump_q(struct lpfc_queue *q) +{ + int idx, entry_count; + + /* sanity check */ + if (!q) + return; + + dev_printk(KERN_ERR, &(((q->phba))->pcidev)->dev, + "%d: [qid:%d, type:%d, subtype:%d, " + "qe_size:%d, qe_count:%d, " + "host_index:%d, port_index:%d]\n", + (q->phba)->brd_no, + q->queue_id, q->type, q->subtype, + q->entry_size, q->entry_count, + q->host_index, q->hba_index); + entry_count = q->entry_count; + for (idx = 0; idx < entry_count; idx++) + lpfc_debug_dump_qe(q, idx); + printk(KERN_ERR "\n"); +} + +/** + * lpfc_debug_dump_fcp_wq - dump all entries from a fcp work queue + * @phba: Pointer to HBA context object. + * @fcp_wqidx: Index to a FCP work queue. + * + * This function dumps all entries from a FCP work queue specified by the + * @fcp_wqidx. + **/ +static inline void +lpfc_debug_dump_fcp_wq(struct lpfc_hba *phba, int fcp_wqidx) +{ + /* sanity check */ + if (fcp_wqidx >= phba->cfg_fcp_io_channel) + return; + + printk(KERN_ERR "FCP WQ: WQ[Idx:%d|Qid:%d]\n", + fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id); + lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[fcp_wqidx]); +} + +/** + * lpfc_debug_dump_fcp_cq - dump all entries from a fcp work queue's cmpl queue + * @phba: Pointer to HBA context object. + * @fcp_wqidx: Index to a FCP work queue. + * + * This function dumps all entries from a FCP complete queue which is + * associated to the FCP work queue specified by the @fcp_wqidx. + **/ +static inline void +lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx) +{ + int fcp_cqidx, fcp_cqid; + + /* sanity check */ + if (fcp_wqidx >= phba->cfg_fcp_io_channel) + return; + + fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid; + for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) + if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid) + break; + if (phba->intr_type == MSIX) { + if (fcp_cqidx >= phba->cfg_fcp_io_channel) + return; + } else { + if (fcp_cqidx > 0) + return; + } + + printk(KERN_ERR "FCP CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]:\n", + fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, + fcp_cqidx, fcp_cqid); + lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[fcp_cqidx]); +} + +/** + * lpfc_debug_dump_hba_eq - dump all entries from a fcp work queue's evt queue + * @phba: Pointer to HBA context object. + * @fcp_wqidx: Index to a FCP work queue. 
+ * + * This function dumps all entries from a FCP event queue which is + * associated to the FCP work queue specified by the @fcp_wqidx. + **/ +static inline void +lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int fcp_wqidx) +{ + struct lpfc_queue *qdesc; + int fcp_eqidx, fcp_eqid; + int fcp_cqidx, fcp_cqid; + + /* sanity check */ + if (fcp_wqidx >= phba->cfg_fcp_io_channel) + return; + fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid; + for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) + if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid) + break; + if (phba->intr_type == MSIX) { + if (fcp_cqidx >= phba->cfg_fcp_io_channel) + return; + } else { + if (fcp_cqidx > 0) + return; + } + + fcp_eqidx = fcp_cqidx; + fcp_eqid = phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id; + qdesc = phba->sli4_hba.hba_eq[fcp_eqidx]; + + printk(KERN_ERR "FCP EQ: WQ[Idx:%d|Qid:%d]->CQ[Idx:%d|Qid:%d]->" + "EQ[Idx:%d|Qid:%d]\n", + fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, + fcp_cqidx, fcp_cqid, fcp_eqidx, fcp_eqid); + lpfc_debug_dump_q(qdesc); +} + +/** + * lpfc_debug_dump_els_wq - dump all entries from the els work queue + * @phba: Pointer to HBA context object. + * + * This function dumps all entries from the ELS work queue. + **/ +static inline void +lpfc_debug_dump_els_wq(struct lpfc_hba *phba) +{ + printk(KERN_ERR "ELS WQ: WQ[Qid:%d]:\n", + phba->sli4_hba.els_wq->queue_id); + lpfc_debug_dump_q(phba->sli4_hba.els_wq); +} + +/** + * lpfc_debug_dump_mbx_wq - dump all entries from the mbox work queue + * @phba: Pointer to HBA context object. + * + * This function dumps all entries from the MBOX work queue. + **/ +static inline void +lpfc_debug_dump_mbx_wq(struct lpfc_hba *phba) +{ + printk(KERN_ERR "MBX WQ: WQ[Qid:%d]\n", + phba->sli4_hba.mbx_wq->queue_id); + lpfc_debug_dump_q(phba->sli4_hba.mbx_wq); +} + +/** + * lpfc_debug_dump_dat_rq - dump all entries from the receive data queue + * @phba: Pointer to HBA context object. + * + * This function dumps all entries from the receive data queue. + **/ +static inline void +lpfc_debug_dump_dat_rq(struct lpfc_hba *phba) +{ + printk(KERN_ERR "DAT RQ: RQ[Qid:%d]\n", + phba->sli4_hba.dat_rq->queue_id); + lpfc_debug_dump_q(phba->sli4_hba.dat_rq); +} + +/** + * lpfc_debug_dump_hdr_rq - dump all entries from the receive header queue + * @phba: Pointer to HBA context object. + * + * This function dumps all entries from the receive header queue. + **/ +static inline void +lpfc_debug_dump_hdr_rq(struct lpfc_hba *phba) +{ + printk(KERN_ERR "HDR RQ: RQ[Qid:%d]\n", + phba->sli4_hba.hdr_rq->queue_id); + lpfc_debug_dump_q(phba->sli4_hba.hdr_rq); +} + +/** + * lpfc_debug_dump_els_cq - dump all entries from the els complete queue + * @phba: Pointer to HBA context object. + * + * This function dumps all entries from the els complete queue. + **/ +static inline void +lpfc_debug_dump_els_cq(struct lpfc_hba *phba) +{ + printk(KERN_ERR "ELS CQ: WQ[Qid:%d]->CQ[Qid:%d]\n", + phba->sli4_hba.els_wq->queue_id, + phba->sli4_hba.els_cq->queue_id); + lpfc_debug_dump_q(phba->sli4_hba.els_cq); +} + +/** + * lpfc_debug_dump_mbx_cq - dump all entries from the mbox complete queue + * @phba: Pointer to HBA context object. + * + * This function dumps all entries from the mbox complete queue. 
+ **/ +static inline void +lpfc_debug_dump_mbx_cq(struct lpfc_hba *phba) +{ + printk(KERN_ERR "MBX CQ: WQ[Qid:%d]->CQ[Qid:%d]\n", + phba->sli4_hba.mbx_wq->queue_id, + phba->sli4_hba.mbx_cq->queue_id); + lpfc_debug_dump_q(phba->sli4_hba.mbx_cq); +} + +/** + * lpfc_debug_dump_wq_by_id - dump all entries from a work queue by queue id + * @phba: Pointer to HBA context object. + * @qid: Work queue identifier. + * + * This function dumps all entries from a work queue identified by the queue + * identifier. + **/ +static inline void +lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid) +{ + int wq_idx; + + for (wq_idx = 0; wq_idx < phba->cfg_fcp_io_channel; wq_idx++) + if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid) + break; + if (wq_idx < phba->cfg_fcp_io_channel) { + printk(KERN_ERR "FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid); + lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]); + return; + } + + if (phba->sli4_hba.els_wq->queue_id == qid) { + printk(KERN_ERR "ELS WQ[Qid:%d]\n", qid); + lpfc_debug_dump_q(phba->sli4_hba.els_wq); + } +} + +/** + * lpfc_debug_dump_mq_by_id - dump all entries from a mbox queue by queue id + * @phba: Pointer to HBA context object. + * @qid: Mbox work queue identifier. + * + * This function dumps all entries from a mbox work queue identified by the + * queue identifier. + **/ +static inline void +lpfc_debug_dump_mq_by_id(struct lpfc_hba *phba, int qid) +{ + if (phba->sli4_hba.mbx_wq->queue_id == qid) { + printk(KERN_ERR "MBX WQ[Qid:%d]\n", qid); + lpfc_debug_dump_q(phba->sli4_hba.mbx_wq); + } +} + +/** + * lpfc_debug_dump_rq_by_id - dump all entries from a receive queue by queue id + * @phba: Pointer to HBA context object. + * @qid: Receive queue identifier. + * + * This function dumps all entries from a receive queue identified by the + * queue identifier. + **/ +static inline void +lpfc_debug_dump_rq_by_id(struct lpfc_hba *phba, int qid) +{ + if (phba->sli4_hba.hdr_rq->queue_id == qid) { + printk(KERN_ERR "HDR RQ[Qid:%d]\n", qid); + lpfc_debug_dump_q(phba->sli4_hba.hdr_rq); + return; + } + if (phba->sli4_hba.dat_rq->queue_id == qid) { + printk(KERN_ERR "DAT RQ[Qid:%d]\n", qid); + lpfc_debug_dump_q(phba->sli4_hba.dat_rq); + } +} + +/** + * lpfc_debug_dump_cq_by_id - dump all entries from a cmpl queue by queue id + * @phba: Pointer to HBA context object. + * @qid: Complete queue identifier. + * + * This function dumps all entries from a complete queue identified by the + * queue identifier. + **/ +static inline void +lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid) +{ + int cq_idx = 0; + + do { + if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid) + break; + } while (++cq_idx < phba->cfg_fcp_io_channel); + + if (cq_idx < phba->cfg_fcp_io_channel) { + printk(KERN_ERR "FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid); + lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]); + return; + } + + if (phba->sli4_hba.els_cq->queue_id == qid) { + printk(KERN_ERR "ELS CQ[Qid:%d]\n", qid); + lpfc_debug_dump_q(phba->sli4_hba.els_cq); + return; + } + + if (phba->sli4_hba.mbx_cq->queue_id == qid) { + printk(KERN_ERR "MBX CQ[Qid:%d]\n", qid); + lpfc_debug_dump_q(phba->sli4_hba.mbx_cq); + } +} + +/** + * lpfc_debug_dump_eq_by_id - dump all entries from an event queue by queue id + * @phba: Pointer to HBA context object. + * @qid: Complete queue identifier. + * + * This function dumps all entries from an event queue identified by the + * queue identifier. 
+ **/ +static inline void +lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid) +{ + int eq_idx; + + for (eq_idx = 0; eq_idx < phba->cfg_fcp_io_channel; eq_idx++) { + if (phba->sli4_hba.hba_eq[eq_idx]->queue_id == qid) + break; + } + + if (eq_idx < phba->cfg_fcp_io_channel) { + printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid); + lpfc_debug_dump_q(phba->sli4_hba.hba_eq[eq_idx]); + return; + } + +} + +void lpfc_debug_dump_all_queues(struct lpfc_hba *); diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index 1d84b63fcca..1a6fe524940 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2013 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -116,7 +116,7 @@ struct lpfc_nodelist { atomic_t cmd_pending; uint32_t cmd_qdepth; unsigned long last_change_time; - struct lpfc_node_rrqs active_rrqs; + unsigned long *active_rrqs_xri_bitmap; struct lpfc_scsicmd_bkt *lat_data; /* Latency data */ }; struct lpfc_node_rrq { @@ -145,6 +145,7 @@ struct lpfc_node_rrq { #define NLP_RCV_PLOGI 0x00080000 /* Rcv'ed PLOGI from remote system */ #define NLP_LOGO_ACC 0x00100000 /* Process LOGO after ACC completes */ #define NLP_TGT_NO_SCSIID 0x00200000 /* good PRLI but no binding for scsid */ +#define NLP_ISSUE_LOGO 0x00400000 /* waiting to issue a LOGO */ #define NLP_ACC_REGLOGIN 0x01000000 /* Issue Reg Login after successful ACC */ #define NLP_NPR_ADISC 0x02000000 /* Issue ADISC when dq'ed from @@ -153,6 +154,7 @@ struct lpfc_node_rrq { #define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ #define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ #define NLP_SC_REQ 0x20000000 /* Target requires authentication */ +#define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */ #define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */ /* ndlp usage management macros */ @@ -201,10 +203,11 @@ struct lpfc_node_rrq { #define NLP_STE_ADISC_ISSUE 0x2 /* ADISC was sent to NL_PORT */ #define NLP_STE_REG_LOGIN_ISSUE 0x3 /* REG_LOGIN was issued for NL_PORT */ #define NLP_STE_PRLI_ISSUE 0x4 /* PRLI was sent to NL_PORT */ -#define NLP_STE_UNMAPPED_NODE 0x5 /* PRLI completed from NL_PORT */ -#define NLP_STE_MAPPED_NODE 0x6 /* Identified as a FCP Target */ -#define NLP_STE_NPR_NODE 0x7 /* NPort disappeared */ -#define NLP_STE_MAX_STATE 0x8 +#define NLP_STE_LOGO_ISSUE 0x5 /* LOGO was sent to NL_PORT */ +#define NLP_STE_UNMAPPED_NODE 0x6 /* PRLI completed from NL_PORT */ +#define NLP_STE_MAPPED_NODE 0x7 /* Identified as a FCP Target */ +#define NLP_STE_NPR_NODE 0x8 /* NPort disappeared */ +#define NLP_STE_MAX_STATE 0x9 #define NLP_STE_FREED_NODE 0xff /* node entry was freed to MEM_NLP */ /* For UNUSED_NODE state, the node has just been allocated. diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 7afc757338d..7a5d81a65be 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2011 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. 
* * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -29,6 +29,7 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> + #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" @@ -230,27 +231,46 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, INIT_LIST_HEAD(&pbuflist->list); - icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys); - icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys); - icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; - icmd->un.elsreq64.remoteID = did; /* DID */ if (expectRsp) { + icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys); + icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys); + icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); + + icmd->un.elsreq64.remoteID = did; /* DID */ icmd->ulpCommand = CMD_ELS_REQUEST64_CR; - icmd->ulpTimeout = phba->fc_ratov * 2; + if (elscmd == ELS_CMD_FLOGI) + icmd->ulpTimeout = FF_DEF_RATOV * 2; + else + icmd->ulpTimeout = phba->fc_ratov * 2; } else { - icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64); + icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys); + icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys); + icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; + icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64); + icmd->un.xseq64.xmit_els_remoteID = did; /* DID */ icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX; } icmd->ulpBdeCount = 1; icmd->ulpLe = 1; icmd->ulpClass = CLASS3; - if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { - icmd->un.elsreq64.myID = vport->fc_myDID; + /* + * If we have NPIV enabled, we want to send ELS traffic by VPI. + * For SLI4, since the driver controls VPIs we also want to include + * all ELS pt2pt protocol traffic as well. 
+ */ + if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) || + ((phba->sli_rev == LPFC_SLI_REV4) && + (vport->fc_flag & FC_PT2PT))) { + + if (expectRsp) { + icmd->un.elsreq64.myID = vport->fc_myDID; + + /* For ELS_REQUEST64_CR, use the VPI by default */ + icmd->ulpContext = phba->vpi_ids[vport->vpi]; + } - /* For ELS_REQUEST64_CR, use the VPI by default */ - icmd->ulpContext = phba->vpi_ids[vport->vpi]; icmd->ulpCt_h = 0; /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ if (elscmd == ELS_CMD_ECHO) @@ -292,16 +312,20 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, /* Xmit ELS command <elsCmd> to remote NPORT <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0116 Xmit ELS command x%x to remote " - "NPORT x%x I/O tag: x%x, port state: x%x\n", + "NPORT x%x I/O tag: x%x, port state:x%x" + " fc_flag:x%x\n", elscmd, did, elsiocb->iotag, - vport->port_state); + vport->port_state, + vport->fc_flag); } else { /* Xmit ELS response <elsCmd> to remote NPORT <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0117 Xmit ELS response x%x to remote " - "NPORT x%x I/O tag: x%x, size: x%x\n", + "NPORT x%x I/O tag: x%x, size: x%x " + "port_state x%x fc_flag x%x\n", elscmd, ndlp->nlp_DID, elsiocb->iotag, - cmdSize); + cmdSize, vport->port_state, + vport->fc_flag); } return elsiocb; @@ -438,9 +462,10 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport) int rc = 0; sp = &phba->fc_fabparam; - /* move forward in case of SLI4 FC port loopback test */ + /* move forward in case of SLI4 FC port loopback test and pt2pt mode */ if ((phba->sli_rev == LPFC_SLI_REV4) && - !(phba->link_flag & LS_LOOPBACK_MODE)) { + !(phba->link_flag & LS_LOOPBACK_MODE) && + !(vport->fc_flag & FC_PT2PT)) { ndlp = lpfc_findnode_did(vport, Fabric_DID); if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { rc = -ENODEV; @@ -467,6 +492,7 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport) vport->port_state = LPFC_FABRIC_CFG_LINK; memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam)); lpfc_reg_vfi(mboxq, vport, dmabuf->phys); + mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi; mboxq->vport = vport; mboxq->context1 = dmabuf; @@ -683,6 +709,20 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } } + /* + * For FC we need to do some special processing because of the SLI + * Port's default settings of the Common Service Parameters. + */ + if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) { + /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ + if ((phba->sli_rev == LPFC_SLI_REV4) && fabric_param_changed) + lpfc_unregister_fcf_prep(phba); + + /* This should just update the VFI CSPs*/ + if (vport->fc_flag & FC_VFI_REGISTERED) + lpfc_issue_reg_vfi(vport); + } + if (fabric_param_changed && !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { @@ -707,14 +747,17 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_sli4_unreg_all_rpis(vport); lpfc_mbx_unreg_vpi(vport); spin_lock_irq(shost->host_lock); - vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; - /* - * If VPI is unreged, driver need to do INIT_VPI - * before re-registering - */ vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; spin_unlock_irq(shost->host_lock); } + + /* + * For SLI3 and SLI4, the VPI needs to be reregistered in + * response to this fabric parameter change event. 
+ */ + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + spin_unlock_irq(shost->host_lock); } else if ((phba->sli_rev == LPFC_SLI_REV4) && !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { /* @@ -789,6 +832,8 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, phba->fc_ratov = FF_DEF_RATOV; rc = memcmp(&vport->fc_portname, &sp->portName, sizeof(vport->fc_portname)); + memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); + if (rc >= 0) { /* This side will initiate the PLOGI */ spin_lock_irq(shost->host_lock); @@ -817,6 +862,17 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, mempool_free(mbox, phba->mbox_mem_pool); goto fail; } + + /* + * For SLI4, the VFI/VPI are registered AFTER the + * Nport with the higher WWPN sends the PLOGI with + * an assigned NPortId. + */ + + /* not equal */ + if ((phba->sli_rev == LPFC_SLI_REV4) && rc) + lpfc_issue_reg_vfi(vport); + /* Decrement ndlp reference count indicating that ndlp can be * safely released when other references to it are done. */ @@ -861,6 +917,23 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_PT2PT; spin_unlock_irq(shost->host_lock); + /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ + if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) { + lpfc_unregister_fcf_prep(phba); + + /* The FC_VFI_REGISTERED flag will get clear in the cmpl + * handler for unreg_vfi, but if we don't force the + * FC_VFI_REGISTERED flag then the reg_vfi mailbox could be + * built with the update bit set instead of just the vp bit to + * change the Nport ID. We need to have the vp set and the + * Upd cleared on topology changes. 
+ */ + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VFI_REGISTERED; + spin_unlock_irq(shost->host_lock); + phba->fc_topology_changed = 0; + lpfc_issue_reg_vfi(vport); + } /* Start discovery - this should just do CLEAR_LA */ lpfc_disc_start(vport); @@ -925,9 +998,18 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * due to new FCF discovery */ if ((phba->hba_flag & HBA_FIP_SUPPORT) && - (phba->fcf.fcf_flag & FCF_DISCOVERY) && - !((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && - (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) { + (phba->fcf.fcf_flag & FCF_DISCOVERY)) { + if (phba->link_state < LPFC_LINK_UP) + goto stop_rr_fcf_flogi; + if ((phba->fcoe_cvl_eventtag_attn == + phba->fcoe_cvl_eventtag) && + (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && + ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == + IOERR_SLI_ABORTED)) + goto stop_rr_fcf_flogi; + else + phba->fcoe_cvl_eventtag_attn = + phba->fcoe_cvl_eventtag; lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, "2611 FLOGI failed on FCF (x%x), " "status:x%x/x%x, tmo:x%x, perform " @@ -943,6 +1025,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; } +stop_rr_fcf_flogi: /* FLOGI failure */ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, "2858 FLOGI failure Status:x%x/x%x TMO:x%x\n", @@ -972,9 +1055,19 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS; if ((phba->sli_rev == LPFC_SLI_REV4) && (!(vport->fc_flag & FC_VFI_REGISTERED) || - (vport->fc_prevDID != vport->fc_myDID))) { - if (vport->fc_flag & FC_VFI_REGISTERED) - lpfc_sli4_unreg_all_rpis(vport); + (vport->fc_prevDID != vport->fc_myDID) || + phba->fc_topology_changed)) { + if (vport->fc_flag & FC_VFI_REGISTERED) { + if (phba->fc_topology_changed) { + lpfc_unregister_fcf_prep(phba); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VFI_REGISTERED; + spin_unlock_irq(shost->host_lock); + phba->fc_topology_changed = 0; + } else { + lpfc_sli4_unreg_all_rpis(vport); + } + } lpfc_issue_reg_vfi(vport); lpfc_nlp_put(ndlp); goto out; @@ -996,10 +1089,11 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* FLOGI completes successfully */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0101 FLOGI completes successfully " - "Data: x%x x%x x%x x%x\n", + "0101 FLOGI completes successfully, I/O tag:x%x, " + "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag, irsp->un.ulpWord[4], sp->cmn.e_d_tov, - sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution); + sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution, + vport->port_state, vport->fc_flag); if (vport->port_state == LPFC_FLOGI) { /* @@ -1068,8 +1162,10 @@ flogifail: /* Start discovery */ lpfc_disc_start(vport); } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || - ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) && - (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) && + (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) != + IOERR_SLI_ABORTED) && + ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) != + IOERR_SLI_DOWN))) && (phba->link_state != LPFC_CLEAR_LA)) { /* If FLOGI failed enable link interrupt. 
*/ lpfc_issue_clear_la(phba, vport); @@ -1137,8 +1233,6 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, sp->cmn.w2.r_a_tov = 0; sp->cmn.virtual_fabric_support = 0; sp->cls1.classValid = 0; - sp->cls2.seqDelivery = 1; - sp->cls3.seqDelivery = 1; if (sp->cmn.fcphLow < FC_PH3) sp->cmn.fcphLow = FC_PH3; if (sp->cmn.fcphHigh < FC_PH3) @@ -1153,7 +1247,13 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Set the fcfi to the fcfi we registered with */ elsiocb->iocb.ulpContext = phba->fcf.fcfi; } + /* Can't do SLI4 class2 without support sequence coalescing */ + sp->cls2.classValid = 0; + sp->cls2.seqDelivery = 0; } else { + /* Historical, setting sequential-delivery bit for SLI3 */ + sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0; + sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0; if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { sp->cmn.request_multiple_Nport = 1; /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ @@ -1416,7 +1516,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, uint32_t rc, keepDID = 0; int put_node; int put_rport; - struct lpfc_node_rrqs rrq; + unsigned long *active_rrqs_xri_bitmap = NULL; /* Fabric nodes can have the same WWPN so we don't bother searching * by WWPN. Just return the ndlp that was given to us. @@ -1434,46 +1534,73 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp)) return ndlp; - memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap)); + if (phba->sli_rev == LPFC_SLI_REV4) { + active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool, + GFP_KERNEL); + if (active_rrqs_xri_bitmap) + memset(active_rrqs_xri_bitmap, 0, + phba->cfg_rrq_xri_bitmap_sz); + } + + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n", + ndlp, ndlp->nlp_DID, new_ndlp); if (!new_ndlp) { rc = memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)); - if (!rc) + if (!rc) { + if (active_rrqs_xri_bitmap) + mempool_free(active_rrqs_xri_bitmap, + phba->active_rrq_pool); return ndlp; + } new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); - if (!new_ndlp) + if (!new_ndlp) { + if (active_rrqs_xri_bitmap) + mempool_free(active_rrqs_xri_bitmap, + phba->active_rrq_pool); return ndlp; + } lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID); } else if (!NLP_CHK_NODE_ACT(new_ndlp)) { rc = memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)); - if (!rc) + if (!rc) { + if (active_rrqs_xri_bitmap) + mempool_free(active_rrqs_xri_bitmap, + phba->active_rrq_pool); return ndlp; + } new_ndlp = lpfc_enable_node(vport, new_ndlp, NLP_STE_UNUSED_NODE); - if (!new_ndlp) + if (!new_ndlp) { + if (active_rrqs_xri_bitmap) + mempool_free(active_rrqs_xri_bitmap, + phba->active_rrq_pool); return ndlp; + } keepDID = new_ndlp->nlp_DID; - if (phba->sli_rev == LPFC_SLI_REV4) - memcpy(&rrq.xri_bitmap, - &new_ndlp->active_rrqs.xri_bitmap, - sizeof(new_ndlp->active_rrqs.xri_bitmap)); + if ((phba->sli_rev == LPFC_SLI_REV4) && active_rrqs_xri_bitmap) + memcpy(active_rrqs_xri_bitmap, + new_ndlp->active_rrqs_xri_bitmap, + phba->cfg_rrq_xri_bitmap_sz); } else { keepDID = new_ndlp->nlp_DID; - if (phba->sli_rev == LPFC_SLI_REV4) - memcpy(&rrq.xri_bitmap, - &new_ndlp->active_rrqs.xri_bitmap, - sizeof(new_ndlp->active_rrqs.xri_bitmap)); + if (phba->sli_rev == LPFC_SLI_REV4 && + active_rrqs_xri_bitmap) + memcpy(active_rrqs_xri_bitmap, + new_ndlp->active_rrqs_xri_bitmap, + phba->cfg_rrq_xri_bitmap_sz); } 
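/*
 * Illustration only (not one of the patch hunks): taken together, the three
 * memcpy() calls in this routine swap the two nodes' RRQ XRI bitmaps through
 * the scratch buffer allocated above.  With "scratch" standing in for the
 * local active_rrqs_xri_bitmap pointer and "sz" for
 * phba->cfg_rrq_xri_bitmap_sz, the sequence is roughly:
 *
 *	memcpy(scratch, new_ndlp->active_rrqs_xri_bitmap, sz);
 *	memcpy(new_ndlp->active_rrqs_xri_bitmap,
 *	       ndlp->active_rrqs_xri_bitmap, sz);
 *	memcpy(ndlp->active_rrqs_xri_bitmap, scratch, sz);
 *
 * which mirrors the nlp_DID swap performed just below.
 */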
 	lpfc_unreg_rpi(vport, new_ndlp);
 	new_ndlp->nlp_DID = ndlp->nlp_DID;
 	new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
 	if (phba->sli_rev == LPFC_SLI_REV4)
-		memcpy(new_ndlp->active_rrqs.xri_bitmap,
-			&ndlp->active_rrqs.xri_bitmap,
-			sizeof(ndlp->active_rrqs.xri_bitmap));
+		memcpy(new_ndlp->active_rrqs_xri_bitmap,
+		       ndlp->active_rrqs_xri_bitmap,
+		       phba->cfg_rrq_xri_bitmap_sz);
 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
 		new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
@@ -1487,6 +1614,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
 	/* The new_ndlp is replacing ndlp totally, so we need
 	 * to put ndlp on UNUSED list and try to free it.
 	 */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+		 "3179 PLOGI confirm NEW: %x %x\n",
+		 new_ndlp->nlp_DID, keepDID);
 	/* Fix up the rport accordingly */
 	rport = ndlp->rport;
@@ -1512,30 +1642,43 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
 		/* Two ndlps cannot have the same did on the nodelist */
 		ndlp->nlp_DID = keepDID;
-		if (phba->sli_rev == LPFC_SLI_REV4)
-			memcpy(&ndlp->active_rrqs.xri_bitmap,
-				&rrq.xri_bitmap,
-				sizeof(ndlp->active_rrqs.xri_bitmap));
+		if (phba->sli_rev == LPFC_SLI_REV4 &&
+		    active_rrqs_xri_bitmap)
+			memcpy(ndlp->active_rrqs_xri_bitmap,
+			       active_rrqs_xri_bitmap,
+			       phba->cfg_rrq_xri_bitmap_sz);
 		lpfc_drop_node(vport, ndlp);
 	} else {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "3180 PLOGI confirm SWAP: %x %x\n",
+			 new_ndlp->nlp_DID, keepDID);
+		lpfc_unreg_rpi(vport, ndlp);
+
 		/* Two ndlps cannot have the same did */
 		ndlp->nlp_DID = keepDID;
-		if (phba->sli_rev == LPFC_SLI_REV4)
-			memcpy(&ndlp->active_rrqs.xri_bitmap,
-				&rrq.xri_bitmap,
-				sizeof(ndlp->active_rrqs.xri_bitmap));
-		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		if (phba->sli_rev == LPFC_SLI_REV4 &&
+		    active_rrqs_xri_bitmap)
+			memcpy(ndlp->active_rrqs_xri_bitmap,
+			       active_rrqs_xri_bitmap,
+			       phba->cfg_rrq_xri_bitmap_sz);
+
 		/* Since we are swapping the ndlp passed in with the new one
-		 * and the did has already been swapped, copy over the
-		 * state and names.
+		 * and the did has already been swapped, copy over state.
+		 * The new WWNs are already in new_ndlp since that's what
+		 * we looked it up by in the beginning of this routine.
 		 */
-		memcpy(&new_ndlp->nlp_portname, &ndlp->nlp_portname,
-			sizeof(struct lpfc_name));
-		memcpy(&new_ndlp->nlp_nodename, &ndlp->nlp_nodename,
-			sizeof(struct lpfc_name));
 		new_ndlp->nlp_state = ndlp->nlp_state;
+
+		/* Since we are switching over to the new_ndlp, the old
+		 * ndlp should be put in the NPR state, unless we have
+		 * already started re-discovery on it.
+ */ + if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || + (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + /* Fix up the rport accordingly */ rport = ndlp->rport; if (rport) { @@ -1550,6 +1693,10 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, put_device(&rport->dev); } } + if (phba->sli_rev == LPFC_SLI_REV4 && + active_rrqs_xri_bitmap) + mempool_free(active_rrqs_xri_bitmap, + phba->active_rrq_pool); return new_ndlp; } @@ -2004,6 +2151,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } npr->estabImagePair = 1; npr->readXferRdyDis = 1; + if (vport->cfg_first_burst_size) + npr->writeXferRdyDis = 1; /* For FCP support */ npr->prliType = PRLI_FCP_TYPE; @@ -2327,6 +2476,8 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, IOCB_t *irsp; struct lpfc_sli *psli; struct lpfcMboxq *mbox; + unsigned long flags; + uint32_t skip_recovery = 0; psli = &phba->sli; /* we pass cmdiocb to state machine which needs rspiocb as well */ @@ -2341,47 +2492,52 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, "LOGO cmpl: status:x%x/x%x did:x%x", irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID); + /* LOGO completes to NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0105 LOGO completes to NPort x%x " "Data: x%x x%x x%x x%x\n", ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout, vport->num_disc_nodes); - /* Check to see if link went down during discovery */ - if (lpfc_els_chk_latt(vport)) + + if (lpfc_els_chk_latt(vport)) { + skip_recovery = 1; goto out; + } + /* Check to see if link went down during discovery */ if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { /* NLP_EVT_DEVICE_RM should unregister the RPI * which should abort all outstanding IOs. */ lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_DEVICE_RM); + skip_recovery = 1; goto out; } if (irsp->ulpStatus) { /* Check for retry */ - if (lpfc_els_retry(phba, cmdiocb, rspiocb)) + if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { /* ELS command is being retried */ + skip_recovery = 1; goto out; + } /* LOGO failed */ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, "2756 LOGO failure DID:%06X Status:x%x/x%x\n", ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4]); /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ - if (lpfc_error_lost_link(irsp)) + if (lpfc_error_lost_link(irsp)) { + skip_recovery = 1; goto out; - else - lpfc_disc_state_machine(vport, ndlp, cmdiocb, - NLP_EVT_CMPL_LOGO); - } else - /* Good status, call state machine. - * This will unregister the rpi if needed. - */ - lpfc_disc_state_machine(vport, ndlp, cmdiocb, - NLP_EVT_CMPL_LOGO); + } + } + + /* Call state machine. This will unregister the rpi if needed. */ + lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); + out: lpfc_els_free_iocb(phba, cmdiocb); /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */ @@ -2396,9 +2552,30 @@ out: if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED) { mempool_free(mbox, phba->mbox_mem_pool); + skip_recovery = 1; } } } + + /* + * If the node is a target, the handling attempts to recover the port. + * For any other port type, the rpi is unregistered as an implicit + * LOGO. 
+	 */
+	if ((ndlp->nlp_type & NLP_FCP_TARGET) && (skip_recovery == 0)) {
+		lpfc_cancel_retry_delay_tmo(vport, ndlp);
+		spin_lock_irqsave(shost->host_lock, flags);
+		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+		spin_unlock_irqrestore(shost->host_lock, flags);
+
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+				 "3187 LOGO completes to NPort x%x: Start "
+				 "Recovery Data: x%x x%x x%x x%x\n",
+				 ndlp->nlp_DID, irsp->ulpStatus,
+				 irsp->un.ulpWord[4], irsp->ulpTimeout,
+				 vport->num_disc_nodes);
+		lpfc_disc_start(vport);
+	}
 	return;
 }
@@ -2461,10 +2638,27 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		"Issue LOGO: did:x%x",
 		ndlp->nlp_DID, 0, 0);
+	/*
+	 * If we are issuing a LOGO, we may try to recover the remote NPort
+	 * by issuing a PLOGI later. Even though we issue ELS cmds by the
+	 * VPI, if we have a valid RPI, and that RPI gets unreg'ed while
+	 * that ELS command is in-flight, the HBA returns an IOERR_INVALID_RPI
+	 * for that ELS cmd. To avoid this situation, let's get rid of the
+	 * RPI right now, before any ELS cmds are sent.
+	 */
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_ISSUE_LOGO;
+	spin_unlock_irq(shost->host_lock);
+	if (lpfc_unreg_rpi(vport, ndlp)) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 0;
+	}
+
 	phba->fc_stat.elsXmitLOGO++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag |= NLP_LOGO_SND;
+	ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
 	spin_unlock_irq(shost->host_lock);
 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
@@ -2607,6 +2801,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 	/* This will cause the callback-function lpfc_cmpl_els_cmd to
 	 * trigger the release of node.
 	 */
+	lpfc_nlp_put(ndlp);
 	return 0;
 }
@@ -2880,7 +3075,7 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
 	case ELS_CMD_LOGO:
 		if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
 			ndlp->nlp_prev_state = ndlp->nlp_state;
-			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
 		}
 		break;
 	case ELS_CMD_FDISC:
@@ -2963,11 +3158,11 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 			 * ABTS we cannot generate and RRQ.
*/ lpfc_set_rrq_active(phba, ndlp, - cmdiocb->sli4_xritag, 0, 0); + cmdiocb->sli4_lxritag, 0, 0); } break; case IOSTAT_LOCAL_REJECT: - switch ((irsp->un.ulpWord[4] & 0xff)) { + switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) { case IOERR_LOOP_OPEN_FAILURE: if (cmd == ELS_CMD_FLOGI) { if (PCI_DEVICE_ID_HORNET == @@ -3010,6 +3205,13 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, case IOERR_SEQUENCE_TIMEOUT: case IOERR_INVALID_RPI: + if (cmd == ELS_CMD_PLOGI && + did == NameServer_DID) { + /* Continue forever if plogi to */ + /* the nameserver fails */ + maxretry = 0; + delay = 100; + } retry = 1; break; } @@ -3054,7 +3256,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, retry = 1; break; } - if (cmd == ELS_CMD_PLOGI) { + if ((cmd == ELS_CMD_PLOGI) || + (cmd == ELS_CMD_PRLI)) { delay = 1000; maxretry = lpfc_max_els_tries + 1; retry = 1; @@ -3174,7 +3377,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || - ((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) { + ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) != + IOERR_NO_RESOURCES))) { /* Don't reset timer for no resources */ /* If discovery / RSCN timer is running, reset it */ @@ -3198,7 +3402,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ndlp->nlp_prev_state = ndlp->nlp_state; if (cmd == ELS_CMD_PRLI) lpfc_nlp_set_state(vport, ndlp, - NLP_STE_REG_LOGIN_ISSUE); + NLP_STE_PRLI_ISSUE); else lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); @@ -3233,7 +3437,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, return 1; case ELS_CMD_LOGO: ndlp->nlp_prev_state = ndlp->nlp_state; - lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); return 1; } @@ -3493,13 +3697,17 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); mempool_free(pmb, phba->mbox_mem_pool); - if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { - lpfc_nlp_put(ndlp); - /* This is the end of the default RPI cleanup logic for this - * ndlp. If no other discovery threads are using this ndlp. - * we should free all resources associated with it. - */ - lpfc_nlp_not_used(ndlp); + if (ndlp) { + if (NLP_CHK_NODE_ACT(ndlp)) { + lpfc_nlp_put(ndlp); + /* This is the end of the default RPI cleanup logic for + * this ndlp. If no other discovery threads are using + * this ndlp, free all resources associated with it. 
+ */ + lpfc_nlp_not_used(ndlp); + } else { + lpfc_drop_node(ndlp->vport, ndlp); + } } return; @@ -3794,10 +4002,11 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, /* Xmit ELS ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0128 Xmit ELS ACC response tag x%x, XRI: x%x, " - "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n", + "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x " + "fc_flag x%x\n", elsiocb->iotag, elsiocb->iocb.ulpContext, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, - ndlp->nlp_rpi); + ndlp->nlp_rpi, vport->fc_flag); if (ndlp->nlp_flag & NLP_LOGO_ACC) { spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_LOGO_ACC; @@ -4906,6 +5115,8 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct ls_rjt stat; uint32_t cmd, did; int rc; + uint32_t fc_flag = 0; + uint32_t port_state = 0; cmd = *lp++; sp = (struct serv_parm *) lp; @@ -4927,8 +5138,6 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, return 1; } - did = Fabric_DID; - if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) { /* For a FLOGI we accept, then if our portname is greater * then the remote portname we initiate Nport login. @@ -4967,26 +5176,91 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_PT2PT_PLOGI; spin_unlock_irq(shost->host_lock); - } + + /* If we have the high WWPN we can assign our own + * myDID; otherwise, we have to WAIT for a PLOGI + * from the remote NPort to find out what it + * will be. + */ + vport->fc_myDID = PT2PT_LocalID; + } else + vport->fc_myDID = PT2PT_RemoteID; + + /* + * The vport state should go to LPFC_FLOGI only + * AFTER we issue a FLOGI, not receive one. + */ spin_lock_irq(shost->host_lock); + fc_flag = vport->fc_flag; + port_state = vport->port_state; vport->fc_flag |= FC_PT2PT; vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + vport->port_state = LPFC_FLOGI; spin_unlock_irq(shost->host_lock); + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "3311 Rcv Flogi PS x%x new PS x%x " + "fc_flag x%x new fc_flag x%x\n", + port_state, vport->port_state, + fc_flag, vport->fc_flag); + + /* + * We temporarily set fc_myDID to make it look like we are + * a Fabric. This is done just so we end up with the right + * did / sid on the FLOGI ACC rsp. + */ + did = vport->fc_myDID; + vport->fc_myDID = Fabric_DID; + } else { /* Reject this request because invalid parameters */ stat.un.b.lsRjtRsvd0 = 0; stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; stat.un.b.vendorUnique = 0; + + /* + * We temporarily set fc_myDID to make it look like we are + * a Fabric. This is done just so we end up with the right + * did / sid on the FLOGI LS_RJT rsp. 
+ */ + did = vport->fc_myDID; + vport->fc_myDID = Fabric_DID; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); + + /* Now lets put fc_myDID back to what its supposed to be */ + vport->fc_myDID = did; + return 1; } /* Send back ACC */ lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL); + /* Now lets put fc_myDID back to what its supposed to be */ + vport->fc_myDID = did; + + if (!(vport->fc_flag & FC_PT2PT_PLOGI)) { + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + goto fail; + + lpfc_config_link(phba, mbox); + + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->vport = vport; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + mempool_free(mbox, phba->mbox_mem_pool); + goto fail; + } + } + return 0; +fail: + return 1; } /** @@ -5167,7 +5441,6 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) } cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); - mempool_free(pmb, phba->mbox_mem_pool); elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, lpfc_max_els_tries, ndlp, ndlp->nlp_DID, ELS_CMD_ACC); @@ -5175,8 +5448,10 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* Decrement the ndlp reference count from previous mbox command */ lpfc_nlp_put(ndlp); - if (!elsiocb) + if (!elsiocb) { + mempool_free(pmb, phba->mbox_mem_pool); return; + } icmd = &elsiocb->iocb; icmd->ulpContext = rxid; @@ -5193,7 +5468,7 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); - + mempool_free(pmb, phba->mbox_mem_pool); /* Xmit ELS RLS ACC response tag <ulpIoTag> */ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " @@ -5577,7 +5852,7 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, pcmd += sizeof(uint32_t); els_rrq = (struct RRQ *) pcmd; - bf_set(rrq_oxid, els_rrq, rrq->xritag); + bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]); bf_set(rrq_rxid, els_rrq, rrq->rxid); bf_set(rrq_did, els_rrq, vport->fc_myDID); els_rrq->rrq = cpu_to_be32(els_rrq->rrq); @@ -5948,11 +6223,11 @@ lpfc_els_timeout(unsigned long ptr) spin_lock_irqsave(&vport->work_port_lock, iflag); tmo_posted = vport->work_port_events & WORKER_ELS_TMO; - if (!tmo_posted) + if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) vport->work_port_events |= WORKER_ELS_TMO; spin_unlock_irqrestore(&vport->work_port_lock, iflag); - if (!tmo_posted) + if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) lpfc_worker_wake_up(phba); return; } @@ -5978,19 +6253,26 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) uint32_t els_command = 0; uint32_t timeout; uint32_t remote_ID = 0xffffffff; - LIST_HEAD(txcmplq_completions); LIST_HEAD(abort_list); timeout = (uint32_t)(phba->fc_ratov << 1); pring = &phba->sli.ring[LPFC_ELS_RING]; - + if ((phba->pport->load_flag & FC_UNLOADING)) + return; spin_lock_irq(&phba->hbalock); - list_splice_init(&pring->txcmplq, &txcmplq_completions); - spin_unlock_irq(&phba->hbalock); + if (phba->sli_rev == LPFC_SLI_REV4) + spin_lock(&pring->ring_lock); - list_for_each_entry_safe(piocb, tmp_iocb, &txcmplq_completions, list) { + if ((phba->pport->load_flag & FC_UNLOADING)) { + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring->ring_lock); + spin_unlock_irq(&phba->hbalock); + return; + } + + 
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { cmd = &piocb->iocb; if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 || @@ -6029,11 +6311,12 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) } list_add_tail(&piocb->dlist, &abort_list); } - spin_lock_irq(&phba->hbalock); - list_splice(&txcmplq_completions, &pring->txcmplq); + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring->ring_lock); spin_unlock_irq(&phba->hbalock); list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { + cmd = &piocb->iocb; lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, "0127 ELS timeout Data: x%x x%x x%x " "x%x\n", els_command, @@ -6044,8 +6327,10 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) spin_unlock_irq(&phba->hbalock); } - if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) - mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); + if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq)) + if (!(phba->pport->load_flag & FC_UNLOADING)) + mod_timer(&vport->els_tmofunc, + jiffies + msecs_to_jiffies(1000 * timeout)); } /** @@ -6071,15 +6356,50 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) void lpfc_els_flush_cmd(struct lpfc_vport *vport) { - LIST_HEAD(completions); + LIST_HEAD(abort_list); struct lpfc_hba *phba = vport->phba; struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; struct lpfc_iocbq *tmp_iocb, *piocb; IOCB_t *cmd = NULL; lpfc_fabric_abort_vport(vport); + /* + * For SLI3, only the hbalock is required. But SLI4 needs to coordinate + * with the ring insert operation. Because lpfc_sli_issue_abort_iotag + * ultimately grabs the ring_lock, the driver must splice the list into + * a working list and release the locks before calling the abort. + */ + spin_lock_irq(&phba->hbalock); + if (phba->sli_rev == LPFC_SLI_REV4) + spin_lock(&pring->ring_lock); + + list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { + if (piocb->iocb_flag & LPFC_IO_LIBDFC) + continue; + + if (piocb->vport != vport) + continue; + list_add_tail(&piocb->dlist, &abort_list); + } + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring->ring_lock); + spin_unlock_irq(&phba->hbalock); + /* Abort each iocb on the aborted list and remove the dlist links. 
*/ + list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { + spin_lock_irq(&phba->hbalock); + list_del_init(&piocb->dlist); + lpfc_sli_issue_abort_iotag(phba, pring, piocb); + spin_unlock_irq(&phba->hbalock); + } + if (!list_empty(&abort_list)) + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "3387 abort list for txq not empty\n"); + INIT_LIST_HEAD(&abort_list); spin_lock_irq(&phba->hbalock); + if (phba->sli_rev == LPFC_SLI_REV4) + spin_lock(&pring->ring_lock); + list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { cmd = &piocb->iocb; @@ -6097,25 +6417,16 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport) if (piocb->vport != vport) continue; - list_move_tail(&piocb->list, &completions); - pring->txq_cnt--; - } - - list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { - if (piocb->iocb_flag & LPFC_IO_LIBDFC) { - continue; - } - - if (piocb->vport != vport) - continue; - - lpfc_sli_issue_abort_iotag(phba, pring, piocb); + list_del_init(&piocb->list); + list_add_tail(&piocb->list, &abort_list); } + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring->ring_lock); spin_unlock_irq(&phba->hbalock); /* Cancell all the IOCBs from the completions list */ - lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, - IOERR_SLI_ABORTED); + lpfc_sli_cancel_iocbs(phba, &abort_list, + IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); return; } @@ -6140,36 +6451,9 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport) void lpfc_els_flush_all_cmd(struct lpfc_hba *phba) { - LIST_HEAD(completions); - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; - struct lpfc_iocbq *tmp_iocb, *piocb; - IOCB_t *cmd = NULL; - - lpfc_fabric_abort_hba(phba); - spin_lock_irq(&phba->hbalock); - list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { - cmd = &piocb->iocb; - if (piocb->iocb_flag & LPFC_IO_LIBDFC) - continue; - /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ - if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN || - cmd->ulpCommand == CMD_QUE_RING_BUF64_CN || - cmd->ulpCommand == CMD_CLOSE_XRI_CN || - cmd->ulpCommand == CMD_ABORT_XRI_CN) - continue; - list_move_tail(&piocb->list, &completions); - pring->txq_cnt--; - } - list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { - if (piocb->iocb_flag & LPFC_IO_LIBDFC) - continue; - lpfc_sli_issue_abort_iotag(phba, pring, piocb); - } - spin_unlock_irq(&phba->hbalock); - - /* Cancel all the IOCBs from the completions list */ - lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, - IOERR_SLI_ABORTED); + struct lpfc_vport *vport; + list_for_each_entry(vport, &phba->port_list, listentry) + lpfc_els_flush_cmd(vport); return; } @@ -6343,7 +6627,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_nodelist *ndlp; struct ls_rjt stat; uint32_t *payload; - uint32_t cmd, did, newnode, rjt_err = 0; + uint32_t cmd, did, newnode; + uint8_t rjt_exp, rjt_err = 0; IOCB_t *icmd = &elsiocb->iocb; if (!vport || !(elsiocb->context2)) @@ -6417,7 +6702,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, /* ELS command <elsCmd> received from NPORT <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0112 ELS command x%x received from NPORT x%x " - "Data: x%x\n", cmd, did, vport->port_state); + "Data: x%x x%x x%x x%x\n", + cmd, did, vport->port_state, vport->fc_flag, + vport->fc_myDID, vport->fc_prevDID); switch (cmd) { case ELS_CMD_PLOGI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, @@ -6426,27 +6713,52 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, 
struct lpfc_sli_ring *pring, phba->fc_stat.elsRcvPLOGI++; ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); + if (phba->sli_rev == LPFC_SLI_REV4 && + (phba->pport->fc_flag & FC_PT2PT)) { + vport->fc_prevDID = vport->fc_myDID; + /* Our DID needs to be updated before registering + * the vfi. This is done in lpfc_rcv_plogi but + * that is called after the reg_vfi. + */ + vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo; + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "3312 Remote port assigned DID x%x " + "%x\n", vport->fc_myDID, + vport->fc_prevDID); + } lpfc_send_els_event(vport, ndlp, payload); /* If Nport discovery is delayed, reject PLOGIs */ if (vport->fc_flag & FC_DISC_DELAYED) { rjt_err = LSRJT_UNABLE_TPC; + rjt_exp = LSEXP_NOTHING_MORE; break; } + shost = lpfc_shost_from_vport(vport); if (vport->port_state < LPFC_DISC_AUTH) { if (!(phba->pport->fc_flag & FC_PT2PT) || (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { rjt_err = LSRJT_UNABLE_TPC; + rjt_exp = LSEXP_NOTHING_MORE; break; } /* We get here, and drop thru, if we are PT2PT with * another NPort and the other side has initiated * the PLOGI before responding to our FLOGI. */ + if (phba->sli_rev == LPFC_SLI_REV4 && + (phba->fc_topology_changed || + vport->fc_myDID != vport->fc_prevDID)) { + lpfc_unregister_fcf_prep(phba); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VFI_REGISTERED; + spin_unlock_irq(shost->host_lock); + phba->fc_topology_changed = 0; + lpfc_issue_reg_vfi(vport); + } } - shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; spin_unlock_irq(shost->host_lock); @@ -6474,6 +6786,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, lpfc_send_els_event(vport, ndlp, payload); if (vport->port_state < LPFC_DISC_AUTH) { rjt_err = LSRJT_UNABLE_TPC; + rjt_exp = LSEXP_NOTHING_MORE; break; } lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); @@ -6487,6 +6800,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, lpfc_send_els_event(vport, ndlp, payload); if (vport->port_state < LPFC_DISC_AUTH) { rjt_err = LSRJT_UNABLE_TPC; + rjt_exp = LSEXP_NOTHING_MORE; break; } lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); @@ -6506,6 +6820,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, phba->fc_stat.elsRcvADISC++; if (vport->port_state < LPFC_DISC_AUTH) { rjt_err = LSRJT_UNABLE_TPC; + rjt_exp = LSEXP_NOTHING_MORE; break; } lpfc_disc_state_machine(vport, ndlp, elsiocb, @@ -6519,6 +6834,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, phba->fc_stat.elsRcvPDISC++; if (vport->port_state < LPFC_DISC_AUTH) { rjt_err = LSRJT_UNABLE_TPC; + rjt_exp = LSEXP_NOTHING_MORE; break; } lpfc_disc_state_machine(vport, ndlp, elsiocb, @@ -6556,6 +6872,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, phba->fc_stat.elsRcvPRLI++; if (vport->port_state < LPFC_DISC_AUTH) { rjt_err = LSRJT_UNABLE_TPC; + rjt_exp = LSEXP_NOTHING_MORE; break; } lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); @@ -6639,6 +6956,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, if (newnode) lpfc_nlp_put(ndlp); break; + case ELS_CMD_REC: + /* receive this due to exchange closed */ + rjt_err = LSRJT_UNABLE_TPC; + rjt_exp = LSEXP_INVALID_OX_RX; + break; default: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", @@ -6646,6 +6968,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba 
*phba, struct lpfc_sli_ring *pring, /* Unsupported ELS command, reject */ rjt_err = LSRJT_CMD_UNSUPPORTED; + rjt_exp = LSEXP_NOTHING_MORE; /* Unknown ELS command <elsCmd> received from NPORT <did> */ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, @@ -6660,7 +6983,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, if (rjt_err) { memset(&stat, 0, sizeof(stat)); stat.un.b.lsRjtRsnCode = rjt_err; - stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; + stat.un.b.lsRjtRsnCodeExp = rjt_exp; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, NULL); } @@ -6707,7 +7030,8 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) { lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT && - (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) { + (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) == + IOERR_RCV_BUFFER_WAITING) { phba->fc_stat.NoRcvBuf++; /* Not enough posted buffers; Try posting more buffers */ if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) @@ -6793,8 +7117,11 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) spin_lock_irq(shost->host_lock); if (vport->fc_flag & FC_DISC_DELAYED) { spin_unlock_irq(shost->host_lock); + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "3334 Delay fc port discovery for %d seconds\n", + phba->fc_ratov); mod_timer(&vport->delayed_disc_tmo, - jiffies + HZ * phba->fc_ratov); + jiffies + msecs_to_jiffies(1000 * phba->fc_ratov)); return; } spin_unlock_irq(shost->host_lock); @@ -7078,7 +7405,7 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba) return; shost = lpfc_shost_from_vport(phba->pport); - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -7172,7 +7499,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; /* FDISC failed */ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, - "0126 FDISC failed. (%d/%d)\n", + "0126 FDISC failed. (x%x/x%x)\n", irsp->ulpStatus, irsp->un.ulpWord[4]); goto fdisc_failed; } @@ -7283,6 +7610,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int rc; vport->port_state = LPFC_FDISC; + vport->fc_myDID = 0; cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, ELS_CMD_FDISC); @@ -7581,7 +7909,8 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba) blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); /* Start a timer to unblock fabric iocbs after 100ms */ if (!blocked) - mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 ); + mod_timer(&phba->fabric_block_timer, + jiffies + msecs_to_jiffies(100)); return; } @@ -7863,10 +8192,12 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, sglq_entry->state = SGL_FREED; spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); - lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1); + lpfc_set_rrq_active(phba, ndlp, + sglq_entry->sli4_lxritag, + rxid, 1); /* Check if TXQ queue needs to be serviced */ - if (pring->txq_cnt) + if (!(list_empty(&pring->txq))) lpfc_worker_wake_up(phba); return; } @@ -7886,3 +8217,47 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, spin_unlock_irqrestore(&phba->hbalock, iflag); return; } + +/* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req. 
+ * @vport: pointer to virtual port object. + * @ndlp: nodelist pointer for the impacted node. + * + * The driver calls this routine in response to an SLI4 XRI ABORT CQE + * or an SLI3 ASYNC_STATUS_CN event from the port. For either event, + * the driver is required to send a LOGO to the remote node before it + * attempts to recover its login to the remote node. + */ +void +lpfc_sli_abts_recover_port(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp) +{ + struct Scsi_Host *shost; + struct lpfc_hba *phba; + unsigned long flags = 0; + + shost = lpfc_shost_from_vport(vport); + phba = vport->phba; + if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) { + lpfc_printf_log(phba, KERN_INFO, + LOG_SLI, "3093 No rport recovery needed. " + "rport in state 0x%x\n", ndlp->nlp_state); + return; + } + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "3094 Start rport recovery on shost id 0x%x " + "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " + "flags 0x%x\n", + shost->host_no, ndlp->nlp_DID, + vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, + ndlp->nlp_flag); + /* + * The rport is not responding. Remove the FCP-2 flag to prevent + * an ADISC in the follow-up recovery code. + */ + spin_lock_irqsave(shost->host_lock, flags); + ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + spin_unlock_irqrestore(shost->host_lock, flags); + lpfc_issue_els_logo(vport, ndlp, 0); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); +} + diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 678a4b11059..2a17e31265b 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2011 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -123,6 +123,10 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) "rport devlosscb: sid:x%x did:x%x flg:x%x", ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, + "3181 dev_loss_callbk x%06x, rport %p flg x%x\n", + ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag); + /* Don't defer this if we are in the process of deleting the vport * or unloading the driver. The unload will cleanup the node * appropriately we just need to cleanup the ndlp rport info here. 
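The hunk that follows has lpfc_dev_loss_tmo_callbk drop the rport reference and bail out for fabric nodes whose rport port_name no longer matches the WWPN cached in the ndlp. For reference, that single integer comparison works because a world-wide name is eight bytes packed big-endian into a u64; a minimal userspace sketch of the packing (illustrative only, the driver itself relies on the kernel's wwn_to_u64() helper):

#include <stdint.h>

/* Pack an 8-byte WWN big-endian into a 64-bit value, the same layout the
 * FC transport uses for rport->port_name, so two names can be compared
 * with a single integer compare. */
static uint64_t wwn_pack(const uint8_t wwn[8])
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | wwn[i];
	return v;
}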
@@ -142,16 +146,26 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) return; + if (ndlp->nlp_type & NLP_FABRIC) { + + /* If the WWPN of the rport and ndlp don't match, ignore it */ + if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) { + put_device(&rport->dev); + return; + } + } + evtp = &ndlp->dev_loss_evt; if (!list_empty(&evtp->evt_listp)) return; + evtp->evt_arg1 = lpfc_nlp_get(ndlp); + spin_lock_irq(&phba->hbalock); /* We need to hold the node by incrementing the reference * count until this queued work is done */ - evtp->evt_arg1 = lpfc_nlp_get(ndlp); if (evtp->evt_arg1) { evtp->evt = LPFC_EVT_DEV_LOSS; list_add_tail(&evtp->evt_listp, &phba->work_list); @@ -202,6 +216,10 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) "rport devlosstmo:did:x%x type:x%x id:x%x", ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id); + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, + "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n", + ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag); + /* Don't defer this if we are in the process of deleting the vport * or unloading the driver. The unload will cleanup the node * appropriately we just need to cleanup the ndlp rport info here. @@ -530,7 +548,7 @@ lpfc_work_list_done(struct lpfc_hba *phba) break; case LPFC_EVT_OFFLINE_PREP: if (phba->link_state >= LPFC_LINK_DOWN) - lpfc_offline_prep(phba); + lpfc_offline_prep(phba, LPFC_MBX_WAIT); *(int *)(evtp->evt_arg1) = 0; complete((struct completion *)(evtp->evt_arg2)); break; @@ -656,8 +674,6 @@ lpfc_work_done(struct lpfc_hba *phba) lpfc_fdmi_timeout_handler(vport); if (work_port_events & WORKER_RAMP_DOWN_QUEUE) lpfc_ramp_down_queue_handler(phba); - if (work_port_events & WORKER_RAMP_UP_QUEUE) - lpfc_ramp_up_queue_handler(phba); if (work_port_events & WORKER_DELAYED_DISC_TMO) lpfc_delayed_disc_timeout_handler(vport); } @@ -674,12 +690,15 @@ lpfc_work_done(struct lpfc_hba *phba) /* Set the lpfc data pending flag */ set_bit(LPFC_DATA_READY, &phba->data_flags); } else { - pring->flag &= ~LPFC_DEFERRED_RING_EVENT; - lpfc_sli_handle_slow_ring_event(phba, pring, - (status & - HA_RXMASK)); + if (phba->link_state >= LPFC_LINK_UP) { + pring->flag &= ~LPFC_DEFERRED_RING_EVENT; + lpfc_sli_handle_slow_ring_event(phba, pring, + (status & + HA_RXMASK)); + } } - if ((phba->sli_rev == LPFC_SLI_REV4) && pring->txq_cnt) + if ((phba->sli_rev == LPFC_SLI_REV4) & + (!list_empty(&pring->txq))) lpfc_drain_txq(phba); /* * Turn on Ring interrupts @@ -712,7 +731,8 @@ lpfc_do_work(void *p) struct lpfc_hba *phba = p; int rc; - set_user_nice(current, -20); + set_user_nice(current, MIN_NICE); + current->flags |= PF_NOFREEZE; phba->data_flags = 0; while (!kthread_should_stop()) { @@ -987,9 +1007,6 @@ lpfc_linkup(struct lpfc_hba *phba) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) lpfc_linkup_port(vports[i]); lpfc_destroy_vport_work_array(phba, vports); - if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && - (phba->sli_rev < LPFC_SLI_REV4)) - lpfc_issue_clear_la(phba, phba->pport); return 0; } @@ -1094,7 +1111,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* Start discovery by sending a FLOGI. 
port_state is identically * LPFC_FLOGI while waiting for FLOGI cmpl */ - if (vport->port_state != LPFC_FLOGI) + if (vport->port_state != LPFC_FLOGI || vport->fc_flag & FC_PT2PT_PLOGI) lpfc_initial_flogi(vport); return; @@ -1415,7 +1432,8 @@ lpfc_register_fcf(struct lpfc_hba *phba) if (phba->fcf.fcf_flag & FCF_REGISTERED) { phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); phba->hba_flag &= ~FCF_TS_INPROG; - if (phba->pport->port_state != LPFC_FLOGI) { + if (phba->pport->port_state != LPFC_FLOGI && + phba->pport->fc_flag & FC_FABRIC) { phba->hba_flag |= FCF_RR_INPROG; spin_unlock_irq(&phba->hbalock); lpfc_initial_flogi(phba->pport); @@ -1488,9 +1506,10 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, } } - /* If FCF not available return 0 */ + /* FCF not valid/available or solicitation in progress */ if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) || - !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record)) + !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) || + bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record)) return 0; if (!(phba->hba_flag & HBA_FIP_SUPPORT)) { @@ -1713,7 +1732,7 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) * use through a sequence of @fcf_cnt eligible FCF records with equal * probability. To perform integer manunipulation of random numbers with * size unit32_t, the lower 16 bits of the 32-bit random number returned - * from random32() are taken as the random random number generated. + * from prandom_u32() are taken as the random random number generated. * * Returns true when outcome is for the newly read FCF record should be * chosen; otherwise, return false when outcome is for keeping the previously @@ -1725,7 +1744,7 @@ lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt) uint32_t rand_num; /* Get 16-bit uniform random number */ - rand_num = (0xFFFF & random32()); + rand_num = 0xFFFF & prandom_u32(); /* Decision with probability 1/fcf_cnt */ if ((fcf_cnt * rand_num) < 0xFFFF) @@ -1773,6 +1792,8 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, virt_addr = mboxq->sge_array->addr[0]; shdr = (union lpfc_sli4_cfg_shdr *)virt_addr; + lpfc_sli_pcimem_bcopy(shdr, shdr, + sizeof(union lpfc_sli4_cfg_shdr)); shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status) { @@ -1824,6 +1845,7 @@ lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba, "\tFCF_Index : x%x\n" "\tFCF_Avail : x%x\n" "\tFCF_Valid : x%x\n" + "\tFCF_SOL : x%x\n" "\tFIP_Priority : x%x\n" "\tMAC_Provider : x%x\n" "\tLowest VLANID : x%x\n" @@ -1834,6 +1856,7 @@ lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba, bf_get(lpfc_fcf_record_fcf_index, fcf_record), bf_get(lpfc_fcf_record_fcf_avail, fcf_record), bf_get(lpfc_fcf_record_fcf_valid, fcf_record), + bf_get(lpfc_fcf_record_fcf_sol, fcf_record), fcf_record->fip_priority, bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record), vlan_id, @@ -2167,12 +2190,14 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) new_fcf_record)); lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, "2781 FCF (x%x) failed connection " - "list check: (x%x/x%x)\n", + "list check: (x%x/x%x/%x)\n", bf_get(lpfc_fcf_record_fcf_index, new_fcf_record), bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record), bf_get(lpfc_fcf_record_fcf_valid, + new_fcf_record), + bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record)); if ((phba->fcf.fcf_flag & FCF_IN_USE) && lpfc_sli4_fcf_record_match(phba, 
&phba->fcf.current_rec, @@ -2242,8 +2267,11 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2836 New FCF matches in-use " - "FCF (x%x)\n", - phba->fcf.current_rec.fcf_indx); + "FCF (x%x), port_state:x%x, " + "fc_flag:x%x\n", + phba->fcf.current_rec.fcf_indx, + phba->pport->port_state, + phba->pport->fc_flag); goto out; } else lpfc_printf_log(phba, KERN_ERR, LOG_FIP, @@ -2357,7 +2385,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) phba->fcf.eligible_fcf_cnt = 1; /* Seeding the random number generator for random selection */ seed = (uint32_t)(0xFFFFFFFF & jiffies); - srandom32(seed); + prandom_seed(seed); } spin_unlock_irq(&phba->hbalock); goto read_next_fcf; @@ -2515,8 +2543,11 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) if (!new_fcf_record) { lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, "2766 Mailbox command READ_FCF_RECORD " - "failed to retrieve a FCF record.\n"); - goto error_out; + "failed to retrieve a FCF record. " + "hba_flg x%x fcf_flg x%x\n", phba->hba_flag, + phba->fcf.fcf_flag); + lpfc_unregister_fcf_rescan(phba); + goto out; } /* Get the needed parameters from FCF record */ @@ -2768,7 +2799,19 @@ void lpfc_issue_init_vpi(struct lpfc_vport *vport) { LPFC_MBOXQ_t *mboxq; - int rc; + int rc, vpi; + + if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) { + vpi = lpfc_alloc_vpi(vport->phba); + if (!vpi) { + lpfc_printf_vlog(vport, KERN_ERR, + LOG_MBOX, + "3303 Failed to obtain vport vpi\n"); + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + return; + } + vport->vpi = vpi; + } mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { @@ -2843,7 +2886,14 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) struct lpfc_vport *vport = mboxq->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); - if (mboxq->u.mb.mbxStatus) { + /* + * VFI not supported for interface type 0, so ignore any mailbox + * error (except VFI in use) and continue with the discovery. + */ + if (mboxq->u.mb.mbxStatus && + (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != + LPFC_SLI_INTF_IF_TYPE_0) && + mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) { lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, "2018 REG_VFI mbxStatus error x%x " "HBA state x%x\n", @@ -2858,6 +2908,16 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) lpfc_vport_set_state(vport, FC_VPORT_FAILED); goto out_free_mem; } + + /* If the VFI is already registered, there is nothing else to do + * Unless this was a VFI update and we are in PT2PT mode, then + * we should drop through to set the port state to ready. + */ + if (vport->fc_flag & FC_VFI_REGISTERED) + if (!(phba->sli_rev == LPFC_SLI_REV4 && + vport->fc_flag & FC_PT2PT)) + goto out_free_mem; + /* The VPI is implicitly registered when the VFI is registered */ spin_lock_irq(shost->host_lock); vport->vpi_state |= LPFC_VPI_REGISTERED; @@ -2873,14 +2933,29 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) goto out_free_mem; } + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x " + "alpacnt:%d LinkState:%x topology:%x\n", + vport->port_state, vport->fc_flag, vport->fc_myDID, + vport->phba->alpa_map[0], + phba->link_state, phba->fc_topology); + if (vport->port_state == LPFC_FABRIC_CFG_LINK) { - /* For private loop just start discovery and we are done. 
*/ - if ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) && - !(vport->fc_flag & FC_PUBLIC_LOOP)) { + /* + * For private loop or for NPort pt2pt, + * just start discovery and we are done. + */ + if ((vport->fc_flag & FC_PT2PT) || + ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) && + !(vport->fc_flag & FC_PUBLIC_LOOP))) { + /* Use loop map to make discovery list */ lpfc_disc_list_loopmap(vport); /* Start discovery */ - lpfc_disc_start(vport); + if (vport->fc_flag & FC_PT2PT) + vport->port_state = LPFC_VPORT_READY; + else + lpfc_disc_start(vport); } else { lpfc_start_fdiscs(phba); lpfc_do_scr_ns_plogi(phba, vport); @@ -2945,6 +3020,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) struct lpfc_dmabuf *mp; int rc; struct fcf_record *fcf_record; + uint32_t fc_flags = 0; spin_lock_irq(&phba->hbalock); switch (bf_get(lpfc_mbx_read_top_link_spd, la)) { @@ -2961,6 +3037,15 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) break; } + if (phba->fc_topology && + phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "3314 Toplogy changed was 0x%x is 0x%x\n", + phba->fc_topology, + bf_get(lpfc_mbx_read_top_topology, la)); + phba->fc_topology_changed = 1; + } + phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la); phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; @@ -2976,11 +3061,8 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) "1309 Link Up Event npiv not supported in loop " "topology\n"); /* Get Loop Map information */ - if (bf_get(lpfc_mbx_read_top_il, la)) { - spin_lock_irq(shost->host_lock); - vport->fc_flag |= FC_LBIT; - spin_unlock_irq(shost->host_lock); - } + if (bf_get(lpfc_mbx_read_top_il, la)) + fc_flags |= FC_LBIT; vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la); i = la->lilpBde64.tus.f.bdeSize; @@ -3029,11 +3111,15 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; } vport->fc_myDID = phba->fc_pref_DID; + fc_flags |= FC_LBIT; + } + spin_unlock_irq(&phba->hbalock); + + if (fc_flags) { spin_lock_irq(shost->host_lock); - vport->fc_flag |= FC_LBIT; + vport->fc_flag |= fc_flags; spin_unlock_irq(shost->host_lock); } - spin_unlock_irq(&phba->hbalock); lpfc_linkup(phba); sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -3202,8 +3288,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) vport->fc_flag &= ~FC_BYPASSED_MODE; spin_unlock_irq(shost->host_lock); - if ((phba->fc_eventTag < la->eventTag) || - (phba->fc_eventTag == la->eventTag)) { + if (phba->fc_eventTag <= la->eventTag) { phba->fc_stat.LinkMultiEvent++; if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) if (phba->fc_eventTag != 0) @@ -3211,16 +3296,18 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) } phba->fc_eventTag = la->eventTag; - spin_lock_irq(&phba->hbalock); - if (bf_get(lpfc_mbx_read_top_mm, la)) - phba->sli.sli_flag |= LPFC_MENLO_MAINT; - else - phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; - spin_unlock_irq(&phba->hbalock); + if (phba->sli_rev < LPFC_SLI_REV4) { + spin_lock_irq(&phba->hbalock); + if (bf_get(lpfc_mbx_read_top_mm, la)) + phba->sli.sli_flag |= LPFC_MENLO_MAINT; + else + phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; + spin_unlock_irq(&phba->hbalock); + } phba->link_events++; if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) && - (!bf_get(lpfc_mbx_read_top_mm, la))) { + !(phba->sli.sli_flag & 
LPFC_MENLO_MAINT)) { phba->fc_stat.LinkUp++; if (phba->link_flag & LS_LOOPBACK_MODE) { lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, @@ -3265,8 +3352,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) bf_get(lpfc_mbx_read_top_fa, la)); lpfc_mbx_issue_link_down(phba); } - if ((bf_get(lpfc_mbx_read_top_mm, la)) && - (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)) { + if ((phba->sli.sli_flag & LPFC_MENLO_MAINT) && + ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP))) { if (phba->link_state != LPFC_LINK_DOWN) { phba->fc_stat.LinkDown++; lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, @@ -3294,8 +3381,9 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) } } - if (bf_get(lpfc_mbx_read_top_fa, la)) { - if (bf_get(lpfc_mbx_read_top_mm, la)) + if ((phba->sli_rev < LPFC_SLI_REV4) && + bf_get(lpfc_mbx_read_top_fa, la)) { + if (phba->sli.sli_flag & LPFC_MENLO_MAINT) lpfc_issue_clear_la(phba, vport); lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, "1311 fa %d\n", @@ -3479,7 +3567,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba) LPFC_MBOXQ_t *pmb = NULL; MAILBOX_t *mb; struct static_vport_info *vport_info; - int rc = 0, i; + int mbx_wait_rc = 0, i; struct fc_vport_identifiers vport_id; struct fc_vport *new_fc_vport; struct Scsi_Host *shost; @@ -3496,7 +3584,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba) " allocate mailbox memory\n"); return; } - + memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); mb = &pmb->u.mb; vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL); @@ -3510,24 +3598,31 @@ lpfc_create_static_vport(struct lpfc_hba *phba) vport_buff = (uint8_t *) vport_info; do { + /* free dma buffer from previous round */ + if (pmb->context1) { + mp = (struct lpfc_dmabuf *)pmb->context1; + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + } if (lpfc_dump_static_vport(phba, pmb, offset)) goto out; pmb->vport = phba->pport; - rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO); + mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb, + LPFC_MBOX_TMO); - if ((rc != MBX_SUCCESS) || mb->mbxStatus) { + if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0544 lpfc_create_static_vport failed to" " issue dump mailbox command ret 0x%x " "status 0x%x\n", - rc, mb->mbxStatus); + mbx_wait_rc, mb->mbxStatus); goto out; } if (phba->sli_rev == LPFC_SLI_REV4) { byte_count = pmb->u.mqe.un.mb_words[5]; - mp = (struct lpfc_dmabuf *) pmb->context2; + mp = (struct lpfc_dmabuf *)pmb->context1; if (byte_count > sizeof(struct static_vport_info) - offset) byte_count = sizeof(struct static_vport_info) @@ -3591,9 +3686,9 @@ lpfc_create_static_vport(struct lpfc_hba *phba) out: kfree(vport_info); - if (rc != MBX_TIMEOUT) { - if (pmb->context2) { - mp = (struct lpfc_dmabuf *) pmb->context2; + if (mbx_wait_rc != MBX_TIMEOUT) { + if (pmb->context1) { + mp = (struct lpfc_dmabuf *)pmb->context1; lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); } @@ -3821,6 +3916,10 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN) fc_remote_port_rolechg(rport, rport_ids.roles); + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, + "3183 rport register x%06x, rport %p role x%x\n", + ndlp->nlp_DID, rport, rport_ids.roles); + if ((rport->scsi_target_id != -1) && (rport->scsi_target_id < LPFC_MAX_TARGET)) { ndlp->nlp_sid = rport->scsi_target_id; @@ -3837,6 +3936,10 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp) "rport delete: 
did:x%x flg:x%x type x%x", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, + "3184 rport unregister x%06x, rport %p\n", + ndlp->nlp_DID, rport); + fc_remote_port_delete(rport); return; @@ -3871,7 +3974,10 @@ lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count) vport->fc_map_cnt += count; break; case NLP_STE_NPR_NODE: - vport->fc_npr_cnt += count; + if (vport->fc_npr_cnt == 0 && count == -1) + vport->fc_npr_cnt = 0; + else + vport->fc_npr_cnt += count; break; } spin_unlock_irq(shost->host_lock); @@ -3951,6 +4057,7 @@ lpfc_nlp_state_name(char *buffer, size_t size, int state) [NLP_STE_ADISC_ISSUE] = "ADISC", [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN", [NLP_STE_PRLI_ISSUE] = "PRLI", + [NLP_STE_LOGO_ISSUE] = "LOGO", [NLP_STE_UNMAPPED_NODE] = "UNMAPPED", [NLP_STE_MAPPED_NODE] = "MAPPED", [NLP_STE_NPR_NODE] = "NPR", @@ -4068,8 +4175,6 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, NLP_INT_NODE_ACT(ndlp); atomic_set(&ndlp->cmd_pending, 0); ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; - if (vport->phba->sli_rev == LPFC_SLI_REV4) - ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba); } struct lpfc_nodelist * @@ -4079,6 +4184,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_hba *phba = vport->phba; uint32_t did; unsigned long flags; + unsigned long *active_rrqs_xri_bitmap = NULL; if (!ndlp) return NULL; @@ -4107,13 +4213,21 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Keep the original DID */ did = ndlp->nlp_DID; + if (phba->sli_rev == LPFC_SLI_REV4) + active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap; /* re-initialize ndlp except of ndlp linked list pointer */ memset((((char *)ndlp) + sizeof (struct list_head)), 0, sizeof (struct lpfc_nodelist) - sizeof (struct list_head)); lpfc_initialize_node(vport, ndlp, did); + if (phba->sli_rev == LPFC_SLI_REV4) + ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap; + spin_unlock_irqrestore(&phba->ndlp_lock, flags); + if (vport->phba->sli_rev == LPFC_SLI_REV4) + ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba); + if (state != NLP_STE_UNUSED_NODE) lpfc_nlp_set_state(vport, ndlp, state); @@ -4170,7 +4284,7 @@ lpfc_set_disctmo(struct lpfc_vport *vport) tmo, vport->port_state, vport->fc_flag); } - mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo); + mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo)); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_DISC_TMO; spin_unlock_irq(shost->host_lock); @@ -4303,7 +4417,6 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) with an error */ list_move_tail(&iocb->list, &completions); - pring->txq_cnt--; } } spin_unlock_irq(&phba->hbalock); @@ -4317,6 +4430,27 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) return 0; } +/** + * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO + * @phba: Pointer to HBA context object. + * @pmb: Pointer to mailbox object. + * + * This function will issue an ELS LOGO command after completing + * the UNREG_RPI. + **/ +void +lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + struct lpfc_vport *vport = pmb->vport; + struct lpfc_nodelist *ndlp; + + ndlp = (struct lpfc_nodelist *)(pmb->context1); + if (!ndlp) + return; + lpfc_issue_els_logo(vport, ndlp, 0); + mempool_free(pmb, phba->mbox_mem_pool); +} + /* * Free rpi associated with LPFC_NODELIST entry. 
* This routine is called from lpfc_freenode(), when we are removing @@ -4334,16 +4468,31 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) int rc; uint16_t rpi; - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + if (ndlp->nlp_flag & NLP_RPI_REGISTERED || + ndlp->nlp_flag & NLP_REG_LOGIN_SEND) { + if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + "3366 RPI x%x needs to be " + "unregistered nlp_flag x%x " + "did x%x\n", + ndlp->nlp_rpi, ndlp->nlp_flag, + ndlp->nlp_DID); mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mbox) { /* SLI4 ports require the physical rpi value. */ rpi = ndlp->nlp_rpi; if (phba->sli_rev == LPFC_SLI_REV4) rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; + lpfc_unreg_login(phba, vport->vpi, rpi, mbox); mbox->vport = vport; - mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + if (ndlp->nlp_flag & NLP_ISSUE_LOGO) { + mbox->context1 = ndlp; + mbox->mbox_cmpl = lpfc_nlp_logo_unreg; + } else { + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + } + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) mempool_free(mbox, phba->mbox_mem_pool); @@ -4486,9 +4635,13 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_disable_node(vport, ndlp); } + + /* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */ + /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ if ((mb = phba->sli.mbox_active)) { if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && + !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) && (ndlp == (struct lpfc_nodelist *) mb->context2)) { mb->context2 = NULL; mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; @@ -4499,6 +4652,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) /* Cleanup REG_LOGIN completions which are not yet processed */ list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) || + (mb->mbox_flag & LPFC_MBX_IMED_UNREG) || (ndlp != (struct lpfc_nodelist *) mb->context2)) continue; @@ -4508,6 +4662,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && + !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) && (ndlp == (struct lpfc_nodelist *) mb->context2)) { mp = (struct lpfc_dmabuf *) (mb->context1); if (mp) { @@ -4572,7 +4727,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; mbox->vport = vport; - mbox->context2 = NULL; + mbox->context2 = ndlp; rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { mempool_free(mbox, phba->mbox_mem_pool); @@ -4654,9 +4809,10 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) ((uint32_t) ndlp->nlp_rpi & 0xff)); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "0929 FIND node DID " - "Data: x%p x%x x%x x%x\n", + "Data: x%p x%x x%x x%x %p\n", ndlp, ndlp->nlp_DID, - ndlp->nlp_flag, data1); + ndlp->nlp_flag, data1, + ndlp->active_rrqs_xri_bitmap); return ndlp; } } @@ -4852,8 +5008,12 @@ lpfc_disc_start(struct lpfc_vport *vport) uint32_t clear_la_pending; int did_changed; - if (!lpfc_is_link_up(phba)) + if (!lpfc_is_link_up(phba)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + "3315 Link is not up %x\n", + phba->link_state); return; + } if (phba->link_state == LPFC_CLEAR_LA) clear_la_pending = 1; @@ -4886,11 +5046,13 @@ lpfc_disc_start(struct lpfc_vport *vport) if (num_sent) return; - /* Register the VPI 
for SLI3, NON-NPIV only. */ + /* Register the VPI for SLI3, NPIV only. */ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && !(vport->fc_flag & FC_PT2PT) && !(vport->fc_flag & FC_RSCN_MODE) && (phba->sli_rev < LPFC_SLI_REV4)) { + if (vport->port_type == LPFC_PHYSICAL_PORT) + lpfc_issue_clear_la(phba, vport); lpfc_issue_reg_vpi(phba, vport); return; } @@ -4971,7 +5133,6 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) { list_move_tail(&iocb->list, &completions); - pring->txq_cnt--; } } @@ -5314,7 +5475,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (vport->cfg_fdmi_on == 1) lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); else - mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60); + mod_timer(&vport->fc_fdmitmo, + jiffies + msecs_to_jiffies(1000 * 60)); /* decrement the node reference count held for this callback * function. @@ -5332,6 +5494,10 @@ lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param) { uint16_t *rpi = param; + /* check for active node */ + if (!NLP_CHK_NODE_ACT(ndlp)) + return 0; + return ndlp->nlp_rpi == *rpi; } @@ -5348,9 +5514,17 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) struct lpfc_nodelist *ndlp; list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { - if (filter(ndlp, param)) + if (filter(ndlp, param)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, + "3185 FIND node filter %p DID " + "Data: x%p x%x x%x\n", + filter, ndlp, ndlp->nlp_DID, + ndlp->nlp_flag); return ndlp; + } } + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, + "3186 FIND node filter %p NOT FOUND.\n", filter); return NULL; } @@ -5455,6 +5629,17 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_initialize_node(vport, ndlp, did); INIT_LIST_HEAD(&ndlp->nlp_listp); + if (vport->phba->sli_rev == LPFC_SLI_REV4) { + ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba); + ndlp->active_rrqs_xri_bitmap = + mempool_alloc(vport->phba->active_rrq_pool, + GFP_KERNEL); + if (ndlp->active_rrqs_xri_bitmap) + memset(ndlp->active_rrqs_xri_bitmap, 0, + ndlp->phba->cfg_rrq_xri_bitmap_sz); + } + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, "node init: did:x%x", @@ -5479,9 +5664,9 @@ lpfc_nlp_release(struct kref *kref) ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, - "0279 lpfc_nlp_release: ndlp:x%p " + "0279 lpfc_nlp_release: ndlp:x%p did %x " "usgmap:x%x refcnt:%d\n", - (void *)ndlp, ndlp->nlp_usg_map, + (void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map, atomic_read(&ndlp->kref.refcount)); /* remove ndlp from action. 
*/ @@ -5498,6 +5683,9 @@ lpfc_nlp_release(struct kref *kref) /* free ndlp memory for final ndlp release */ if (NLP_CHK_FREE_REQ(ndlp)) { kfree(ndlp->lat_data); + if (phba->sli_rev == LPFC_SLI_REV4) + mempool_free(ndlp->active_rrqs_xri_bitmap, + ndlp->phba->active_rrq_pool); mempool_free(ndlp, ndlp->phba->nlp_mem_pool); } } @@ -5669,14 +5857,13 @@ lpfc_fcf_inuse(struct lpfc_hba *phba) ret = 1; spin_unlock_irq(shost->host_lock); goto out; - } else { + } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + ret = 1; lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "2624 RPI %x DID %x flg %x still " - "logged in\n", - ndlp->nlp_rpi, ndlp->nlp_DID, - ndlp->nlp_flag); - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) - ret = 1; + "2624 RPI %x DID %x flag %x " + "still logged in\n", + ndlp->nlp_rpi, ndlp->nlp_DID, + ndlp->nlp_flag); } } spin_unlock_irq(shost->host_lock); @@ -5748,7 +5935,7 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba) struct lpfc_vport **vports; struct lpfc_nodelist *ndlp; struct Scsi_Host *shost; - int i, rc; + int i = 0, rc; /* Unregister RPIs */ if (lpfc_fcf_inuse(phba)) @@ -5776,6 +5963,20 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba) spin_unlock_irq(shost->host_lock); } lpfc_destroy_vport_work_array(phba, vports); + if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) { + ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); + if (ndlp) + lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); + lpfc_cleanup_pending_mbox(phba->pport); + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli4_unreg_all_rpis(phba->pport); + lpfc_mbx_unreg_vpi(phba->pport); + shost = lpfc_shost_from_vport(phba->pport); + spin_lock_irq(shost->host_lock); + phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED; + spin_unlock_irq(shost->host_lock); + } /* Cleanup any outstanding ELS commands */ lpfc_els_flush_all_cmd(phba); @@ -5991,13 +6192,41 @@ lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba, memcpy(&conn_entry->conn_rec, &conn_rec[i], sizeof(struct lpfc_fcf_conn_rec)); - conn_entry->conn_rec.vlan_tag = - le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF; - conn_entry->conn_rec.flags = - le16_to_cpu(conn_entry->conn_rec.flags); list_add_tail(&conn_entry->list, &phba->fcf_conn_rec_list); } + + if (!list_empty(&phba->fcf_conn_rec_list)) { + i = 0; + list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, + list) { + conn_rec = &conn_entry->conn_rec; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3345 FCF connection list rec[%02d]: " + "flags:x%04x, vtag:x%04x, " + "fabric_name:x%02x:%02x:%02x:%02x:" + "%02x:%02x:%02x:%02x, " + "switch_name:x%02x:%02x:%02x:%02x:" + "%02x:%02x:%02x:%02x\n", i++, + conn_rec->flags, conn_rec->vlan_tag, + conn_rec->fabric_name[0], + conn_rec->fabric_name[1], + conn_rec->fabric_name[2], + conn_rec->fabric_name[3], + conn_rec->fabric_name[4], + conn_rec->fabric_name[5], + conn_rec->fabric_name[6], + conn_rec->fabric_name[7], + conn_rec->switch_name[0], + conn_rec->switch_name[1], + conn_rec->switch_name[2], + conn_rec->switch_name[3], + conn_rec->switch_name[4], + conn_rec->switch_name[5], + conn_rec->switch_name[6], + conn_rec->switch_name[7]); + } + } } /** diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 7245bead375..23625925237 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. 
* - * Copyright (C) 2004-2010 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -45,6 +45,7 @@ #define LPFC_EXTRA_RING 1 /* ring 1 for other protocols */ #define LPFC_ELS_RING 2 /* ring 2 for ELS commands */ #define LPFC_FCP_NEXT_RING 3 +#define LPFC_FCP_OAS_RING 3 #define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */ #define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */ @@ -70,6 +71,7 @@ /* vendor ID used in SCSI netlink calls */ #define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX) +#define FW_REV_STR_SIZE 32 /* Common Transport structures and definitions */ union CtRevisionId { @@ -537,6 +539,7 @@ struct fc_vft_header { #define ELS_CMD_ECHO 0x10000000 #define ELS_CMD_TEST 0x11000000 #define ELS_CMD_RRQ 0x12000000 +#define ELS_CMD_REC 0x13000000 #define ELS_CMD_PRLI 0x20100014 #define ELS_CMD_PRLO 0x21100014 #define ELS_CMD_PRLO_ACC 0x02100014 @@ -573,6 +576,7 @@ struct fc_vft_header { #define ELS_CMD_ECHO 0x10 #define ELS_CMD_TEST 0x11 #define ELS_CMD_RRQ 0x12 +#define ELS_CMD_REC 0x13 #define ELS_CMD_PRLI 0x14001020 #define ELS_CMD_PRLO 0x14001021 #define ELS_CMD_PRLO_ACC 0x14001002 @@ -1187,8 +1191,8 @@ typedef struct { */ /* Number of rings currently used and available. */ -#define MAX_CONFIGURED_RINGS 3 -#define MAX_RINGS 4 +#define MAX_SLI3_CONFIGURED_RINGS 3 +#define MAX_SLI3_RINGS 4 /* IOCB / Mailbox is owned by FireFly */ #define OWN_CHIP 1 @@ -1250,6 +1254,8 @@ typedef struct { #define PCI_VENDOR_ID_SERVERENGINE 0x19a2 #define PCI_DEVICE_ID_TIGERSHARK 0x0704 #define PCI_DEVICE_ID_TOMCAT 0x0714 +#define PCI_DEVICE_ID_SKYHAWK 0x0724 +#define PCI_DEVICE_ID_SKYHAWK_VF 0x072c #define JEDEC_ID_ADDRESS 0x0080001c #define FIREFLY_JEDEC_ID 0x1ACC @@ -1457,6 +1463,7 @@ typedef struct { /* FireFly BIU registers */ #define MBX_UNREG_FCFI 0xA2 #define MBX_INIT_VFI 0xA3 #define MBX_INIT_VPI 0xA4 +#define MBX_ACCESS_VDATA 0xA5 #define MBX_AUTH_PORT 0xF8 #define MBX_SECURITY_MGMT 0xF9 @@ -1661,6 +1668,7 @@ enum lpfc_protgrp_type { #define BG_OP_IN_CSUM_OUT_CSUM 0x5 #define BG_OP_IN_CRC_OUT_CSUM 0x6 #define BG_OP_IN_CSUM_OUT_CRC 0x7 +#define BG_OP_RAW_MODE 0x8 struct lpfc_pde5 { uint32_t word0; @@ -2567,6 +2575,8 @@ typedef struct { #define DMP_MEM_REG 0x1 #define DMP_NV_PARAMS 0x2 +#define DMP_LMSD 0x3 /* Link Module Serial Data */ +#define DMP_WELL_KNOWN 0x4 #define DMP_REGION_VPD 0xe #define DMP_VPD_SIZE 0x400 /* maximum amount of VPD */ @@ -2988,7 +2998,7 @@ typedef struct _PCB { uint32_t pgpAddrLow; uint32_t pgpAddrHigh; - SLI2_RDSC rdsc[MAX_RINGS]; + SLI2_RDSC rdsc[MAX_SLI3_RINGS]; } PCB_t; /* NEW_FEATURE */ @@ -3098,18 +3108,18 @@ struct lpfc_pgp { struct sli2_desc { uint32_t unused1[16]; - struct lpfc_hgp host[MAX_RINGS]; - struct lpfc_pgp port[MAX_RINGS]; + struct lpfc_hgp host[MAX_SLI3_RINGS]; + struct lpfc_pgp port[MAX_SLI3_RINGS]; }; struct sli3_desc { - struct lpfc_hgp host[MAX_RINGS]; + struct lpfc_hgp host[MAX_SLI3_RINGS]; uint32_t reserved[8]; uint32_t hbq_put[16]; }; struct sli3_pgp { - struct lpfc_pgp port[MAX_RINGS]; + struct lpfc_pgp port[MAX_SLI3_RINGS]; uint32_t hbq_get[16]; }; @@ -3239,6 +3249,7 @@ typedef struct { #define IOERR_SLI_DOWN 0x101 /* ulpStatus - Driver defined */ #define IOERR_SLI_BRESET 0x102 #define IOERR_SLI_ABORTED 0x103 +#define IOERR_PARAM_MASK 0x1ff } PARM_ERR; typedef union { @@ -3371,6 +3382,9 @@ typedef struct { WORD5 w5; /* Header control/status word */ } XMT_SEQ_FIELDS64; +/* This 
word is remote ports D_ID for XMIT_ELS_RSP64 */ +#define xmit_els_remoteID xrsqRo + /* IOCB Command template for 64 bit RCV_SEQUENCE64 */ typedef struct { struct ulp_bde64 rcvBde; diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index e5bfa7f334e..f432ec180cf 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2009 Emulex. All rights reserved. * + * Copyright (C) 2009-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -106,6 +106,7 @@ struct lpfc_sli_intf { #define LPFC_SLI4_MB_WORD_COUNT 64 #define LPFC_MAX_MQ_PAGE 8 +#define LPFC_MAX_WQ_PAGE_V0 4 #define LPFC_MAX_WQ_PAGE 8 #define LPFC_MAX_CQ_PAGE 4 #define LPFC_MAX_EQ_PAGE 8 @@ -187,11 +188,22 @@ struct lpfc_sli_intf { /* Active interrupt test count */ #define LPFC_ACT_INTR_CNT 4 +/* Algrithmns for scheduling FCP commands to WQs */ +#define LPFC_FCP_SCHED_ROUND_ROBIN 0 +#define LPFC_FCP_SCHED_BY_CPU 1 + /* Delay Multiplier constant */ #define LPFC_DMULT_CONST 651042 -#define LPFC_MIM_IMAX 636 -#define LPFC_FP_DEF_IMAX 10000 -#define LPFC_SP_DEF_IMAX 10000 + +/* Configuration of Interrupts / sec for entire HBA port */ +#define LPFC_MIN_IMAX 5000 +#define LPFC_MAX_IMAX 5000000 +#define LPFC_DEF_IMAX 50000 + +#define LPFC_MIN_CPU_MAP 0 +#define LPFC_MAX_CPU_MAP 2 +#define LPFC_HBA_CPU_MAP 1 +#define LPFC_DRIVER_CPU_MAP 2 /* Default */ /* PORT_CAPABILITIES constants. */ #define LPFC_MAX_SUPPORTED_PAGES 8 @@ -222,25 +234,24 @@ struct ulp_bde64 { uint32_t addrHigh; }; +/* Maximun size of immediate data that can fit into a 128 byte WQE */ +#define LPFC_MAX_BDE_IMM_SIZE 64 + struct lpfc_sli4_flags { uint32_t word0; #define lpfc_idx_rsrc_rdy_SHIFT 0 #define lpfc_idx_rsrc_rdy_MASK 0x00000001 #define lpfc_idx_rsrc_rdy_WORD word0 #define LPFC_IDX_RSRC_RDY 1 -#define lpfc_xri_rsrc_rdy_SHIFT 1 -#define lpfc_xri_rsrc_rdy_MASK 0x00000001 -#define lpfc_xri_rsrc_rdy_WORD word0 -#define LPFC_XRI_RSRC_RDY 1 -#define lpfc_rpi_rsrc_rdy_SHIFT 2 +#define lpfc_rpi_rsrc_rdy_SHIFT 1 #define lpfc_rpi_rsrc_rdy_MASK 0x00000001 #define lpfc_rpi_rsrc_rdy_WORD word0 #define LPFC_RPI_RSRC_RDY 1 -#define lpfc_vpi_rsrc_rdy_SHIFT 3 +#define lpfc_vpi_rsrc_rdy_SHIFT 2 #define lpfc_vpi_rsrc_rdy_MASK 0x00000001 #define lpfc_vpi_rsrc_rdy_WORD word0 #define LPFC_VPI_RSRC_RDY 1 -#define lpfc_vfi_rsrc_rdy_SHIFT 4 +#define lpfc_vfi_rsrc_rdy_SHIFT 3 #define lpfc_vfi_rsrc_rdy_MASK 0x00000001 #define lpfc_vfi_rsrc_rdy_WORD word0 #define LPFC_VFI_RSRC_RDY 1 @@ -321,6 +332,10 @@ struct lpfc_cqe { #define CQE_STATUS_CMD_REJECT 0xb #define CQE_STATUS_FCP_TGT_LENCHECK 0xc #define CQE_STATUS_NEED_BUFF_ENTRY 0xf +#define CQE_STATUS_DI_ERROR 0x16 + +/* Used when mapping CQE status to IOCB */ +#define LPFC_IOCB_STATUS_MASK 0xf /* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). */ #define CQE_HW_STATUS_NO_ERR 0x0 @@ -334,6 +349,12 @@ struct lpfc_cqe { #define CQE_CODE_XRI_ABORTED 0x5 #define CQE_CODE_RECEIVE_V1 0x9 +/* + * Define mask value for xri_aborted and wcqe completed CQE extended status. + * Currently, extended status is limited to 9 bits (0x0 -> 0x103) . 
+ */ +#define WCQE_PARAM_MASK 0x1FF + /* completion queue entry for wqe completions */ struct lpfc_wcqe_complete { uint32_t word0; @@ -348,6 +369,21 @@ struct lpfc_wcqe_complete { #define lpfc_wcqe_c_hw_status_WORD word0 uint32_t total_data_placed; uint32_t parameter; +#define lpfc_wcqe_c_bg_edir_SHIFT 5 +#define lpfc_wcqe_c_bg_edir_MASK 0x00000001 +#define lpfc_wcqe_c_bg_edir_WORD parameter +#define lpfc_wcqe_c_bg_tdpv_SHIFT 3 +#define lpfc_wcqe_c_bg_tdpv_MASK 0x00000001 +#define lpfc_wcqe_c_bg_tdpv_WORD parameter +#define lpfc_wcqe_c_bg_re_SHIFT 2 +#define lpfc_wcqe_c_bg_re_MASK 0x00000001 +#define lpfc_wcqe_c_bg_re_WORD parameter +#define lpfc_wcqe_c_bg_ae_SHIFT 1 +#define lpfc_wcqe_c_bg_ae_MASK 0x00000001 +#define lpfc_wcqe_c_bg_ae_WORD parameter +#define lpfc_wcqe_c_bg_ge_SHIFT 0 +#define lpfc_wcqe_c_bg_ge_MASK 0x00000001 +#define lpfc_wcqe_c_bg_ge_WORD parameter uint32_t word3; #define lpfc_wcqe_c_valid_SHIFT lpfc_cqe_valid_SHIFT #define lpfc_wcqe_c_valid_MASK lpfc_cqe_valid_MASK @@ -359,8 +395,8 @@ struct lpfc_wcqe_complete { #define lpfc_wcqe_c_pv_MASK 0x00000001 #define lpfc_wcqe_c_pv_WORD word3 #define lpfc_wcqe_c_priority_SHIFT 24 -#define lpfc_wcqe_c_priority_MASK 0x00000007 -#define lpfc_wcqe_c_priority_WORD word3 +#define lpfc_wcqe_c_priority_MASK 0x00000007 +#define lpfc_wcqe_c_priority_WORD word3 #define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT #define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK #define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD @@ -593,7 +629,7 @@ struct lpfc_register { #define lpfc_sliport_status_rdy_SHIFT 23 #define lpfc_sliport_status_rdy_MASK 0x1 #define lpfc_sliport_status_rdy_WORD word0 -#define MAX_IF_TYPE_2_RESETS 1000 +#define MAX_IF_TYPE_2_RESETS 6 #define LPFC_CTL_PORT_CTL_OFFSET 0x408 #define lpfc_sliport_ctrl_end_SHIFT 30 @@ -676,24 +712,41 @@ struct lpfc_register { * BAR0. The offsets are the same so the driver must account for * any base address difference. 
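/*
 * Illustrative sketch (not part of the patch): the *_SHIFT/_MASK/_WORD
 * triplets above describe single-bit and multi-bit sub-fields of a 32-bit
 * CQE word.  lpfc reads and writes them through its bf_get()/bf_set() style
 * helpers; a standalone equivalent (the FLD_GET macro and main() below are
 * local to this sketch, not driver code) is:
 */
#include <stdint.h>
#include <stdio.h>

#define FLD_GET(word, shift, mask)	(((word) >> (shift)) & (mask))

int main(void)
{
	/* Example WCQE "parameter" word with the BlockGuard guard-error bit set. */
	uint32_t parameter = 0x00000001;

	printf("bg guard err: %u, ref tag err: %u, app tag err: %u\n",
	       FLD_GET(parameter, 0, 0x1),	/* lpfc_wcqe_c_bg_ge */
	       FLD_GET(parameter, 2, 0x1),	/* lpfc_wcqe_c_bg_re */
	       FLD_GET(parameter, 1, 0x1));	/* lpfc_wcqe_c_bg_ae */
	return 0;
}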
*/ -#define LPFC_RQ_DOORBELL 0x00A0 -#define lpfc_rq_doorbell_num_posted_SHIFT 16 -#define lpfc_rq_doorbell_num_posted_MASK 0x3FFF -#define lpfc_rq_doorbell_num_posted_WORD word0 -#define lpfc_rq_doorbell_id_SHIFT 0 -#define lpfc_rq_doorbell_id_MASK 0xFFFF -#define lpfc_rq_doorbell_id_WORD word0 - -#define LPFC_WQ_DOORBELL 0x0040 -#define lpfc_wq_doorbell_num_posted_SHIFT 24 -#define lpfc_wq_doorbell_num_posted_MASK 0x00FF -#define lpfc_wq_doorbell_num_posted_WORD word0 -#define lpfc_wq_doorbell_index_SHIFT 16 -#define lpfc_wq_doorbell_index_MASK 0x00FF -#define lpfc_wq_doorbell_index_WORD word0 -#define lpfc_wq_doorbell_id_SHIFT 0 -#define lpfc_wq_doorbell_id_MASK 0xFFFF -#define lpfc_wq_doorbell_id_WORD word0 +#define LPFC_ULP0_RQ_DOORBELL 0x00A0 +#define LPFC_ULP1_RQ_DOORBELL 0x00C0 +#define lpfc_rq_db_list_fm_num_posted_SHIFT 24 +#define lpfc_rq_db_list_fm_num_posted_MASK 0x00FF +#define lpfc_rq_db_list_fm_num_posted_WORD word0 +#define lpfc_rq_db_list_fm_index_SHIFT 16 +#define lpfc_rq_db_list_fm_index_MASK 0x00FF +#define lpfc_rq_db_list_fm_index_WORD word0 +#define lpfc_rq_db_list_fm_id_SHIFT 0 +#define lpfc_rq_db_list_fm_id_MASK 0xFFFF +#define lpfc_rq_db_list_fm_id_WORD word0 +#define lpfc_rq_db_ring_fm_num_posted_SHIFT 16 +#define lpfc_rq_db_ring_fm_num_posted_MASK 0x3FFF +#define lpfc_rq_db_ring_fm_num_posted_WORD word0 +#define lpfc_rq_db_ring_fm_id_SHIFT 0 +#define lpfc_rq_db_ring_fm_id_MASK 0xFFFF +#define lpfc_rq_db_ring_fm_id_WORD word0 + +#define LPFC_ULP0_WQ_DOORBELL 0x0040 +#define LPFC_ULP1_WQ_DOORBELL 0x0060 +#define lpfc_wq_db_list_fm_num_posted_SHIFT 24 +#define lpfc_wq_db_list_fm_num_posted_MASK 0x00FF +#define lpfc_wq_db_list_fm_num_posted_WORD word0 +#define lpfc_wq_db_list_fm_index_SHIFT 16 +#define lpfc_wq_db_list_fm_index_MASK 0x00FF +#define lpfc_wq_db_list_fm_index_WORD word0 +#define lpfc_wq_db_list_fm_id_SHIFT 0 +#define lpfc_wq_db_list_fm_id_MASK 0xFFFF +#define lpfc_wq_db_list_fm_id_WORD word0 +#define lpfc_wq_db_ring_fm_num_posted_SHIFT 16 +#define lpfc_wq_db_ring_fm_num_posted_MASK 0x3FFF +#define lpfc_wq_db_ring_fm_num_posted_WORD word0 +#define lpfc_wq_db_ring_fm_id_SHIFT 0 +#define lpfc_wq_db_ring_fm_id_MASK 0xFFFF +#define lpfc_wq_db_ring_fm_id_WORD word0 #define LPFC_EQCQ_DOORBELL 0x0120 #define lpfc_eqcq_doorbell_se_SHIFT 31 @@ -715,12 +768,20 @@ struct lpfc_register { #define lpfc_eqcq_doorbell_eqci_SHIFT 9 #define lpfc_eqcq_doorbell_eqci_MASK 0x0001 #define lpfc_eqcq_doorbell_eqci_WORD word0 -#define lpfc_eqcq_doorbell_cqid_SHIFT 0 -#define lpfc_eqcq_doorbell_cqid_MASK 0x03FF -#define lpfc_eqcq_doorbell_cqid_WORD word0 -#define lpfc_eqcq_doorbell_eqid_SHIFT 0 -#define lpfc_eqcq_doorbell_eqid_MASK 0x01FF -#define lpfc_eqcq_doorbell_eqid_WORD word0 +#define lpfc_eqcq_doorbell_cqid_lo_SHIFT 0 +#define lpfc_eqcq_doorbell_cqid_lo_MASK 0x03FF +#define lpfc_eqcq_doorbell_cqid_lo_WORD word0 +#define lpfc_eqcq_doorbell_cqid_hi_SHIFT 11 +#define lpfc_eqcq_doorbell_cqid_hi_MASK 0x001F +#define lpfc_eqcq_doorbell_cqid_hi_WORD word0 +#define lpfc_eqcq_doorbell_eqid_lo_SHIFT 0 +#define lpfc_eqcq_doorbell_eqid_lo_MASK 0x01FF +#define lpfc_eqcq_doorbell_eqid_lo_WORD word0 +#define lpfc_eqcq_doorbell_eqid_hi_SHIFT 11 +#define lpfc_eqcq_doorbell_eqid_hi_MASK 0x001F +#define lpfc_eqcq_doorbell_eqid_hi_WORD word0 +#define LPFC_CQID_HI_FIELD_SHIFT 10 +#define LPFC_EQID_HI_FIELD_SHIFT 9 #define LPFC_BMBX 0x0160 #define lpfc_bmbx_addr_SHIFT 2 @@ -845,18 +906,25 @@ struct mbox_header { #define LPFC_MBOX_OPCODE_MQ_CREATE 0x15 #define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 
0x20 #define LPFC_MBOX_OPCODE_NOP 0x21 +#define LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY 0x29 #define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35 #define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36 #define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37 #define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A #define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D +#define LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG 0x3E +#define LPFC_MBOX_OPCODE_SET_BOOT_CONFIG 0x43 #define LPFC_MBOX_OPCODE_GET_PORT_NAME 0x4D #define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A +#define LPFC_MBOX_OPCODE_GET_VPD_DATA 0x5B +#define LPFC_MBOX_OPCODE_SEND_ACTIVATION 0x73 +#define LPFC_MBOX_OPCODE_RESET_LICENSES 0x74 #define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A #define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B #define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C #define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT 0x9D #define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG 0xA0 +#define LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES 0xA1 #define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG 0xA4 #define LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG 0xA5 #define LPFC_MBOX_OPCODE_GET_PROFILE_LIST 0xA6 @@ -911,6 +979,13 @@ struct eq_context { uint32_t reserved3; }; +struct eq_delay_info { + uint32_t eq_id; + uint32_t phase; + uint32_t delay_multi; +}; +#define LPFC_MAX_EQ_DELAY 8 + struct sgl_page_pairs { uint32_t sgl_pg0_addr_lo; uint32_t sgl_pg0_addr_hi; @@ -973,6 +1048,19 @@ struct lpfc_mbx_eq_create { } u; }; +struct lpfc_mbx_modify_eq_delay { + struct mbox_header header; + union { + struct { + uint32_t num_eq; + struct eq_delay_info eq[LPFC_MAX_EQ_DELAY]; + } request; + struct { + uint32_t word0; + } response; + } u; +}; + struct lpfc_mbx_eq_destroy { struct mbox_header header; union { @@ -1069,12 +1157,22 @@ struct lpfc_mbx_wq_create { struct { /* Version 0 Request */ uint32_t word0; #define lpfc_mbx_wq_create_num_pages_SHIFT 0 -#define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_wq_create_num_pages_MASK 0x000000FF #define lpfc_mbx_wq_create_num_pages_WORD word0 +#define lpfc_mbx_wq_create_dua_SHIFT 8 +#define lpfc_mbx_wq_create_dua_MASK 0x00000001 +#define lpfc_mbx_wq_create_dua_WORD word0 #define lpfc_mbx_wq_create_cq_id_SHIFT 16 #define lpfc_mbx_wq_create_cq_id_MASK 0x0000FFFF #define lpfc_mbx_wq_create_cq_id_WORD word0 - struct dma_address page[LPFC_MAX_WQ_PAGE]; + struct dma_address page[LPFC_MAX_WQ_PAGE_V0]; + uint32_t word9; +#define lpfc_mbx_wq_create_bua_SHIFT 0 +#define lpfc_mbx_wq_create_bua_MASK 0x00000001 +#define lpfc_mbx_wq_create_bua_WORD word9 +#define lpfc_mbx_wq_create_ulp_num_SHIFT 8 +#define lpfc_mbx_wq_create_ulp_num_MASK 0x000000FF +#define lpfc_mbx_wq_create_ulp_num_WORD word9 } request; struct { /* Version 1 Request */ uint32_t word0; /* Word 0 is the same as in v0 */ @@ -1098,6 +1196,17 @@ struct lpfc_mbx_wq_create { #define lpfc_mbx_wq_create_q_id_SHIFT 0 #define lpfc_mbx_wq_create_q_id_MASK 0x0000FFFF #define lpfc_mbx_wq_create_q_id_WORD word0 + uint32_t doorbell_offset; + uint32_t word2; +#define lpfc_mbx_wq_create_bar_set_SHIFT 0 +#define lpfc_mbx_wq_create_bar_set_MASK 0x0000FFFF +#define lpfc_mbx_wq_create_bar_set_WORD word2 +#define WQ_PCI_BAR_0_AND_1 0x00 +#define WQ_PCI_BAR_2_AND_3 0x01 +#define WQ_PCI_BAR_4_AND_5 0x02 +#define lpfc_mbx_wq_create_db_format_SHIFT 16 +#define lpfc_mbx_wq_create_db_format_MASK 0x0000FFFF +#define lpfc_mbx_wq_create_db_format_WORD word2 } response; } u; }; @@ -1161,14 +1270,31 @@ struct lpfc_mbx_rq_create { #define lpfc_mbx_rq_create_num_pages_SHIFT 0 #define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF #define lpfc_mbx_rq_create_num_pages_WORD 
word0 +#define lpfc_mbx_rq_create_dua_SHIFT 16 +#define lpfc_mbx_rq_create_dua_MASK 0x00000001 +#define lpfc_mbx_rq_create_dua_WORD word0 +#define lpfc_mbx_rq_create_bqu_SHIFT 17 +#define lpfc_mbx_rq_create_bqu_MASK 0x00000001 +#define lpfc_mbx_rq_create_bqu_WORD word0 +#define lpfc_mbx_rq_create_ulp_num_SHIFT 24 +#define lpfc_mbx_rq_create_ulp_num_MASK 0x000000FF +#define lpfc_mbx_rq_create_ulp_num_WORD word0 struct rq_context context; struct dma_address page[LPFC_MAX_WQ_PAGE]; } request; struct { uint32_t word0; -#define lpfc_mbx_rq_create_q_id_SHIFT 0 -#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF -#define lpfc_mbx_rq_create_q_id_WORD word0 +#define lpfc_mbx_rq_create_q_id_SHIFT 0 +#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_q_id_WORD word0 + uint32_t doorbell_offset; + uint32_t word2; +#define lpfc_mbx_rq_create_bar_set_SHIFT 0 +#define lpfc_mbx_rq_create_bar_set_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_bar_set_WORD word2 +#define lpfc_mbx_rq_create_db_format_SHIFT 16 +#define lpfc_mbx_rq_create_db_format_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_db_format_WORD word2 } response; } u; }; @@ -1243,6 +1369,11 @@ struct lpfc_mbx_mq_create_ext { #define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK #define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001 #define lpfc_mbx_mq_create_ext_async_evt_link_WORD async_evt_bmap +#define LPFC_EVT_CODE_LINK_NO_LINK 0x0 +#define LPFC_EVT_CODE_LINK_10_MBIT 0x1 +#define LPFC_EVT_CODE_LINK_100_MBIT 0x2 +#define LPFC_EVT_CODE_LINK_1_GBIT 0x3 +#define LPFC_EVT_CODE_LINK_10_GBIT 0x4 #define lpfc_mbx_mq_create_ext_async_evt_fip_SHIFT LPFC_TRAILER_CODE_FCOE #define lpfc_mbx_mq_create_ext_async_evt_fip_MASK 0x00000001 #define lpfc_mbx_mq_create_ext_async_evt_fip_WORD async_evt_bmap @@ -1252,6 +1383,13 @@ struct lpfc_mbx_mq_create_ext { #define lpfc_mbx_mq_create_ext_async_evt_fc_SHIFT LPFC_TRAILER_CODE_FC #define lpfc_mbx_mq_create_ext_async_evt_fc_MASK 0x00000001 #define lpfc_mbx_mq_create_ext_async_evt_fc_WORD async_evt_bmap +#define LPFC_EVT_CODE_FC_NO_LINK 0x0 +#define LPFC_EVT_CODE_FC_1_GBAUD 0x1 +#define LPFC_EVT_CODE_FC_2_GBAUD 0x2 +#define LPFC_EVT_CODE_FC_4_GBAUD 0x4 +#define LPFC_EVT_CODE_FC_8_GBAUD 0x8 +#define LPFC_EVT_CODE_FC_10_GBAUD 0xA +#define LPFC_EVT_CODE_FC_16_GBAUD 0x10 #define lpfc_mbx_mq_create_ext_async_evt_sli_SHIFT LPFC_TRAILER_CODE_SLI #define lpfc_mbx_mq_create_ext_async_evt_sli_MASK 0x00000001 #define lpfc_mbx_mq_create_ext_async_evt_sli_WORD async_evt_bmap @@ -1314,6 +1452,33 @@ struct lpfc_mbx_get_rsrc_extent_info { } u; }; +struct lpfc_mbx_query_fw_config { + struct mbox_header header; + struct { + uint32_t config_number; +#define LPFC_FC_FCOE 0x00000007 + uint32_t asic_revision; + uint32_t physical_port; + uint32_t function_mode; +#define LPFC_FCOE_INI_MODE 0x00000040 +#define LPFC_FCOE_TGT_MODE 0x00000080 +#define LPFC_DUA_MODE 0x00000800 + uint32_t ulp0_mode; +#define LPFC_ULP_FCOE_INIT_MODE 0x00000040 +#define LPFC_ULP_FCOE_TGT_MODE 0x00000080 + uint32_t ulp0_nap_words[12]; + uint32_t ulp1_mode; + uint32_t ulp1_nap_words[12]; + uint32_t function_capabilities; + uint32_t cqid_base; + uint32_t cqid_tot; + uint32_t eqid_base; + uint32_t eqid_tot; + uint32_t ulp0_nap2_words[2]; + uint32_t ulp1_nap2_words[2]; + } rsp; +}; + struct lpfc_id_range { uint32_t word5; #define lpfc_mbx_rsrc_id_word4_0_SHIFT 0 @@ -1332,6 +1497,11 @@ struct lpfc_mbx_set_link_diag_state { #define lpfc_mbx_set_diag_state_diag_SHIFT 0 #define lpfc_mbx_set_diag_state_diag_MASK 0x00000001 #define 
lpfc_mbx_set_diag_state_diag_WORD word0 +#define lpfc_mbx_set_diag_state_diag_bit_valid_SHIFT 2 +#define lpfc_mbx_set_diag_state_diag_bit_valid_MASK 0x00000001 +#define lpfc_mbx_set_diag_state_diag_bit_valid_WORD word0 +#define LPFC_DIAG_STATE_DIAG_BIT_VALID_NO_CHANGE 0 +#define LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE 1 #define lpfc_mbx_set_diag_state_link_num_SHIFT 16 #define lpfc_mbx_set_diag_state_link_num_MASK 0x0000003F #define lpfc_mbx_set_diag_state_link_num_WORD word0 @@ -1628,8 +1798,14 @@ struct fcf_record { #define lpfc_fcf_record_fc_map_2_MASK 0x000000FF #define lpfc_fcf_record_fc_map_2_WORD word7 #define lpfc_fcf_record_fcf_valid_SHIFT 24 -#define lpfc_fcf_record_fcf_valid_MASK 0x000000FF +#define lpfc_fcf_record_fcf_valid_MASK 0x00000001 #define lpfc_fcf_record_fcf_valid_WORD word7 +#define lpfc_fcf_record_fcf_fc_SHIFT 25 +#define lpfc_fcf_record_fcf_fc_MASK 0x00000001 +#define lpfc_fcf_record_fcf_fc_WORD word7 +#define lpfc_fcf_record_fcf_sol_SHIFT 31 +#define lpfc_fcf_record_fcf_sol_MASK 0x00000001 +#define lpfc_fcf_record_fcf_sol_WORD word7 uint32_t word8; #define lpfc_fcf_record_fcf_index_SHIFT 0 #define lpfc_fcf_record_fcf_index_MASK 0x0000FFFF @@ -1718,51 +1894,6 @@ struct lpfc_mbx_redisc_fcf_tbl { #define lpfc_mbx_redisc_fcf_index_WORD word12 }; -struct lpfc_mbx_query_fw_cfg { - struct mbox_header header; - uint32_t config_number; - uint32_t asic_rev; - uint32_t phys_port; - uint32_t function_mode; -/* firmware Function Mode */ -#define lpfc_function_mode_toe_SHIFT 0 -#define lpfc_function_mode_toe_MASK 0x00000001 -#define lpfc_function_mode_toe_WORD function_mode -#define lpfc_function_mode_nic_SHIFT 1 -#define lpfc_function_mode_nic_MASK 0x00000001 -#define lpfc_function_mode_nic_WORD function_mode -#define lpfc_function_mode_rdma_SHIFT 2 -#define lpfc_function_mode_rdma_MASK 0x00000001 -#define lpfc_function_mode_rdma_WORD function_mode -#define lpfc_function_mode_vm_SHIFT 3 -#define lpfc_function_mode_vm_MASK 0x00000001 -#define lpfc_function_mode_vm_WORD function_mode -#define lpfc_function_mode_iscsi_i_SHIFT 4 -#define lpfc_function_mode_iscsi_i_MASK 0x00000001 -#define lpfc_function_mode_iscsi_i_WORD function_mode -#define lpfc_function_mode_iscsi_t_SHIFT 5 -#define lpfc_function_mode_iscsi_t_MASK 0x00000001 -#define lpfc_function_mode_iscsi_t_WORD function_mode -#define lpfc_function_mode_fcoe_i_SHIFT 6 -#define lpfc_function_mode_fcoe_i_MASK 0x00000001 -#define lpfc_function_mode_fcoe_i_WORD function_mode -#define lpfc_function_mode_fcoe_t_SHIFT 7 -#define lpfc_function_mode_fcoe_t_MASK 0x00000001 -#define lpfc_function_mode_fcoe_t_WORD function_mode -#define lpfc_function_mode_dal_SHIFT 8 -#define lpfc_function_mode_dal_MASK 0x00000001 -#define lpfc_function_mode_dal_WORD function_mode -#define lpfc_function_mode_lro_SHIFT 9 -#define lpfc_function_mode_lro_MASK 0x00000001 -#define lpfc_function_mode_lro_WORD function_mode -#define lpfc_function_mode_flex10_SHIFT 10 -#define lpfc_function_mode_flex10_MASK 0x00000001 -#define lpfc_function_mode_flex10_WORD function_mode -#define lpfc_function_mode_ncsi_SHIFT 11 -#define lpfc_function_mode_ncsi_MASK 0x00000001 -#define lpfc_function_mode_ncsi_WORD function_mode -}; - /* Status field for embedded SLI_CONFIG mailbox command */ #define STATUS_SUCCESS 0x0 #define STATUS_FAILED 0x1 @@ -1835,6 +1966,9 @@ struct lpfc_mbx_init_vfi { struct lpfc_mbx_reg_vfi { uint32_t word1; +#define lpfc_reg_vfi_upd_SHIFT 29 +#define lpfc_reg_vfi_upd_MASK 0x00000001 +#define lpfc_reg_vfi_upd_WORD word1 #define lpfc_reg_vfi_vp_SHIFT 28 
#define lpfc_reg_vfi_vp_MASK 0x00000001 #define lpfc_reg_vfi_vp_WORD word1 @@ -2454,6 +2588,9 @@ struct lpfc_sli4_parameters { #define cfg_mqv_WORD word6 uint32_t word7; uint32_t word8; +#define cfg_wqsize_SHIFT 8 +#define cfg_wqsize_MASK 0x0000000f +#define cfg_wqsize_WORD word8 #define cfg_wqv_SHIFT 14 #define cfg_wqv_MASK 0x00000003 #define cfg_wqv_WORD word8 @@ -2479,6 +2616,9 @@ struct lpfc_sli4_parameters { #define cfg_phwq_SHIFT 15 #define cfg_phwq_MASK 0x00000001 #define cfg_phwq_WORD word12 +#define cfg_oas_SHIFT 25 +#define cfg_oas_MASK 0x00000001 +#define cfg_oas_WORD word12 #define cfg_loopbk_scope_SHIFT 28 #define cfg_loopbk_scope_MASK 0x0000000f #define cfg_loopbk_scope_WORD word12 @@ -2506,7 +2646,7 @@ struct lpfc_mbx_get_sli4_parameters { }; struct lpfc_rscr_desc_generic { -#define LPFC_RSRC_DESC_WSIZE 18 +#define LPFC_RSRC_DESC_WSIZE 22 uint32_t desc[LPFC_RSRC_DESC_WSIZE]; }; @@ -2516,6 +2656,9 @@ struct lpfc_rsrc_desc_pcie { #define lpfc_rsrc_desc_pcie_type_MASK 0x000000ff #define lpfc_rsrc_desc_pcie_type_WORD word0 #define LPFC_RSRC_DESC_TYPE_PCIE 0x40 +#define lpfc_rsrc_desc_pcie_length_SHIFT 8 +#define lpfc_rsrc_desc_pcie_length_MASK 0x000000ff +#define lpfc_rsrc_desc_pcie_length_WORD word0 uint32_t word1; #define lpfc_rsrc_desc_pcie_pfnum_SHIFT 0 #define lpfc_rsrc_desc_pcie_pfnum_MASK 0x000000ff @@ -2543,6 +2686,12 @@ struct lpfc_rsrc_desc_fcfcoe { #define lpfc_rsrc_desc_fcfcoe_type_MASK 0x000000ff #define lpfc_rsrc_desc_fcfcoe_type_WORD word0 #define LPFC_RSRC_DESC_TYPE_FCFCOE 0x43 +#define lpfc_rsrc_desc_fcfcoe_length_SHIFT 8 +#define lpfc_rsrc_desc_fcfcoe_length_MASK 0x000000ff +#define lpfc_rsrc_desc_fcfcoe_length_WORD word0 +#define LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD 0 +#define LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH 72 +#define LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH 88 uint32_t word1; #define lpfc_rsrc_desc_fcfcoe_vfnum_SHIFT 0 #define lpfc_rsrc_desc_fcfcoe_vfnum_MASK 0x000000ff @@ -2601,6 +2750,12 @@ struct lpfc_rsrc_desc_fcfcoe { #define lpfc_rsrc_desc_fcfcoe_eq_cnt_SHIFT 16 #define lpfc_rsrc_desc_fcfcoe_eq_cnt_MASK 0x0000ffff #define lpfc_rsrc_desc_fcfcoe_eq_cnt_WORD word13 +/* extended FC/FCoE Resource Descriptor when length = 88 bytes */ + uint32_t bw_min; + uint32_t bw_max; + uint32_t iops_min; + uint32_t iops_max; + uint32_t reserved[4]; }; struct lpfc_func_cfg { @@ -2846,6 +3001,7 @@ struct lpfc_mqe { struct lpfc_mbx_mq_create mq_create; struct lpfc_mbx_mq_create_ext mq_create_ext; struct lpfc_mbx_eq_create eq_create; + struct lpfc_mbx_modify_eq_delay eq_delay; struct lpfc_mbx_cq_create cq_create; struct lpfc_mbx_wq_create wq_create; struct lpfc_mbx_rq_create rq_create; @@ -2864,7 +3020,7 @@ struct lpfc_mqe { struct lpfc_mbx_read_config rd_config; struct lpfc_mbx_request_features req_ftrs; struct lpfc_mbx_post_hdr_tmpl hdr_tmpl; - struct lpfc_mbx_query_fw_cfg query_fw_cfg; + struct lpfc_mbx_query_fw_config query_fw_cfg; struct lpfc_mbx_supp_pages supp_pages; struct lpfc_mbx_pc_sli4_params sli4_params; struct lpfc_mbx_get_sli4_parameters get_sli4_parameters; @@ -3055,6 +3211,28 @@ struct lpfc_acqe_fc_la { #define LPFC_FC_LA_EVENT_TYPE_SHARED_LINK 0x2 }; +struct lpfc_acqe_misconfigured_event { + struct { + uint32_t word0; +#define lpfc_sli_misconfigured_port0_SHIFT 0 +#define lpfc_sli_misconfigured_port0_MASK 0x000000FF +#define lpfc_sli_misconfigured_port0_WORD word0 +#define lpfc_sli_misconfigured_port1_SHIFT 8 +#define lpfc_sli_misconfigured_port1_MASK 0x000000FF +#define lpfc_sli_misconfigured_port1_WORD word0 +#define lpfc_sli_misconfigured_port2_SHIFT 16 
+#define lpfc_sli_misconfigured_port2_MASK 0x000000FF +#define lpfc_sli_misconfigured_port2_WORD word0 +#define lpfc_sli_misconfigured_port3_SHIFT 24 +#define lpfc_sli_misconfigured_port3_MASK 0x000000FF +#define lpfc_sli_misconfigured_port3_WORD word0 + } theEvent; +#define LPFC_SLI_EVENT_STATUS_VALID 0x00 +#define LPFC_SLI_EVENT_STATUS_NOT_PRESENT 0x01 +#define LPFC_SLI_EVENT_STATUS_WRONG_TYPE 0x02 +#define LPFC_SLI_EVENT_STATUS_UNSUPPORTED 0x03 +}; + struct lpfc_acqe_sli { uint32_t event_data1; uint32_t event_data2; @@ -3065,6 +3243,7 @@ struct lpfc_acqe_sli { #define LPFC_SLI_EVENT_TYPE_NORM_TEMP 0x3 #define LPFC_SLI_EVENT_TYPE_NVLOG_POST 0x4 #define LPFC_SLI_EVENT_TYPE_DIAG_DUMP 0x5 +#define LPFC_SLI_EVENT_TYPE_MISCONFIGURED 0x9 }; /* @@ -3095,6 +3274,9 @@ struct wqe_common { #define wqe_dif_SHIFT 0 #define wqe_dif_MASK 0x00000003 #define wqe_dif_WORD word7 +#define LPFC_WQE_DIF_PASSTHRU 1 +#define LPFC_WQE_DIF_STRIP 2 +#define LPFC_WQE_DIF_INSERT 3 #define wqe_ct_SHIFT 2 #define wqe_ct_MASK 0x00000003 #define wqe_ct_WORD word7 @@ -3143,6 +3325,9 @@ struct wqe_common { #define wqe_ebde_cnt_SHIFT 0 #define wqe_ebde_cnt_MASK 0x0000000f #define wqe_ebde_cnt_WORD word10 +#define wqe_oas_SHIFT 6 +#define wqe_oas_MASK 0x00000001 +#define wqe_oas_WORD word10 #define wqe_lenloc_SHIFT 7 #define wqe_lenloc_MASK 0x00000003 #define wqe_lenloc_WORD word10 @@ -3260,13 +3445,20 @@ struct els_request64_wqe { #define els_req64_hopcnt_SHIFT 24 #define els_req64_hopcnt_MASK 0x000000ff #define els_req64_hopcnt_WORD word13 - uint32_t reserved[2]; + uint32_t word14; + uint32_t max_response_payload_len; }; struct xmit_els_rsp64_wqe { struct ulp_bde64 bde; uint32_t response_payload_len; - uint32_t rsvd4; + uint32_t word4; +#define els_rsp64_sid_SHIFT 0 +#define els_rsp64_sid_MASK 0x00FFFFFF +#define els_rsp64_sid_WORD word4 +#define els_rsp64_sp_SHIFT 24 +#define els_rsp64_sp_MASK 0x00000001 +#define els_rsp64_sp_WORD word4 struct wqe_did wqe_dest; struct wqe_common wqe_com; /* words 6-11 */ uint32_t word12; @@ -3313,7 +3505,11 @@ struct xmit_bls_rsp64_wqe { uint32_t rsrvd4; struct wqe_did wqe_dest; struct wqe_common wqe_com; /* words 6-11 */ - uint32_t rsvd_12_15[4]; + uint32_t word12; +#define xmit_bls_rsp64_temprpi_SHIFT 0 +#define xmit_bls_rsp64_temprpi_MASK 0x0000ffff +#define xmit_bls_rsp64_temprpi_WORD word12 + uint32_t rsvd_13_15[3]; }; struct wqe_rctl_dfctl { @@ -3365,7 +3561,8 @@ struct gen_req64_wqe { uint32_t relative_offset; struct wqe_rctl_dfctl wge_ctl; /* word 5 */ struct wqe_common wqe_com; /* words 6-11 */ - uint32_t rsvd_12_15[4]; + uint32_t rsvd_12_14[3]; + uint32_t max_response_payload_len; }; struct create_xri_wqe { @@ -3395,7 +3592,13 @@ struct abort_cmd_wqe { struct fcp_iwrite64_wqe { struct ulp_bde64 bde; - uint32_t payload_offset_len; + uint32_t word3; +#define cmd_buff_len_SHIFT 16 +#define cmd_buff_len_MASK 0x00000ffff +#define cmd_buff_len_WORD word3 +#define payload_offset_len_SHIFT 0 +#define payload_offset_len_MASK 0x0000ffff +#define payload_offset_len_WORD word3 uint32_t total_xfer_len; uint32_t initial_xfer_len; struct wqe_common wqe_com; /* words 6-11 */ @@ -3405,7 +3608,13 @@ struct fcp_iwrite64_wqe { struct fcp_iread64_wqe { struct ulp_bde64 bde; - uint32_t payload_offset_len; /* word 3 */ + uint32_t word3; +#define cmd_buff_len_SHIFT 16 +#define cmd_buff_len_MASK 0x00000ffff +#define cmd_buff_len_WORD word3 +#define payload_offset_len_SHIFT 0 +#define payload_offset_len_MASK 0x0000ffff +#define payload_offset_len_WORD word3 uint32_t total_xfer_len; /* word 4 */ uint32_t 
rsrvd5; /* word 5 */ struct wqe_common wqe_com; /* words 6-11 */ @@ -3415,7 +3624,13 @@ struct fcp_iread64_wqe { struct fcp_icmnd64_wqe { struct ulp_bde64 bde; /* words 0-2 */ - uint32_t rsrvd3; /* word 3 */ + uint32_t word3; +#define cmd_buff_len_SHIFT 16 +#define cmd_buff_len_MASK 0x00000ffff +#define cmd_buff_len_WORD word3 +#define payload_offset_len_SHIFT 0 +#define payload_offset_len_MASK 0x0000ffff +#define payload_offset_len_WORD word3 uint32_t rsrvd4; /* word 4 */ uint32_t rsrvd5; /* word 5 */ struct wqe_common wqe_com; /* words 6-11 */ @@ -3439,6 +3654,13 @@ union lpfc_wqe { struct gen_req64_wqe gen_req; }; +union lpfc_wqe128 { + uint32_t words[32]; + struct lpfc_wqe_generic generic; + struct xmit_seq64_wqe xmit_sequence; + struct gen_req64_wqe gen_req; +}; + #define LPFC_GROUP_OJECT_MAGIC_NUM 0xfeaa0001 #define LPFC_FILE_TYPE_GROUP 0xf7 #define LPFC_FILE_ID_GROUP 0xa2 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index dfea2dada02..06f9a5b79e6 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2011 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -32,6 +32,8 @@ #include <linux/aer.h> #include <linux/slab.h> #include <linux/firmware.h> +#include <linux/miscdevice.h> +#include <linux/percpu.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -57,14 +59,18 @@ char *_dump_buf_dif; unsigned long _dump_buf_dif_order; spinlock_t _dump_buf_lock; +/* Used when mapping IRQ vectors in a driver centric manner */ +uint16_t *lpfc_used_cpu; +uint32_t lpfc_present_cpu; + static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); static int lpfc_post_rcv_buf(struct lpfc_hba *); static int lpfc_sli4_queue_verify(struct lpfc_hba *); static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); static int lpfc_setup_endian_order(struct lpfc_hba *); static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); -static void lpfc_free_sgl_list(struct lpfc_hba *); -static int lpfc_init_sgl_list(struct lpfc_hba *); +static void lpfc_free_els_sgl_list(struct lpfc_hba *); +static void lpfc_init_sgl_list(struct lpfc_hba *); static int lpfc_init_active_sgl_array(struct lpfc_hba *); static void lpfc_free_active_sgl(struct lpfc_hba *); static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); @@ -72,6 +78,9 @@ static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); +static void lpfc_sli4_disable_intr(struct lpfc_hba *); +static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); +static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); static struct scsi_transport_template *lpfc_transport_template = NULL; static struct scsi_transport_template *lpfc_vport_transport_template = NULL; @@ -464,10 +473,22 @@ lpfc_config_port_post(struct lpfc_hba *phba) lpfc_sli_read_link_ste(phba); /* Reset the DFT_HBA_Q_DEPTH to the max xri */ - if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) - phba->cfg_hba_queue_depth = - (mb->un.varRdConfig.max_xri + 1) - - 
lpfc_sli4_get_els_iocb_cnt(phba); + i = (mb->un.varRdConfig.max_xri + 1); + if (phba->cfg_hba_queue_depth > i) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "3359 HBA queue depth changed from %d to %d\n", + phba->cfg_hba_queue_depth, i); + phba->cfg_hba_queue_depth = i; + } + + /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ + i = (mb->un.varRdConfig.max_xri >> 3); + if (phba->pport->cfg_lun_queue_depth > i) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "3360 LUN queue depth changed from %d to %d\n", + phba->pport->cfg_lun_queue_depth, i); + phba->pport->cfg_lun_queue_depth = i; + } phba->lmt = mb->un.varRdConfig.lmt; @@ -477,11 +498,11 @@ lpfc_config_port_post(struct lpfc_hba *phba) phba->link_state = LPFC_LINK_DOWN; /* Only process IOCBs on ELS ring till hba_state is READY */ - if (psli->ring[psli->extra_ring].cmdringaddr) + if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr) psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT; - if (psli->ring[psli->fcp_ring].cmdringaddr) + if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr) psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; - if (psli->ring[psli->next_ring].cmdringaddr) + if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr) psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT; /* Post receive buffers for desired rings */ @@ -538,13 +559,16 @@ lpfc_config_port_post(struct lpfc_hba *phba) /* Set up ring-0 (ELS) timer */ timeout = phba->fc_ratov * 2; - mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); + mod_timer(&vport->els_tmofunc, + jiffies + msecs_to_jiffies(1000 * timeout)); /* Set up heart beat (HB) timer */ - mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + mod_timer(&phba->hb_tmofunc, + jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); phba->hb_outstanding = 0; phba->last_completion_time = jiffies; /* Set up error attention (ERATT) polling timer */ - mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); + mod_timer(&phba->eratt_poll, + jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); if (phba->hba_flag & LINK_DISABLED) { lpfc_printf_log(phba, @@ -796,58 +820,153 @@ lpfc_hba_down_prep(struct lpfc_hba *phba) } /** - * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset + * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free + * rspiocb which got deferred + * * @phba: pointer to lpfc HBA data structure. * - * This routine will do uninitialization after the HBA is reset when bring - * down the SLI Layer. + * This routine will cleanup completed slow path events after HBA is reset + * when bringing down the SLI Layer. + * * * Return codes - * 0 - success. - * Any other value - error. + * void. 
**/ -static int -lpfc_hba_down_post_s3(struct lpfc_hba *phba) +static void +lpfc_sli4_free_sp_events(struct lpfc_hba *phba) +{ + struct lpfc_iocbq *rspiocbq; + struct hbq_dmabuf *dmabuf; + struct lpfc_cq_event *cq_event; + + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~HBA_SP_QUEUE_EVT; + spin_unlock_irq(&phba->hbalock); + + while (!list_empty(&phba->sli4_hba.sp_queue_event)) { + /* Get the response iocb from the head of work queue */ + spin_lock_irq(&phba->hbalock); + list_remove_head(&phba->sli4_hba.sp_queue_event, + cq_event, struct lpfc_cq_event, list); + spin_unlock_irq(&phba->hbalock); + + switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { + case CQE_CODE_COMPL_WQE: + rspiocbq = container_of(cq_event, struct lpfc_iocbq, + cq_event); + lpfc_sli_release_iocbq(phba, rspiocbq); + break; + case CQE_CODE_RECEIVE: + case CQE_CODE_RECEIVE_V1: + dmabuf = container_of(cq_event, struct hbq_dmabuf, + cq_event); + lpfc_in_buf_free(phba, &dmabuf->dbuf); + } + } +} + +/** + * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset + * @phba: pointer to lpfc HBA data structure. + * + * This routine will cleanup posted ELS buffers after the HBA is reset + * when bringing down the SLI Layer. + * + * + * Return codes + * void. + **/ +static void +lpfc_hba_free_post_buf(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; struct lpfc_dmabuf *mp, *next_mp; - LIST_HEAD(completions); - int i; + LIST_HEAD(buflist); + int count; if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) lpfc_sli_hbqbuf_free_all(phba); else { /* Cleanup preposted buffers on the ELS ring */ pring = &psli->ring[LPFC_ELS_RING]; - list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { + spin_lock_irq(&phba->hbalock); + list_splice_init(&pring->postbufq, &buflist); + spin_unlock_irq(&phba->hbalock); + + count = 0; + list_for_each_entry_safe(mp, next_mp, &buflist, list) { list_del(&mp->list); - pring->postbufq_cnt--; + count++; lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); } + + spin_lock_irq(&phba->hbalock); + pring->postbufq_cnt -= count; + spin_unlock_irq(&phba->hbalock); } +} + +/** + * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset + * @phba: pointer to lpfc HBA data structure. + * + * This routine will cleanup the txcmplq after the HBA is reset when bringing + * down the SLI Layer. + * + * Return codes + * void + **/ +static void +lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring; + LIST_HEAD(completions); + int i; - spin_lock_irq(&phba->hbalock); for (i = 0; i < psli->num_rings; i++) { pring = &psli->ring[i]; - + if (phba->sli_rev >= LPFC_SLI_REV4) + spin_lock_irq(&pring->ring_lock); + else + spin_lock_irq(&phba->hbalock); /* At this point in time the HBA is either reset or DOA. Either * way, nothing should be on txcmplq as it will NEVER complete. */ list_splice_init(&pring->txcmplq, &completions); pring->txcmplq_cnt = 0; - spin_unlock_irq(&phba->hbalock); + + if (phba->sli_rev >= LPFC_SLI_REV4) + spin_unlock_irq(&pring->ring_lock); + else + spin_unlock_irq(&phba->hbalock); /* Cancel all the IOCBs from the completions list */ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); - lpfc_sli_abort_iocb_ring(phba, pring); - spin_lock_irq(&phba->hbalock); } - spin_unlock_irq(&phba->hbalock); +} +/** + * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset + * @phba: pointer to lpfc HBA data structure.
+ * + * This routine will do uninitialization after the HBA is reset when bring + * down the SLI Layer. + * + * Return codes + * 0 - success. + * Any other value - error. + **/ +static int +lpfc_hba_down_post_s3(struct lpfc_hba *phba) +{ + lpfc_hba_free_post_buf(phba); + lpfc_hba_clean_txcmplq(phba); return 0; } @@ -867,13 +986,12 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) { struct lpfc_scsi_buf *psb, *psb_next; LIST_HEAD(aborts); - int ret; unsigned long iflag = 0; struct lpfc_sglq *sglq_entry = NULL; - ret = lpfc_hba_down_post_s3(phba); - if (ret) - return ret; + lpfc_hba_free_post_buf(phba); + lpfc_hba_clean_txcmplq(phba); + /* At this point in time the HBA is either reset or DOA. Either * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be * on the lpfc_sgl_list so that it can either be freed if the @@ -906,9 +1024,11 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) psb->pCmd = NULL; psb->status = IOSTAT_SUCCESS; } - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); - list_splice(&aborts, &phba->lpfc_scsi_buf_list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); + list_splice(&aborts, &phba->lpfc_scsi_buf_list_put); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); + + lpfc_sli4_free_sp_events(phba); return 0; } @@ -983,9 +1103,14 @@ lpfc_rrq_timeout(unsigned long ptr) phba = (struct lpfc_hba *)ptr; spin_lock_irqsave(&phba->pport->work_port_lock, iflag); - phba->hba_flag |= HBA_RRQ_ACTIVE; + if (!(phba->pport->load_flag & FC_UNLOADING)) + phba->hba_flag |= HBA_RRQ_ACTIVE; + else + phba->hba_flag &= ~HBA_RRQ_ACTIVE; spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); - lpfc_worker_wake_up(phba); + + if (!(phba->pport->load_flag & FC_UNLOADING)) + lpfc_worker_wake_up(phba); } /** @@ -1019,7 +1144,8 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) !(phba->link_state == LPFC_HBA_ERROR) && !(phba->pport->load_flag & FC_UNLOADING)) mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); return; } @@ -1062,15 +1188,18 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) spin_lock_irq(&phba->pport->work_port_lock); - if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ, - jiffies)) { + if (time_after(phba->last_completion_time + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), + jiffies)) { spin_unlock_irq(&phba->pport->work_port_lock); if (!phba->hb_outstanding) mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); else mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); return; } spin_unlock_irq(&phba->pport->work_port_lock); @@ -1102,7 +1231,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) if (!pmboxq) { mod_timer(&phba->hb_tmofunc, jiffies + - HZ * LPFC_HB_MBOX_INTERVAL); + msecs_to_jiffies(1000 * + LPFC_HB_MBOX_INTERVAL)); return; } @@ -1118,7 +1248,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) phba->mbox_mem_pool); mod_timer(&phba->hb_tmofunc, jiffies + - HZ * LPFC_HB_MBOX_INTERVAL); + msecs_to_jiffies(1000 * + LPFC_HB_MBOX_INTERVAL)); return; } phba->skipped_hb = 0; @@ -1134,7 +1265,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) phba->skipped_hb = jiffies; mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); return; } 
else { /* @@ -1148,7 +1280,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) jiffies_to_msecs(jiffies - phba->last_completion_time)); mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); } } } @@ -1168,7 +1301,7 @@ lpfc_offline_eratt(struct lpfc_hba *phba) spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); - lpfc_offline_prep(phba); + lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); lpfc_offline(phba); lpfc_reset_barrier(phba); @@ -1189,10 +1322,10 @@ lpfc_offline_eratt(struct lpfc_hba *phba) * This routine is called to bring a SLI4 HBA offline when HBA hardware error * other than Port Error 6 has been detected. **/ -static void +void lpfc_sli4_offline_eratt(struct lpfc_hba *phba) { - lpfc_offline_prep(phba); + lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); lpfc_offline(phba); lpfc_sli4_brdreset(phba); lpfc_hba_down_post(phba); @@ -1214,7 +1347,6 @@ static void lpfc_handle_deferred_eratt(struct lpfc_hba *phba) { uint32_t old_host_status = phba->work_hs; - struct lpfc_sli_ring *pring; struct lpfc_sli *psli = &phba->sli; /* If the pci channel is offline, ignore possible errors, @@ -1243,14 +1375,13 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) * dropped by the firmware. Error iocb (I/O) on txcmplq and let the * SCSI layer retry it after re-establishing link. */ - pring = &psli->ring[psli->fcp_ring]; - lpfc_sli_abort_iocb_ring(phba, pring); + lpfc_sli_abort_fcp_rings(phba); /* * There was a firmware error. Take the hba offline and then * attempt to restart it. */ - lpfc_offline_prep(phba); + lpfc_offline_prep(phba, LPFC_MBX_WAIT); lpfc_offline(phba); /* Wait for the ER1 bit to clear.*/ @@ -1312,7 +1443,6 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba) { struct lpfc_vport *vport = phba->pport; struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring; uint32_t event_data; unsigned long temperature; struct temp_event temp_event_data; @@ -1364,14 +1494,13 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba) * Error iocb (I/O) on txcmplq and let the SCSI layer * retry it after re-establishing link. */ - pring = &psli->ring[psli->fcp_ring]; - lpfc_sli_abort_iocb_ring(phba, pring); + lpfc_sli_abort_fcp_rings(phba); /* * There was a firmware error. Take the hba offline and then * attempt to restart it. */ - lpfc_offline_prep(phba); + lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); lpfc_offline(phba); lpfc_sli_brdrestart(phba); if (lpfc_online(phba) == 0) { /* Initialize the HBA */ @@ -1427,6 +1556,56 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba) } /** + * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg + * @phba: pointer to lpfc hba data structure. + * @mbx_action: flag for mailbox shutdown action. + * + * This routine is invoked to perform an SLI4 port PCI function reset in + * response to port status register polling attention. It waits for port + * status register (ERR, RDY, RN) bits before proceeding with function reset. + * During this process, interrupt vectors are freed and later requested + * for handling possible port resource change. + **/ +static int +lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, + bool en_rn_msg) +{ + int rc; + uint32_t intr_mode; + + /* + * On error status condition, driver need to wait for port + * ready before performing reset. 
+ */ + rc = lpfc_sli4_pdev_status_reg_wait(phba); + if (!rc) { + /* need reset: attempt for port recovery */ + if (en_rn_msg) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2887 Reset Needed: Attempting Port " + "Recovery...\n"); + lpfc_offline_prep(phba, mbx_action); + lpfc_offline(phba); + /* release interrupt for possible resource change */ + lpfc_sli4_disable_intr(phba); + lpfc_sli_brdrestart(phba); + /* request and enable interrupt */ + intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); + if (intr_mode == LPFC_INTR_ERROR) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3175 Failed to enable interrupt\n"); + return -EIO; + } else { + phba->intr_mode = intr_mode; + } + rc = lpfc_online(phba); + if (rc == 0) + lpfc_unblock_mgmt_io(phba); + } + return rc; +} + +/** * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler * @phba: pointer to lpfc hba data structure. * @@ -1444,6 +1623,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) uint32_t reg_err1, reg_err2; uint32_t uerrlo_reg, uemasklo_reg; uint32_t pci_rd_rc1, pci_rd_rc2; + bool en_rn_msg = true; int rc; /* If the pci channel is offline, ignore possible errors, since @@ -1474,8 +1654,12 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) phba->sli4_hba.u.if_type2.STATUSregaddr, &portstat_reg.word0); /* consider PCI bus read error as pci_channel_offline */ - if (pci_rd_rc1 == -EIO) + if (pci_rd_rc1 == -EIO) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3151 PCI bus read access failure: x%x\n", + readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); return; + } reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { @@ -1490,10 +1674,12 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) break; } if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && - reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) + reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3143 Port Down: Firmware Restarted\n"); - else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && + "3143 Port Down: Firmware Update " + "Detected\n"); + en_rn_msg = false; + } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3144 Port Down: Debug Dump\n"); @@ -1501,30 +1687,22 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3145 Port Down: Provisioning\n"); - /* - * On error status condition, driver need to wait for port - * ready before performing reset. 
- */ - rc = lpfc_sli4_pdev_status_reg_wait(phba); - if (!rc) { - /* need reset: attempt for port recovery */ - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2887 Reset Needed: Attempting Port " - "Recovery...\n"); - lpfc_offline_prep(phba); - lpfc_offline(phba); - lpfc_sli_brdrestart(phba); - if (lpfc_online(phba) == 0) { - lpfc_unblock_mgmt_io(phba); - /* don't report event on forced debug dump */ - if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && - reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) - return; - else - break; - } - /* fall through for not able to recover */ + + /* Check port status register for function reset */ + rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, + en_rn_msg); + if (rc == 0) { + /* don't report event on forced debug dump */ + if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && + reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) + return; + else + break; } + /* fall through for not able to recover */ + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3152 Unrecoverable error, bring the port " + "offline\n"); lpfc_sli4_offline_eratt(phba); break; case LPFC_SLI_INTF_IF_TYPE_1: @@ -1846,85 +2024,90 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) max_speed = 4; else if (phba->lmt & LMT_2Gb) max_speed = 2; - else + else if (phba->lmt & LMT_1Gb) max_speed = 1; + else + max_speed = 0; vp = &phba->vpd; switch (dev_id) { case PCI_DEVICE_ID_FIREFLY: - m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"}; + m = (typeof(m)){"LP6000", "PCI", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SUPERFLY: if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) - m = (typeof(m)){"LP7000", "PCI", - "Fibre Channel Adapter"}; + m = (typeof(m)){"LP7000", "PCI", ""}; else - m = (typeof(m)){"LP7000E", "PCI", - "Fibre Channel Adapter"}; + m = (typeof(m)){"LP7000E", "PCI", ""}; + m.function = "Obsolete, Unsupported Fibre Channel Adapter"; break; case PCI_DEVICE_ID_DRAGONFLY: m = (typeof(m)){"LP8000", "PCI", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_CENTAUR: if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) - m = (typeof(m)){"LP9002", "PCI", - "Fibre Channel Adapter"}; + m = (typeof(m)){"LP9002", "PCI", ""}; else - m = (typeof(m)){"LP9000", "PCI", - "Fibre Channel Adapter"}; + m = (typeof(m)){"LP9000", "PCI", ""}; + m.function = "Obsolete, Unsupported Fibre Channel Adapter"; break; case PCI_DEVICE_ID_RFLY: m = (typeof(m)){"LP952", "PCI", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_PEGASUS: m = (typeof(m)){"LP9802", "PCI-X", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_THOR: m = (typeof(m)){"LP10000", "PCI-X", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_VIPER: m = (typeof(m)){"LPX1000", "PCI-X", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_PFLY: m = (typeof(m)){"LP982", "PCI-X", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_TFLY: m = (typeof(m)){"LP1050", "PCI-X", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_HELIOS: m = (typeof(m)){"LP11000", "PCI-X2", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_HELIOS_SCSP: m = (typeof(m)){"LP11000-SP", "PCI-X2", - "Fibre Channel Adapter"}; + 
"Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_HELIOS_DCSP: m = (typeof(m)){"LP11002-SP", "PCI-X2", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_NEPTUNE: - m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"}; + m = (typeof(m)){"LPe1000", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_NEPTUNE_SCSP: - m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"}; + m = (typeof(m)){"LPe1000-SP", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_NEPTUNE_DCSP: - m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"}; + m = (typeof(m)){"LPe1002-SP", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_BMID: m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_BSMB: - m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"}; + m = (typeof(m)){"LP111", "PCI-X2", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_ZEPHYR: m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; @@ -1943,16 +2126,20 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LP101: - m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"}; + m = (typeof(m)){"LP101", "PCI-X", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LP10000S: - m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"}; + m = (typeof(m)){"LP10000-S", "PCI", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LP11000S: - m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"}; + m = (typeof(m)){"LP11000-S", "PCI-X2", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LPE11000S: - m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"}; + m = (typeof(m)){"LPe11000-S", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SAT: m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; @@ -1973,20 +2160,21 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_HORNET: - m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"}; + m = (typeof(m)){"LP21000", "PCIe", + "Obsolete, Unsupported FCoE Adapter"}; GE = 1; break; case PCI_DEVICE_ID_PROTEUS_VF: m = (typeof(m)){"LPev12000", "PCIe IOV", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_PROTEUS_PF: m = (typeof(m)){"LPev12000", "PCIe IOV", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_PROTEUS_S: m = (typeof(m)){"LPemv12002-S", "PCIe IOV", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_TIGERSHARK: oneConnect = 1; @@ -2002,17 +2190,29 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) break; case PCI_DEVICE_ID_BALIUS: m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LANCER_FC: - case PCI_DEVICE_ID_LANCER_FC_VF: m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; break; + case PCI_DEVICE_ID_LANCER_FC_VF: + m = (typeof(m)){"LPe16000", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; 
+ break; case PCI_DEVICE_ID_LANCER_FCOE: - case PCI_DEVICE_ID_LANCER_FCOE_VF: oneConnect = 1; m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; break; + case PCI_DEVICE_ID_LANCER_FCOE_VF: + oneConnect = 1; + m = (typeof(m)){"OCe15100", "PCIe", + "Obsolete, Unsupported FCoE"}; + break; + case PCI_DEVICE_ID_SKYHAWK: + case PCI_DEVICE_ID_SKYHAWK_VF: + oneConnect = 1; + m = (typeof(m)){"OCe14000", "PCIe", "FCoE"}; + break; default: m = (typeof(m)){"Unknown", "", ""}; break; @@ -2027,9 +2227,13 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) if (descp && descp[0] == '\0') { if (oneConnect) snprintf(descp, 255, - "Emulex OneConnect %s, %s Initiator, Port %s", + "Emulex OneConnect %s, %s Initiator %s", m.name, m.function, phba->Port); + else if (max_speed == 0) + snprintf(descp, 255, + "Emulex %s %s %s ", + m.name, m.bus, m.function); else snprintf(descp, 255, "Emulex %s %d%s %s %s", @@ -2333,13 +2537,20 @@ lpfc_cleanup(struct lpfc_vport *vport) continue; } + /* take care of nodes in unused state before the state + * machine taking action. + */ + if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { + lpfc_nlp_put(ndlp); + continue; + } + if (ndlp->nlp_type & NLP_FABRIC) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); - } /* At this point, ALL ndlp's should be gone @@ -2479,15 +2690,19 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba) * driver prepares the HBA interface for online or offline. **/ static void -lpfc_block_mgmt_io(struct lpfc_hba * phba) +lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) { unsigned long iflag; uint8_t actcmd = MBX_HEARTBEAT; unsigned long timeout; - timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; spin_lock_irqsave(&phba->hbalock, iflag); phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; + spin_unlock_irqrestore(&phba->hbalock, iflag); + if (mbx_action == LPFC_MBX_NO_WAIT) + return; + timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; + spin_lock_irqsave(&phba->hbalock, iflag); if (phba->sli.mbox_active) { actcmd = phba->sli.mbox_active->u.mb.mbxCommand; /* Determine how long we might wait for the active mailbox @@ -2513,6 +2728,42 @@ lpfc_block_mgmt_io(struct lpfc_hba * phba) } /** + * lpfc_sli4_node_prep - Assign RPIs for active nodes. + * @phba: pointer to lpfc hba data structure. + * + * Allocate RPIs for all active remote nodes. This is needed whenever + * an SLI4 adapter is reset and the driver is not unloading. Its purpose + * is to fixup the temporary rpi assignments. + **/ +void +lpfc_sli4_node_prep(struct lpfc_hba *phba) +{ + struct lpfc_nodelist *ndlp, *next_ndlp; + struct lpfc_vport **vports; + int i; + + if (phba->sli_rev != LPFC_SLI_REV4) + return; + + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + if (vports[i]->load_flag & FC_UNLOADING) + continue; + + list_for_each_entry_safe(ndlp, next_ndlp, + &vports[i]->fc_nodes, + nlp_listp) { + if (NLP_CHK_NODE_ACT(ndlp)) + ndlp->nlp_rpi = + lpfc_sli4_alloc_rpi(phba); + } + } + } + lpfc_destroy_vport_work_array(phba, vports); +} + +/** * lpfc_online - Initialize and bring a HBA online * @phba: pointer to lpfc hba data structure. 
* @@ -2530,6 +2781,7 @@ lpfc_online(struct lpfc_hba *phba) struct lpfc_vport *vport; struct lpfc_vport **vports; int i; + bool vpis_cleared = false; if (!phba) return 0; @@ -2541,7 +2793,7 @@ lpfc_online(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0458 Bring Adapter online\n"); - lpfc_block_mgmt_io(phba); + lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); if (!lpfc_sli_queue_setup(phba)) { lpfc_unblock_mgmt_io(phba); @@ -2553,6 +2805,10 @@ lpfc_online(struct lpfc_hba *phba) lpfc_unblock_mgmt_io(phba); return 1; } + spin_lock_irq(&phba->hbalock); + if (!phba->sli4_hba.max_cfg_param.vpi_used) + vpis_cleared = true; + spin_unlock_irq(&phba->hbalock); } else { if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ lpfc_unblock_mgmt_io(phba); @@ -2569,8 +2825,13 @@ lpfc_online(struct lpfc_hba *phba) vports[i]->fc_flag &= ~FC_OFFLINE_MODE; if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; - if (phba->sli_rev == LPFC_SLI_REV4) + if (phba->sli_rev == LPFC_SLI_REV4) { vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + if ((vpis_cleared) && + (vports[i]->port_type != + LPFC_PHYSICAL_PORT)) + vports[i]->vpi = 0; + } spin_unlock_irq(shost->host_lock); } lpfc_destroy_vport_work_array(phba, vports); @@ -2609,7 +2870,7 @@ lpfc_unblock_mgmt_io(struct lpfc_hba * phba) * queue to make it ready to be brought offline. **/ void -lpfc_offline_prep(struct lpfc_hba * phba) +lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) { struct lpfc_vport *vport = phba->pport; struct lpfc_nodelist *ndlp, *next_ndlp; @@ -2620,7 +2881,7 @@ lpfc_offline_prep(struct lpfc_hba * phba) if (vport->fc_flag & FC_OFFLINE_MODE) return; - lpfc_block_mgmt_io(phba); + lpfc_block_mgmt_io(phba, mbx_action); lpfc_linkdown(phba); @@ -2654,13 +2915,20 @@ lpfc_offline_prep(struct lpfc_hba * phba) spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_NPR_ADISC; spin_unlock_irq(shost->host_lock); + /* + * Whenever an SLI4 port goes offline, free the + * RPI. Get a new RPI when the adapter port + * comes back online. + */ + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); lpfc_unreg_rpi(vports[i], ndlp); } } } lpfc_destroy_vport_work_array(phba, vports); - lpfc_sli_mbox_sys_shutdown(phba); + lpfc_sli_mbox_sys_shutdown(phba, mbx_action); } /** @@ -2709,59 +2977,44 @@ lpfc_offline(struct lpfc_hba *phba) } /** - * lpfc_scsi_buf_update - Update the scsi_buffers that are already allocated. - * @phba: pointer to lpfc hba data structure. - * - * This routine goes through all the scsi buffers in the system and updates the - * Physical XRIs assigned to the SCSI buffer because these may change after any - * firmware reset - * - * Return codes - * 0 - successful (for now, it always returns 0) - **/ -int -lpfc_scsi_buf_update(struct lpfc_hba *phba) -{ - struct lpfc_scsi_buf *sb, *sb_next; - - spin_lock_irq(&phba->hbalock); - spin_lock(&phba->scsi_buf_list_lock); - list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) - sb->cur_iocbq.sli4_xritag = - phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag]; - spin_unlock(&phba->scsi_buf_list_lock); - spin_unlock_irq(&phba->hbalock); - return 0; -} - -/** * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists * @phba: pointer to lpfc hba data structure. * * This routine is to free all the SCSI buffers and IOCBs from the driver * list back to kernel. It is called from lpfc_pci_remove_one to free * the internal resources before the device is removed from the system. 
- * - * Return codes - * 0 - successful (for now, it always returns 0) **/ -static int +static void lpfc_scsi_free(struct lpfc_hba *phba) { struct lpfc_scsi_buf *sb, *sb_next; struct lpfc_iocbq *io, *io_next; spin_lock_irq(&phba->hbalock); + /* Release all the lpfc_scsi_bufs maintained by this host. */ - spin_lock(&phba->scsi_buf_list_lock); - list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { + + spin_lock(&phba->scsi_buf_list_put_lock); + list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, + list) { + list_del(&sb->list); + pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, + sb->dma_handle); + kfree(sb); + phba->total_scsi_bufs--; + } + spin_unlock(&phba->scsi_buf_list_put_lock); + + spin_lock(&phba->scsi_buf_list_get_lock); + list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, + list) { list_del(&sb->list); pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, sb->dma_handle); kfree(sb); phba->total_scsi_bufs--; } - spin_unlock(&phba->scsi_buf_list_lock); + spin_unlock(&phba->scsi_buf_list_get_lock); /* Release all the lpfc_iocbq entries maintained by this host. */ list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { @@ -2771,7 +3024,184 @@ lpfc_scsi_free(struct lpfc_hba *phba) } spin_unlock_irq(&phba->hbalock); +} + +/** + * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping + * @phba: pointer to lpfc hba data structure. + * + * This routine first calculates the sizes of the current els and allocated + * scsi sgl lists, and then goes through all sgls to updates the physical + * XRIs assigned due to port function reset. During port initialization, the + * current els and allocated scsi sgl lists are 0s. + * + * Return codes + * 0 - successful (for now, it always returns 0) + **/ +int +lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) +{ + struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; + struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL; + uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt; + LIST_HEAD(els_sgl_list); + LIST_HEAD(scsi_sgl_list); + int rc; + + /* + * update on pci function's els xri-sgl list + */ + els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); + if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { + /* els xri-sgl expanded */ + xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "3157 ELS xri-sgl count increased from " + "%d to %d\n", phba->sli4_hba.els_xri_cnt, + els_xri_cnt); + /* allocate the additional els sgls */ + for (i = 0; i < xri_cnt; i++) { + sglq_entry = kzalloc(sizeof(struct lpfc_sglq), + GFP_KERNEL); + if (sglq_entry == NULL) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2562 Failure to allocate an " + "ELS sgl entry:%d\n", i); + rc = -ENOMEM; + goto out_free_mem; + } + sglq_entry->buff_type = GEN_BUFF_TYPE; + sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, + &sglq_entry->phys); + if (sglq_entry->virt == NULL) { + kfree(sglq_entry); + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2563 Failure to allocate an " + "ELS mbuf:%d\n", i); + rc = -ENOMEM; + goto out_free_mem; + } + sglq_entry->sgl = sglq_entry->virt; + memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); + sglq_entry->state = SGL_FREED; + list_add_tail(&sglq_entry->list, &els_sgl_list); + } + spin_lock_irq(&phba->hbalock); + list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); + spin_unlock_irq(&phba->hbalock); + } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { + /* els xri-sgl shrinked */ + xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; + 
lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "3158 ELS xri-sgl count decreased from " + "%d to %d\n", phba->sli4_hba.els_xri_cnt, + els_xri_cnt); + spin_lock_irq(&phba->hbalock); + list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list); + spin_unlock_irq(&phba->hbalock); + /* release extra els sgls from list */ + for (i = 0; i < xri_cnt; i++) { + list_remove_head(&els_sgl_list, + sglq_entry, struct lpfc_sglq, list); + if (sglq_entry) { + lpfc_mbuf_free(phba, sglq_entry->virt, + sglq_entry->phys); + kfree(sglq_entry); + } + } + spin_lock_irq(&phba->hbalock); + list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); + spin_unlock_irq(&phba->hbalock); + } else + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "3163 ELS xri-sgl count unchanged: %d\n", + els_xri_cnt); + phba->sli4_hba.els_xri_cnt = els_xri_cnt; + + /* update xris to els sgls on the list */ + sglq_entry = NULL; + sglq_entry_next = NULL; + list_for_each_entry_safe(sglq_entry, sglq_entry_next, + &phba->sli4_hba.lpfc_sgl_list, list) { + lxri = lpfc_sli4_next_xritag(phba); + if (lxri == NO_XRI) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2400 Failed to allocate xri for " + "ELS sgl\n"); + rc = -ENOMEM; + goto out_free_mem; + } + sglq_entry->sli4_lxritag = lxri; + sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; + } + + /* + * update on pci function's allocated scsi xri-sgl list + */ + phba->total_scsi_bufs = 0; + + /* maximum number of xris available for scsi buffers */ + phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri - + els_xri_cnt; + + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2401 Current allocated SCSI xri-sgl count:%d, " + "maximum SCSI xri count:%d\n", + phba->sli4_hba.scsi_xri_cnt, + phba->sli4_hba.scsi_xri_max); + + spin_lock_irq(&phba->scsi_buf_list_get_lock); + spin_lock(&phba->scsi_buf_list_put_lock); + list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list); + list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list); + spin_unlock(&phba->scsi_buf_list_put_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); + + if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { + /* max scsi xri shrinked below the allocated scsi buffers */ + scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt - + phba->sli4_hba.scsi_xri_max; + /* release the extra allocated scsi buffers */ + for (i = 0; i < scsi_xri_cnt; i++) { + list_remove_head(&scsi_sgl_list, psb, + struct lpfc_scsi_buf, list); + pci_pool_free(phba->lpfc_scsi_dma_buf_pool, psb->data, + psb->dma_handle); + kfree(psb); + } + spin_lock_irq(&phba->scsi_buf_list_get_lock); + phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt; + spin_unlock_irq(&phba->scsi_buf_list_get_lock); + } + + /* update xris associated to remaining allocated scsi buffers */ + psb = NULL; + psb_next = NULL; + list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) { + lxri = lpfc_sli4_next_xritag(phba); + if (lxri == NO_XRI) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2560 Failed to allocate xri for " + "scsi buffer\n"); + rc = -ENOMEM; + goto out_free_mem; + } + psb->cur_iocbq.sli4_lxritag = lxri; + psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; + } + spin_lock_irq(&phba->scsi_buf_list_get_lock); + spin_lock(&phba->scsi_buf_list_put_lock); + list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); + spin_unlock(&phba->scsi_buf_list_put_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); + return 0; + +out_free_mem: + lpfc_free_els_sgl_list(phba); + lpfc_scsi_free(phba); + 
return rc; } /** @@ -2912,14 +3342,10 @@ destroy_port(struct lpfc_vport *vport) int lpfc_get_instance(void) { - int instance = 0; + int ret; - /* Assign an unused number */ - if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) - return -1; - if (idr_get_new(&lpfc_hba_index, NULL, &instance)) - return -1; - return instance; + ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); + return ret < 0 ? -1 : ret; } /** @@ -2949,14 +3375,15 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) stat = 1; goto finished; } - if (time >= 30 * HZ) { + if (time >= msecs_to_jiffies(30 * 1000)) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0461 Scanning longer than 30 " "seconds. Continuing initialization\n"); stat = 1; goto finished; } - if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { + if (time >= msecs_to_jiffies(15 * 1000) && + phba->link_state <= LPFC_LINK_DOWN) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0465 Link down longer than 15 " "seconds. Continuing initialization\n"); @@ -2968,7 +3395,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) goto finished; if (vport->num_disc_nodes || vport->fc_prli_sent) goto finished; - if (vport->fc_map_cnt == 0 && time < 2 * HZ) + if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) goto finished; if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) goto finished; @@ -3255,6 +3682,119 @@ lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba, } /** + * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed + * @phba: pointer to lpfc hba data structure. + * + * This routine is to get an SLI3 FC port's link speed in Mbps. + * + * Return: link speed in terms of Mbps. + **/ +uint32_t +lpfc_sli_port_speed_get(struct lpfc_hba *phba) +{ + uint32_t link_speed; + + if (!lpfc_is_link_up(phba)) + return 0; + + switch (phba->fc_linkspeed) { + case LPFC_LINK_SPEED_1GHZ: + link_speed = 1000; + break; + case LPFC_LINK_SPEED_2GHZ: + link_speed = 2000; + break; + case LPFC_LINK_SPEED_4GHZ: + link_speed = 4000; + break; + case LPFC_LINK_SPEED_8GHZ: + link_speed = 8000; + break; + case LPFC_LINK_SPEED_10GHZ: + link_speed = 10000; + break; + case LPFC_LINK_SPEED_16GHZ: + link_speed = 16000; + break; + default: + link_speed = 0; + } + return link_speed; +} + +/** + * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed + * @phba: pointer to lpfc hba data structure. + * @evt_code: asynchronous event code. + * @speed_code: asynchronous event link speed code. + * + * This routine is to parse the giving SLI4 async event link speed code into + * value of Mbps for the link speed. + * + * Return: link speed in terms of Mbps. 
+ **/ +static uint32_t +lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, + uint8_t speed_code) +{ + uint32_t port_speed; + + switch (evt_code) { + case LPFC_TRAILER_CODE_LINK: + switch (speed_code) { + case LPFC_EVT_CODE_LINK_NO_LINK: + port_speed = 0; + break; + case LPFC_EVT_CODE_LINK_10_MBIT: + port_speed = 10; + break; + case LPFC_EVT_CODE_LINK_100_MBIT: + port_speed = 100; + break; + case LPFC_EVT_CODE_LINK_1_GBIT: + port_speed = 1000; + break; + case LPFC_EVT_CODE_LINK_10_GBIT: + port_speed = 10000; + break; + default: + port_speed = 0; + } + break; + case LPFC_TRAILER_CODE_FC: + switch (speed_code) { + case LPFC_EVT_CODE_FC_NO_LINK: + port_speed = 0; + break; + case LPFC_EVT_CODE_FC_1_GBAUD: + port_speed = 1000; + break; + case LPFC_EVT_CODE_FC_2_GBAUD: + port_speed = 2000; + break; + case LPFC_EVT_CODE_FC_4_GBAUD: + port_speed = 4000; + break; + case LPFC_EVT_CODE_FC_8_GBAUD: + port_speed = 8000; + break; + case LPFC_EVT_CODE_FC_10_GBAUD: + port_speed = 10000; + break; + case LPFC_EVT_CODE_FC_16_GBAUD: + port_speed = 16000; + break; + default: + port_speed = 0; + } + break; + default: + port_speed = 0; + } + return port_speed; +} + +/** * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event * @phba: pointer to lpfc hba data structure. * @acqe_link: pointer to the async link completion queue entry. @@ -3311,7 +3851,8 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba, /* Keep the link status for extra SLI4 state machine reference */ phba->sli4_hba.link_state.speed = - bf_get(lpfc_acqe_link_speed, acqe_link); + lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, + bf_get(lpfc_acqe_link_speed, acqe_link)); phba->sli4_hba.link_state.duplex = bf_get(lpfc_acqe_link_duplex, acqe_link); phba->sli4_hba.link_state.status = @@ -3323,7 +3864,8 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba, phba->sli4_hba.link_state.fault = bf_get(lpfc_acqe_link_fault, acqe_link); phba->sli4_hba.link_state.logical_speed = - bf_get(lpfc_acqe_logical_link_speed, acqe_link); + bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2900 Async FC/FCoE Link event - Speed:%dGBit " "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " @@ -3333,7 +3875,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba, phba->sli4_hba.link_state.status, phba->sli4_hba.link_state.type, phba->sli4_hba.link_state.number, - phba->sli4_hba.link_state.logical_speed * 10, + phba->sli4_hba.link_state.logical_speed, phba->sli4_hba.link_state.fault); /* * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch @@ -3405,7 +3947,8 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) } /* Keep the link status for extra SLI4 state machine reference */ phba->sli4_hba.link_state.speed = - bf_get(lpfc_acqe_fc_la_speed, acqe_fc); + lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, + bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; phba->sli4_hba.link_state.topology = bf_get(lpfc_acqe_fc_la_topology, acqe_fc); @@ -3418,7 +3961,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) phba->sli4_hba.link_state.fault = bf_get(lpfc_acqe_link_fault, acqe_fc); phba->sli4_hba.link_state.logical_speed = - bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc); + bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2896 Async FC event - Speed:%dGBaud Topology:x%x " "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" @@ 
-3428,7 +3971,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) phba->sli4_hba.link_state.status, phba->sli4_hba.link_state.type, phba->sli4_hba.link_state.number, - phba->sli4_hba.link_state.logical_speed * 10, + phba->sli4_hba.link_state.logical_speed, phba->sli4_hba.link_state.fault); pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { @@ -3484,12 +4027,80 @@ out_free_pmb: static void lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) { - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "2901 Async SLI event - Event Data1:x%08x Event Data2:" - "x%08x SLI Event Type:%d", - acqe_sli->event_data1, acqe_sli->event_data2, - bf_get(lpfc_trailer_type, acqe_sli)); - return; + char port_name; + char message[128]; + uint8_t status; + struct lpfc_acqe_misconfigured_event *misconfigured; + + /* special case misconfigured event as it contains data for all ports */ + if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != + LPFC_SLI_INTF_IF_TYPE_2) || + (bf_get(lpfc_trailer_type, acqe_sli) != + LPFC_SLI_EVENT_TYPE_MISCONFIGURED)) { + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2901 Async SLI event - Event Data1:x%08x Event Data2:" + "x%08x SLI Event Type:%d\n", + acqe_sli->event_data1, acqe_sli->event_data2, + bf_get(lpfc_trailer_type, acqe_sli)); + return; + } + + port_name = phba->Port[0]; + if (port_name == 0x00) + port_name = '?'; /* get port name is empty */ + + misconfigured = (struct lpfc_acqe_misconfigured_event *) + &acqe_sli->event_data1; + + /* fetch the status for this port */ + switch (phba->sli4_hba.lnk_info.lnk_no) { + case LPFC_LINK_NUMBER_0: + status = bf_get(lpfc_sli_misconfigured_port0, + &misconfigured->theEvent); + break; + case LPFC_LINK_NUMBER_1: + status = bf_get(lpfc_sli_misconfigured_port1, + &misconfigured->theEvent); + break; + case LPFC_LINK_NUMBER_2: + status = bf_get(lpfc_sli_misconfigured_port2, + &misconfigured->theEvent); + break; + case LPFC_LINK_NUMBER_3: + status = bf_get(lpfc_sli_misconfigured_port3, + &misconfigured->theEvent); + break; + default: + status = ~LPFC_SLI_EVENT_STATUS_VALID; + break; + } + + switch (status) { + case LPFC_SLI_EVENT_STATUS_VALID: + return; /* no message if the sfp is okay */ + case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: + sprintf(message, "Optics faulted/incorrectly installed/not " \ + "installed - Reseat optics, if issue not " + "resolved, replace."); + break; + case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: + sprintf(message, + "Optics of two types installed - Remove one optic or " \ + "install matching pair of optics."); + break; + case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: + sprintf(message, "Incompatible optics - Replace with " \ + "compatible optics for card to function."); + break; + default: + /* firmware is reporting a status we don't know about */ + sprintf(message, "Unknown event status x%02x", status); + break; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "3176 Misconfigured Physical Port - " + "Port Name %c %s\n", port_name, message); } /** @@ -3663,6 +4274,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, break; case LPFC_FIP_EVENT_TYPE_FCF_DEAD: + phba->fcoe_cvl_eventtag = acqe_fip->event_tag; lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, "2549 FCF (x%x) disconnected from network, " "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); @@ -3724,6 +4336,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, } break; case LPFC_FIP_EVENT_TYPE_CVL: + phba->fcoe_cvl_eventtag = acqe_fip->event_tag; lpfc_printf_log(phba, KERN_ERR, 
LOG_FIP | LOG_DISCOVERY, "2718 Clear Virtual Link Received for VPI 0x%x" " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); @@ -3754,7 +4367,8 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, * If there are other active VLinks present, * re-instantiate the Vlink using FDISC. */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000)); shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; @@ -3848,11 +4462,11 @@ lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, phba->fcoe_eventtag = acqe_grp5->event_tag; prev_ll_spd = phba->sli4_hba.link_state.logical_speed; phba->sli4_hba.link_state.logical_speed = - (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)); + (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10; lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2789 GRP5 Async Event: Updating logical link speed " - "from %dMbps to %dMbps\n", (prev_ll_spd * 10), - (phba->sli4_hba.link_state.logical_speed*10)); + "from %dMbps to %dMbps\n", prev_ll_spd, + phba->sli4_hba.link_state.logical_speed); } /** @@ -4051,7 +4665,7 @@ lpfc_enable_pci_dev(struct lpfc_hba *phba) pci_save_state(pdev); /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ - if (pci_find_capability(pdev, PCI_CAP_ID_EXP)) + if (pci_is_pcie(pdev)) pdev->needs_freset = 1; return 0; @@ -4087,8 +4701,6 @@ lpfc_disable_pci_dev(struct lpfc_hba *phba) /* Release PCI resource and disable PCI device */ pci_release_selected_regions(pdev, bars); pci_disable_device(pdev); - /* Null out PCI private reference to driver */ - pci_set_drvdata(pdev, NULL); return; } @@ -4110,7 +4722,10 @@ lpfc_reset_hba(struct lpfc_hba *phba) phba->link_state = LPFC_HBA_ERROR; return; } - lpfc_offline_prep(phba); + if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) + lpfc_offline_prep(phba, LPFC_MBX_WAIT); + else + lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); lpfc_offline(phba); lpfc_sli_brdrestart(phba); lpfc_online(phba); @@ -4238,24 +4853,60 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; } + if (!phba->sli.ring) + phba->sli.ring = (struct lpfc_sli_ring *) + kzalloc(LPFC_SLI3_MAX_RING * + sizeof(struct lpfc_sli_ring), GFP_KERNEL); + if (!phba->sli.ring) + return -ENOMEM; + /* - * Since the sg_tablesize is module parameter, the sg_dma_buf_size + * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size * used to create the sg_dma_buf_pool must be dynamically calculated. - * 2 segments are added since the IOCB needs a command and response bde. */ - phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + - sizeof(struct fcp_rsp) + - ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); + /* Initialize the host templates the configured values. */ + lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; + + /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ if (phba->cfg_enable_bg) { - phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; - phba->cfg_sg_dma_buf_size += - phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); + /* + * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, + * the FCP rsp, and a BDE for each. Sice we have no control + * over how many protection data segments the SCSI Layer + * will hand us (ie: there could be one for every block + * in the IO), we just allocate enough BDEs to accomidate + * our max amount and we need to limit lpfc_sg_seg_cnt to + * minimize the risk of running out. 
+ */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + + (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64)); + + if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) + phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; + + /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ + phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; + } else { + /* + * The scsi_buf for a regular I/O will hold the FCP cmnd, + * the FCP rsp, a BDE for each, and a BDE for up to + * cfg_sg_seg_cnt data segments. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + + ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); + + /* Total BDEs in BPL for scsi_sg_list */ + phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; } - /* Also reinitialize the host templates with new values. */ - lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; - lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, + "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", + phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, + phba->cfg_total_seg_cnt); phba->max_vpi = LPFC_MAX_VPI; /* This will be set to correct value after config_port mbox */ @@ -4321,12 +4972,17 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) static int lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) { + struct lpfc_vector_map_info *cpup; struct lpfc_sli *psli; LPFC_MBOXQ_t *mboxq; - int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; + int rc, i, hbq_count, max_buf_size; uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; struct lpfc_mqe *mqe; - int longs, sli_family; + int longs; + int fof_vectors = 0; + + /* Get all the module params for configuring this host */ + lpfc_get_cfgparam(phba); /* Before proceed, wait for POST done and device ready */ rc = lpfc_sli4_post_status_check(phba); @@ -4371,16 +5027,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) sizeof(struct lpfc_mbox_ext_buf_ctx)); INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); - /* - * We need to do a READ_CONFIG mailbox command here before - * calling lpfc_get_cfgparam. For VFs this will report the - * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings. - * All of the resources allocated - * for this Port are tied to these values. - */ - /* Get all the module params for configuring this host */ - lpfc_get_cfgparam(phba); phba->max_vpi = LPFC_MAX_VPI; + /* This will be set to correct value after the read_config mbox */ phba->max_vports = 0; @@ -4391,40 +5039,80 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; /* - * Since the sg_tablesize is module parameter, the sg_dma_buf_size + * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands + * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple. + */ + if (!phba->sli.ring) + phba->sli.ring = kzalloc( + (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) * + sizeof(struct lpfc_sli_ring), GFP_KERNEL); + if (!phba->sli.ring) + return -ENOMEM; + + /* + * It doesn't matter what family our adapter is in, we are + * limited to 2 Pages, 512 SGEs, for our SGL. + * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp + */ + max_buf_size = (2 * SLI4_PAGE_SIZE); + if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2) + phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2; + + /* + * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size * used to create the sg_dma_buf_pool must be dynamically calculated. 
- * 2 segments are added since the IOCB needs a command and response bde. - * To insure that the scsi sgl does not cross a 4k page boundary only - * sgl sizes of must be a power of 2. */ - buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + - ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge))); - - sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); - max_buf_size = LPFC_SLI4_MAX_BUF_SIZE; - switch (sli_family) { - case LPFC_SLI_INTF_FAMILY_BE2: - case LPFC_SLI_INTF_FAMILY_BE3: - /* There is a single hint for BE - 2 pages per BPL. */ - if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) == - LPFC_SLI_INTF_SLI_HINT1_1) - max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE; - break; - case LPFC_SLI_INTF_FAMILY_LNCR_A0: - case LPFC_SLI_INTF_FAMILY_LNCR_B0: - default: - break; + + if (phba->cfg_enable_bg) { + /* + * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, + * the FCP rsp, and a SGE for each. Sice we have no control + * over how many protection data segments the SCSI Layer + * will hand us (ie: there could be one for every block + * in the IO), we just allocate enough SGEs to accomidate + * our max amount and we need to limit lpfc_sg_seg_cnt to + * minimize the risk of running out. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + max_buf_size; + + /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ + phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; + + if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF) + phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF; + } else { + /* + * The scsi_buf for a regular I/O will hold the FCP cmnd, + * the FCP rsp, a SGE for each, and a SGE for up to + * cfg_sg_seg_cnt data segments. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + + ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)); + + /* Total SGEs for scsi_sg_list */ + phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; + /* + * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need + * to post 1 page for the SGL. + */ } - for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE; - dma_buf_size < max_buf_size && buf_size > dma_buf_size; - dma_buf_size = dma_buf_size << 1) - ; - if (dma_buf_size == max_buf_size) - phba->cfg_sg_seg_cnt = (dma_buf_size - - sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) - - (2 * sizeof(struct sli4_sge))) / - sizeof(struct sli4_sge); - phba->cfg_sg_dma_buf_size = dma_buf_size; + + /* Initialize the host templates with the updated values. */ + lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; + + if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) + phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; + else + phba->cfg_sg_dma_buf_size = + SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, + "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n", + phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, + phba->cfg_total_seg_cnt); /* Initialize buffer queue management fields */ hbq_count = lpfc_sli_hbq_count(); @@ -4497,6 +5185,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) rc = lpfc_sli4_read_config(phba); if (unlikely(rc)) goto out_free_bsmbx; + rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); + if (unlikely(rc)) + goto out_free_bsmbx; /* IF Type 0 ports get initialized now. 
*/ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == @@ -4554,6 +5245,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) } } mempool_free(mboxq, phba->mbox_mem_pool); + + /* Verify OAS is supported */ + lpfc_sli4_oas_verify(phba); + if (phba->cfg_fof) + fof_vectors = 1; + /* Verify all the SLI4 queues */ rc = lpfc_sli4_queue_verify(phba); if (rc) @@ -4564,18 +5261,15 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) if (rc) goto out_free_bsmbx; - /* Initialize and populate the iocb list per host */ - rc = lpfc_init_sgl_list(phba); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1400 Failed to initialize sgl list.\n"); - goto out_destroy_cq_event_pool; - } + /* Initialize sgl lists per host */ + lpfc_init_sgl_list(phba); + + /* Allocate and initialize active sgl array */ rc = lpfc_init_active_sgl_array(phba); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1430 Failed to initialize sgl list.\n"); - goto out_free_sgl_list; + goto out_destroy_cq_event_pool; } rc = lpfc_sli4_init_rpi_hdrs(phba); if (rc) { @@ -4596,25 +5290,21 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_remove_rpi_hdrs; } - /* - * The cfg_fcp_eq_count can be zero whenever there is exactly one - * interrupt vector. This is not an error - */ - if (phba->cfg_fcp_eq_count) { - phba->sli4_hba.fcp_eq_hdl = - kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * - phba->cfg_fcp_eq_count), GFP_KERNEL); - if (!phba->sli4_hba.fcp_eq_hdl) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2572 Failed allocate memory for " - "fast-path per-EQ handle array\n"); - rc = -ENOMEM; - goto out_free_fcf_rr_bmask; - } + phba->sli4_hba.fcp_eq_hdl = + kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * + (fof_vectors + phba->cfg_fcp_io_channel)), + GFP_KERNEL); + if (!phba->sli4_hba.fcp_eq_hdl) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2572 Failed allocate memory for " + "fast-path per-EQ handle array\n"); + rc = -ENOMEM; + goto out_free_fcf_rr_bmask; } phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * - phba->sli4_hba.cfg_eqn), GFP_KERNEL); + (fof_vectors + + phba->cfg_fcp_io_channel)), GFP_KERNEL); if (!phba->sli4_hba.msix_entries) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2573 Failed allocate memory for msi-x " @@ -4623,6 +5313,41 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_free_fcp_eq_hdl; } + phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) * + phba->sli4_hba.num_present_cpu), + GFP_KERNEL); + if (!phba->sli4_hba.cpu_map) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3327 Failed allocate memory for msi-x " + "interrupt vector mapping\n"); + rc = -ENOMEM; + goto out_free_msix; + } + if (lpfc_used_cpu == NULL) { + lpfc_used_cpu = kzalloc((sizeof(uint16_t) * lpfc_present_cpu), + GFP_KERNEL); + if (!lpfc_used_cpu) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3335 Failed allocate memory for msi-x " + "interrupt vector mapping\n"); + kfree(phba->sli4_hba.cpu_map); + rc = -ENOMEM; + goto out_free_msix; + } + for (i = 0; i < lpfc_present_cpu; i++) + lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY; + } + + /* Initialize io channels for round robin */ + cpup = phba->sli4_hba.cpu_map; + rc = 0; + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + cpup->channel_id = rc; + rc++; + if (rc >= phba->cfg_fcp_io_channel) + rc = 0; + } + /* * Enable sr-iov virtual functions if supported and configured * through the module parameter. 
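[Editor's note] The "Initialize io channels for round robin" loop in the hunk above hands each present CPU an IO channel, wrapping once the configured channel count is reached. A standalone rendering of that intended round-robin assignment is sketched below; the CPU and channel counts are example values only.

#include <stdio.h>

#define NR_CPUS        8    /* example CPU count               */
#define IO_CHANNELS    3    /* example cfg_fcp_io_channel value */

int main(void)
{
	int channel_id[NR_CPUS];
	int chan = 0;

	/* Walk every present CPU and hand out IO channels round-robin,
	 * wrapping once the configured channel count is reached. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		channel_id[cpu] = chan;
		if (++chan >= IO_CHANNELS)
			chan = 0;
	}

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d -> io channel %d\n", cpu, channel_id[cpu]);
	return 0;
}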
@@ -4642,6 +5367,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) return 0; +out_free_msix: + kfree(phba->sli4_hba.msix_entries); out_free_fcp_eq_hdl: kfree(phba->sli4_hba.fcp_eq_hdl); out_free_fcf_rr_bmask: @@ -4650,8 +5377,6 @@ out_remove_rpi_hdrs: lpfc_sli4_remove_rpi_hdrs(phba); out_free_active_sgl: lpfc_free_active_sgl(phba); -out_free_sgl_list: - lpfc_free_sgl_list(phba); out_destroy_cq_event_pool: lpfc_sli4_cq_event_pool_destroy(phba); out_free_bsmbx: @@ -4673,6 +5398,12 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) { struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; + /* Free memory allocated for msi-x interrupt vector to CPU mapping */ + kfree(phba->sli4_hba.cpu_map); + phba->sli4_hba.num_present_cpu = 0; + phba->sli4_hba.num_online_cpu = 0; + phba->sli4_hba.curr_disp_cpu = 0; + /* Free memory allocated for msi-x interrupt vector entries */ kfree(phba->sli4_hba.msix_entries); @@ -4688,10 +5419,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) /* Free the ELS sgl list */ lpfc_free_active_sgl(phba); - lpfc_free_sgl_list(phba); - - /* Free the SCSI sgl management array */ - kfree(phba->sli4_hba.lpfc_scsi_psb_array); + lpfc_free_els_sgl_list(phba); /* Free the completion queue EQ event pool */ lpfc_sli4_cq_event_release_all(phba); @@ -4784,8 +5512,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) init_waitqueue_head(&phba->work_waitq); /* Initialize the scsi buffer list used by driver for scsi IO */ - spin_lock_init(&phba->scsi_buf_list_lock); - INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); + spin_lock_init(&phba->scsi_buf_list_get_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); + spin_lock_init(&phba->scsi_buf_list_put_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); /* Initialize the fabric iocb list */ INIT_LIST_HEAD(&phba->fabric_iocb_list); @@ -4796,6 +5526,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) /* Initialize FCF connection rec list */ INIT_LIST_HEAD(&phba->fcf_conn_rec_list); + /* Initialize OAS configuration list */ + spin_lock_init(&phba->devicelock); + INIT_LIST_HEAD(&phba->luns); + return 0; } @@ -4918,29 +5652,42 @@ out_free_iocbq: } /** - * lpfc_free_sgl_list - Free sgl list. + * lpfc_free_sgl_list - Free a given sgl list. * @phba: pointer to lpfc hba data structure. + * @sglq_list: pointer to the head of sgl list. * - * This routine is invoked to free the driver's sgl list and memory. + * This routine is invoked to free a give sgl list and memory. **/ -static void -lpfc_free_sgl_list(struct lpfc_hba *phba) +void +lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list) { struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; + + list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) { + list_del(&sglq_entry->list); + lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); + kfree(sglq_entry); + } +} + +/** + * lpfc_free_els_sgl_list - Free els sgl list. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to free the driver's els sgl list and memory. 
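[Editor's note] The phase-1 setup above splits the single scsi_buf_list into a "get" list and a "put" list, each with its own lock. The consumer/producer side is not part of this hunk, so the following user-space sketch only shows the general pattern such a split enables: allocations drain one list, frees fill the other, and the two locks only meet on the refill path. All names here are illustrative.

#include <pthread.h>
#include <stddef.h>

struct buf { struct buf *next; };

static struct buf *get_list, *put_list;
static pthread_mutex_t get_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t put_lock = PTHREAD_MUTEX_INITIALIZER;

/* Consumer path: take from the "get" list; only when it runs dry are both
 * locks taken to splice the "put" list over. */
struct buf *buf_get(void)
{
	struct buf *b;

	pthread_mutex_lock(&get_lock);
	if (!get_list) {
		pthread_mutex_lock(&put_lock);
		get_list = put_list;        /* steal everything at once */
		put_list = NULL;
		pthread_mutex_unlock(&put_lock);
	}
	b = get_list;
	if (b)
		get_list = b->next;
	pthread_mutex_unlock(&get_lock);
	return b;
}

/* Producer path: return buffers to the "put" list without ever touching
 * the consumer's lock. */
void buf_put(struct buf *b)
{
	pthread_mutex_lock(&put_lock);
	b->next = put_list;
	put_list = b;
	pthread_mutex_unlock(&put_lock);
}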
+ **/ +static void +lpfc_free_els_sgl_list(struct lpfc_hba *phba) +{ LIST_HEAD(sglq_list); + /* Retrieve all els sgls from driver list */ spin_lock_irq(&phba->hbalock); list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); spin_unlock_irq(&phba->hbalock); - list_for_each_entry_safe(sglq_entry, sglq_next, - &sglq_list, list) { - list_del(&sglq_entry->list); - lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); - kfree(sglq_entry); - phba->sli4_hba.total_sglq_bufs--; - } - kfree(phba->sli4_hba.lpfc_els_sgl_array); + /* Now free the sgl list */ + lpfc_free_sgl_list(phba, &sglq_list); } /** @@ -4985,99 +5732,19 @@ lpfc_free_active_sgl(struct lpfc_hba *phba) * This routine is invoked to allocate and initizlize the driver's sgl * list and set up the sgl xritag tag array accordingly. * - * Return codes - * 0 - successful - * other values - error **/ -static int +static void lpfc_init_sgl_list(struct lpfc_hba *phba) { - struct lpfc_sglq *sglq_entry = NULL; - int i; - int els_xri_cnt; - - els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "2400 ELS XRI count %d.\n", - els_xri_cnt); /* Initialize and populate the sglq list per host/VF. */ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list); INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); - /* Sanity check on XRI management */ - if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2562 No room left for SCSI XRI allocation: " - "max_xri=%d, els_xri=%d\n", - phba->sli4_hba.max_cfg_param.max_xri, - els_xri_cnt); - return -ENOMEM; - } - - /* Allocate memory for the ELS XRI management array */ - phba->sli4_hba.lpfc_els_sgl_array = - kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt), - GFP_KERNEL); - - if (!phba->sli4_hba.lpfc_els_sgl_array) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2401 Failed to allocate memory for ELS " - "XRI management array of size %d.\n", - els_xri_cnt); - return -ENOMEM; - } + /* els xri-sgl book keeping */ + phba->sli4_hba.els_xri_cnt = 0; - /* Keep the SCSI XRI into the XRI management array */ - phba->sli4_hba.scsi_xri_max = - phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; + /* scsi xri-buffer book keeping */ phba->sli4_hba.scsi_xri_cnt = 0; - phba->sli4_hba.lpfc_scsi_psb_array = - kzalloc((sizeof(struct lpfc_scsi_buf *) * - phba->sli4_hba.scsi_xri_max), GFP_KERNEL); - - if (!phba->sli4_hba.lpfc_scsi_psb_array) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2563 Failed to allocate memory for SCSI " - "XRI management array of size %d.\n", - phba->sli4_hba.scsi_xri_max); - kfree(phba->sli4_hba.lpfc_els_sgl_array); - return -ENOMEM; - } - - for (i = 0; i < els_xri_cnt; i++) { - sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL); - if (sglq_entry == NULL) { - printk(KERN_ERR "%s: only allocated %d sgls of " - "expected %d count. 
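[Editor's note] The new lpfc_free_els_sgl_list() above splices the shared list to a private head while holding the lock, then frees the entries outside it. The user-space sketch below shows that "splice under the lock, free off-lock" shape with a plain singly-linked list; the structures and locking primitives are stand-ins, not the driver's.

#include <pthread.h>
#include <stdlib.h>

struct sgl { struct sgl *next; void *virt; };

static struct sgl *shared_sgl_list;
static pthread_mutex_t hba_lock = PTHREAD_MUTEX_INITIALIZER;

void free_sgl_list(void)
{
	struct sgl *local, *entry, *next;

	pthread_mutex_lock(&hba_lock);
	local = shared_sgl_list;        /* splice: take the whole list */
	shared_sgl_list = NULL;
	pthread_mutex_unlock(&hba_lock);

	/* Walk the private copy with a saved next pointer so entries can
	 * be freed safely, as list_for_each_entry_safe() does. */
	for (entry = local; entry; entry = next) {
		next = entry->next;
		free(entry->virt);
		free(entry);
	}
}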
Unloading driver.\n", - __func__, i, els_xri_cnt); - goto out_free_mem; - } - - sglq_entry->buff_type = GEN_BUFF_TYPE; - sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys); - if (sglq_entry->virt == NULL) { - kfree(sglq_entry); - printk(KERN_ERR "%s: failed to allocate mbuf.\n" - "Unloading driver.\n", __func__); - goto out_free_mem; - } - sglq_entry->sgl = sglq_entry->virt; - memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); - - /* The list order is used by later block SGL registraton */ - spin_lock_irq(&phba->hbalock); - sglq_entry->state = SGL_FREED; - list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); - phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry; - phba->sli4_hba.total_sglq_bufs++; - spin_unlock_irq(&phba->hbalock); - } - return 0; - -out_free_mem: - kfree(phba->sli4_hba.lpfc_scsi_psb_array); - lpfc_free_sgl_list(phba); - return -ENOMEM; } /** @@ -5158,8 +5825,7 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) * rpi is normalized to a zero base because the physical rpi is * port based. */ - curr_rpi_range = phba->sli4_hba.next_rpi - - phba->sli4_hba.max_cfg_param.rpi_base; + curr_rpi_range = phba->sli4_hba.next_rpi; spin_unlock_irq(&phba->hbalock); /* @@ -5310,6 +5976,10 @@ lpfc_hba_free(struct lpfc_hba *phba) /* Release the driver assigned board number */ idr_remove(&lpfc_hba_index, phba->brd_no); + /* Free memory allocated with sli rings */ + kfree(phba->sli.ring); + phba->sli.ring = NULL; + kfree(phba); return; } @@ -5380,14 +6050,45 @@ lpfc_destroy_shost(struct lpfc_hba *phba) static void lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) { + uint32_t old_mask; + uint32_t old_guard; + int pagecnt = 10; if (lpfc_prot_mask && lpfc_prot_guard) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "1478 Registering BlockGuard with the " "SCSI layer\n"); - scsi_host_set_prot(shost, lpfc_prot_mask); - scsi_host_set_guard(shost, lpfc_prot_guard); + + old_mask = lpfc_prot_mask; + old_guard = lpfc_prot_guard; + + /* Only allow supported values */ + lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | + SHOST_DIX_TYPE0_PROTECTION | + SHOST_DIX_TYPE1_PROTECTION); + lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC); + + /* DIF Type 1 protection for profiles AST1/C1 is end to end */ + if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION) + lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; + + if (lpfc_prot_mask && lpfc_prot_guard) { + if ((old_mask != lpfc_prot_mask) || + (old_guard != lpfc_prot_guard)) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1475 Registering BlockGuard with the " + "SCSI layer: mask %d guard %d\n", + lpfc_prot_mask, lpfc_prot_guard); + + scsi_host_set_prot(shost, lpfc_prot_mask); + scsi_host_set_guard(shost, lpfc_prot_guard); + } else + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1479 Not Registering BlockGuard with the SCSI " + "layer, Bad protection parameters: %d %d\n", + old_mask, old_guard); } + if (!_dump_buf_data) { while (pagecnt) { spin_lock_init(&_dump_buf_lock); @@ -5750,10 +6451,9 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba) readl(phba->sli4_hba.u.if_type2. 
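[Editor's note] The reworked lpfc_setup_bg() above no longer passes the BlockGuard module parameters straight to the SCSI layer: unsupported bits are stripped, DIX type 1 is promoted to include end-to-end DIF type 1, and registration is skipped if nothing usable remains. A minimal sketch of that sanitization follows; the bit values mirror the SCSI host protection definitions and the function name is invented.

#include <stdio.h>

#define SHOST_DIF_TYPE1_PROTECTION  (1 << 0)
#define SHOST_DIX_TYPE0_PROTECTION  (1 << 3)
#define SHOST_DIX_TYPE1_PROTECTION  (1 << 4)
#define SHOST_DIX_GUARD_CRC         (1 << 0)
#define SHOST_DIX_GUARD_IP          (1 << 1)

/* Strip unsupported bits from the module parameters and, for DIX type 1,
 * force end-to-end DIF type 1 as well. Returns non-zero if the result is
 * still worth registering with the SCSI layer. */
static int sanitize_bg(unsigned int *mask, unsigned int *guard)
{
	*mask &= SHOST_DIF_TYPE1_PROTECTION |
		 SHOST_DIX_TYPE0_PROTECTION |
		 SHOST_DIX_TYPE1_PROTECTION;
	*guard &= SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC;

	if (*mask == SHOST_DIX_TYPE1_PROTECTION)
		*mask |= SHOST_DIF_TYPE1_PROTECTION;

	return *mask && *guard;
}

int main(void)
{
	unsigned int mask = 0xff, guard = 0xff;    /* deliberately too broad */

	if (sanitize_bg(&mask, &guard))
		printf("register BlockGuard: mask 0x%x guard 0x%x\n", mask, guard);
	else
		printf("bad protection parameters, not registering\n");
	return 0;
}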
ERR2regaddr); lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2888 Port Error Detected " - "during POST: " - "port status reg 0x%x, " - "port_smphr reg 0x%x, " + "2888 Unrecoverable port error " + "following POST: port status reg " + "0x%x, port_smphr reg 0x%x, " "error 1=0x%x, error 2=0x%x\n", reg_data.word0, portsmphr_reg.word0, @@ -5813,9 +6513,11 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_SEM_OFFSET; phba->sli4_hba.RQDBregaddr = - phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL; + phba->sli4_hba.conf_regs_memmap_p + + LPFC_ULP0_RQ_DOORBELL; phba->sli4_hba.WQDBregaddr = - phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL; + phba->sli4_hba.conf_regs_memmap_p + + LPFC_ULP0_WQ_DOORBELL; phba->sli4_hba.EQCQDBregaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; phba->sli4_hba.MQDBregaddr = @@ -5869,9 +6571,11 @@ lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) return -ENODEV; phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + - vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL); + vf * LPFC_VFR_PAGE_SIZE + + LPFC_ULP0_RQ_DOORBELL); phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + - vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL); + vf * LPFC_VFR_PAGE_SIZE + + LPFC_ULP0_WQ_DOORBELL); phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL); phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + @@ -6005,8 +6709,9 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) uint32_t shdr_status, shdr_add_status; struct lpfc_mbx_get_func_cfg *get_func_cfg; struct lpfc_rsrc_desc_fcfcoe *desc; + char *pdesc_0; uint32_t desc_count; - int length, i, rc = 0; + int length, i, rc = 0, rc2; pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { @@ -6074,7 +6779,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; - phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 
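[Editor's note] The GET_FUNCTION_CONFIG parsing above stops indexing the resource descriptors as a fixed-size array: the stride now comes from the first descriptor's length field, with a fallback to the legacy size when that field carries the reserved v0 value. The sketch below shows that variable-stride walk in isolation; the struct layout, type code and length values are illustrative only.

#include <stdint.h>
#include <stddef.h>

struct rsrc_desc {
	uint8_t type;
	uint8_t length;         /* descriptor length in bytes, 0 = legacy v0 */
	uint8_t payload[30];
};

#define DESC_TYPE_FCFCOE   0x43     /* illustrative type code        */
#define DESC_V0_LENGTH     32       /* legacy fixed descriptor size  */
#define DESC_MAX_NUM       2

static const struct rsrc_desc *
find_fcfcoe_desc(const uint8_t *buf)
{
	const struct rsrc_desc *d = (const struct rsrc_desc *)buf;
	size_t stride = d->length ? d->length : DESC_V0_LENGTH;

	/* Step through the packed descriptors by the reported stride
	 * instead of sizeof(struct rsrc_desc). */
	for (unsigned int i = 0; i < DESC_MAX_NUM; i++) {
		d = (const struct rsrc_desc *)(buf + stride * i);
		if (d->type == DESC_TYPE_FCFCOE)
			return d;   /* caller reads pf/vf numbers from it */
	}
	return NULL;        /* caller logs the "failed to find" message */
}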
(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; phba->max_vports = phba->max_vpi; @@ -6101,12 +6805,14 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) goto read_cfg_out; /* Reset the DFT_HBA_Q_DEPTH to the max xri */ - if (phba->cfg_hba_queue_depth > - (phba->sli4_hba.max_cfg_param.max_xri - - lpfc_sli4_get_els_iocb_cnt(phba))) - phba->cfg_hba_queue_depth = - phba->sli4_hba.max_cfg_param.max_xri - - lpfc_sli4_get_els_iocb_cnt(phba); + length = phba->sli4_hba.max_cfg_param.max_xri - + lpfc_sli4_get_els_iocb_cnt(phba); + if (phba->cfg_hba_queue_depth > length) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "3361 HBA queue depth changed from %d to %d\n", + phba->cfg_hba_queue_depth, length); + phba->cfg_hba_queue_depth = length; + } if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != LPFC_SLI_INTF_IF_TYPE_2) @@ -6119,18 +6825,17 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, length, LPFC_SLI4_MBX_EMBED); - rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); + rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); shdr = (union lpfc_sli4_cfg_shdr *) &pmb->u.mqe.un.sli4_config.header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); - if (rc || shdr_status || shdr_add_status) { + if (rc2 || shdr_status || shdr_add_status) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "3026 Mailbox failed , mbxCmd x%x " "GET_FUNCTION_CONFIG, mbxStatus x%x\n", bf_get(lpfc_mqe_command, &pmb->u.mqe), bf_get(lpfc_mqe_status, &pmb->u.mqe)); - rc = -EIO; goto read_cfg_out; } @@ -6138,11 +6843,18 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) get_func_cfg = &pmb->u.mqe.un.get_func_cfg; desc_count = get_func_cfg->func_cfg.rsrc_desc_count; + pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; + desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; + length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); + if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) + length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; + else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) + goto read_cfg_out; + for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { - desc = (struct lpfc_rsrc_desc_fcfcoe *) - &get_func_cfg->func_cfg.desc[i]; + desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); if (LPFC_RSRC_DESC_TYPE_FCFCOE == - bf_get(lpfc_rsrc_desc_pcie_type, desc)) { + bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { phba->sli4_hba.iov.pf_number = bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); phba->sli4_hba.iov.vf_number = @@ -6156,13 +6868,11 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) "3027 GET_FUNCTION_CONFIG: pf_number:%d, " "vf_number:%d\n", phba->sli4_hba.iov.pf_number, phba->sli4_hba.iov.vf_number); - else { + else lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "3028 GET_FUNCTION_CONFIG: failed to find " "Resrouce Descriptor:x%x\n", LPFC_RSRC_DESC_TYPE_FCFCOE); - rc = -EIO; - } read_cfg_out: mempool_free(pmb, phba->mbox_mem_pool); @@ -6243,77 +6953,60 @@ lpfc_setup_endian_order(struct lpfc_hba *phba) static int lpfc_sli4_queue_verify(struct lpfc_hba *phba) { - int cfg_fcp_wq_count; - int cfg_fcp_eq_count; + int cfg_fcp_io_channel; + uint32_t cpu; + uint32_t i = 0; + int fof_vectors = phba->cfg_fof ? 
1 : 0; /* - * Sanity check for confiugred queue parameters against the run-time + * Sanity check for configured queue parameters against the run-time * device parameters */ - /* Sanity check on FCP fast-path WQ parameters */ - cfg_fcp_wq_count = phba->cfg_fcp_wq_count; - if (cfg_fcp_wq_count > - (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) { - cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq - - LPFC_SP_WQN_DEF; - if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2581 Not enough WQs (%d) from " - "the pci function for supporting " - "FCP WQs (%d)\n", - phba->sli4_hba.max_cfg_param.max_wq, - phba->cfg_fcp_wq_count); - goto out_error; - } - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "2582 Not enough WQs (%d) from the pci " - "function for supporting the requested " - "FCP WQs (%d), the actual FCP WQs can " - "be supported: %d\n", - phba->sli4_hba.max_cfg_param.max_wq, - phba->cfg_fcp_wq_count, cfg_fcp_wq_count); - } - /* The actual number of FCP work queues adopted */ - phba->cfg_fcp_wq_count = cfg_fcp_wq_count; - - /* Sanity check on FCP fast-path EQ parameters */ - cfg_fcp_eq_count = phba->cfg_fcp_eq_count; - if (cfg_fcp_eq_count > - (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) { - cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq - - LPFC_SP_EQN_DEF; - if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) { + /* Sanity check on HBA EQ parameters */ + cfg_fcp_io_channel = phba->cfg_fcp_io_channel; + + /* It doesn't make sense to have more io channels then online CPUs */ + for_each_present_cpu(cpu) { + if (cpu_online(cpu)) + i++; + } + phba->sli4_hba.num_online_cpu = i; + phba->sli4_hba.num_present_cpu = lpfc_present_cpu; + phba->sli4_hba.curr_disp_cpu = 0; + + if (i < cfg_fcp_io_channel) { + lpfc_printf_log(phba, + KERN_ERR, LOG_INIT, + "3188 Reducing IO channels to match number of " + "online CPUs: from %d to %d\n", + cfg_fcp_io_channel, i); + cfg_fcp_io_channel = i; + } + + if (cfg_fcp_io_channel + fof_vectors > + phba->sli4_hba.max_cfg_param.max_eq) { + if (phba->sli4_hba.max_cfg_param.max_eq < + LPFC_FCP_IO_CHAN_MIN) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2574 Not enough EQs (%d) from the " "pci function for supporting FCP " "EQs (%d)\n", phba->sli4_hba.max_cfg_param.max_eq, - phba->cfg_fcp_eq_count); + phba->cfg_fcp_io_channel); goto out_error; } - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "2575 Not enough EQs (%d) from the pci " - "function for supporting the requested " - "FCP EQs (%d), the actual FCP EQs can " - "be supported: %d\n", - phba->sli4_hba.max_cfg_param.max_eq, - phba->cfg_fcp_eq_count, cfg_fcp_eq_count); - } - /* It does not make sense to have more EQs than WQs */ - if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) { - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "2593 The FCP EQ count(%d) cannot be greater " - "than the FCP WQ count(%d), limiting the " - "FCP EQ count to %d\n", cfg_fcp_eq_count, - phba->cfg_fcp_wq_count, - phba->cfg_fcp_wq_count); - cfg_fcp_eq_count = phba->cfg_fcp_wq_count; + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2575 Reducing IO channels to match number of " + "available EQs: from %d to %d\n", + cfg_fcp_io_channel, + phba->sli4_hba.max_cfg_param.max_eq); + cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq - + fof_vectors; } + /* The actual number of FCP event queues adopted */ - phba->cfg_fcp_eq_count = cfg_fcp_eq_count; - /* The overall number of event queues used */ - phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF; + 
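[Editor's note] The reworked lpfc_sli4_queue_verify() above caps the requested FCP IO channel count twice: first by the number of online CPUs, then by the EQs the PCI function actually provides minus any vector reserved for the flash-optimized fabric path. The standalone sketch below reproduces that policy with printf in place of lpfc_printf_log; the message IDs are the ones used in the patch.

#include <stdio.h>

static int verify_io_channels(int requested, int online_cpus,
			      int max_eq, int fof_vectors)
{
	int chans = requested;

	if (chans > online_cpus) {
		printf("3188 Reducing IO channels to match number of "
		       "online CPUs: from %d to %d\n", chans, online_cpus);
		chans = online_cpus;
	}
	if (chans + fof_vectors > max_eq) {
		printf("2575 Reducing IO channels to match number of "
		       "available EQs: from %d to %d\n",
		       chans, max_eq - fof_vectors);
		chans = max_eq - fof_vectors;
	}
	return chans;       /* the adopted cfg_fcp_io_channel value */
}

int main(void)
{
	printf("adopted channels: %d\n", verify_io_channels(16, 8, 6, 1));
	return 0;
}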
phba->cfg_fcp_io_channel = cfg_fcp_io_channel; /* Get EQ depth from module parameter, fake the default for now */ phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; @@ -6338,7 +7031,7 @@ out_error: * we just use some constant number as place holder. * * Return codes - * 0 - sucessful + * 0 - successful * -ENOMEM - No availble memory * -EIO - The mailbox failed to complete successfully. **/ @@ -6346,50 +7039,104 @@ int lpfc_sli4_queue_create(struct lpfc_hba *phba) { struct lpfc_queue *qdesc; - int fcp_eqidx, fcp_cqidx, fcp_wqidx; + int idx; /* - * Create Event Queues (EQs) + * Create HBA Record arrays. */ + if (!phba->cfg_fcp_io_channel) + return -ERANGE; - /* Create slow path event queue */ - qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, - phba->sli4_hba.eq_ecount); - if (!qdesc) { + phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; + phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; + phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; + phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; + phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; + phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; + + phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) * + phba->cfg_fcp_io_channel), GFP_KERNEL); + if (!phba->sli4_hba.hba_eq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2576 Failed allocate memory for " + "fast-path EQ record array\n"); + goto out_error; + } + + phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * + phba->cfg_fcp_io_channel), GFP_KERNEL); + if (!phba->sli4_hba.fcp_cq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2577 Failed allocate memory for fast-path " + "CQ record array\n"); + goto out_error; + } + + phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * + phba->cfg_fcp_io_channel), GFP_KERNEL); + if (!phba->sli4_hba.fcp_wq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0496 Failed allocate slow-path EQ\n"); + "2578 Failed allocate memory for fast-path " + "WQ record array\n"); goto out_error; } - phba->sli4_hba.sp_eq = qdesc; /* - * Create fast-path FCP Event Queue(s). The cfg_fcp_eq_count can be - * zero whenever there is exactly one interrupt vector. This is not - * an error. + * Since the first EQ can have multiple CQs associated with it, + * this array is used to quickly see if we have a FCP fast-path + * CQ match. */ - if (phba->cfg_fcp_eq_count) { - phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) * - phba->cfg_fcp_eq_count), GFP_KERNEL); - if (!phba->sli4_hba.fp_eq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2576 Failed allocate memory for " - "fast-path EQ record array\n"); - goto out_free_sp_eq; - } + phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) * + phba->cfg_fcp_io_channel), GFP_KERNEL); + if (!phba->sli4_hba.fcp_cq_map) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2545 Failed allocate memory for fast-path " + "CQ map\n"); + goto out_error; } - for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { + + /* + * Create HBA Event Queues (EQs). The cfg_fcp_io_channel specifies + * how many EQs to create. 
+ */ + for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { + + /* Create EQs */ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, phba->sli4_hba.eq_ecount); if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0497 Failed allocate fast-path EQ\n"); - goto out_free_fp_eq; + "0497 Failed allocate EQ (%d)\n", idx); + goto out_error; } - phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc; + phba->sli4_hba.hba_eq[idx] = qdesc; + + /* Create Fast Path FCP CQs */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0499 Failed allocate fast-path FCP " + "CQ (%d)\n", idx); + goto out_error; + } + phba->sli4_hba.fcp_cq[idx] = qdesc; + + /* Create Fast Path FCP WQs */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, + phba->sli4_hba.wq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0503 Failed allocate fast-path FCP " + "WQ (%d)\n", idx); + goto out_error; + } + phba->sli4_hba.fcp_wq[idx] = qdesc; } + /* - * Create Complete Queues (CQs) + * Create Slow Path Completion Queues (CQs) */ /* Create slow-path Mailbox Command Complete Queue */ @@ -6398,7 +7145,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0500 Failed allocate slow-path mailbox CQ\n"); - goto out_free_fp_eq; + goto out_error; } phba->sli4_hba.mbx_cq = qdesc; @@ -6408,59 +7155,29 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0501 Failed allocate slow-path ELS CQ\n"); - goto out_free_mbx_cq; + goto out_error; } phba->sli4_hba.els_cq = qdesc; /* - * Create fast-path FCP Completion Queue(s), one-to-one with FCP EQs. - * If there are no FCP EQs then create exactly one FCP CQ. 
+ * Create Slow Path Work Queues (WQs) */ - if (phba->cfg_fcp_eq_count) - phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * - phba->cfg_fcp_eq_count), - GFP_KERNEL); - else - phba->sli4_hba.fcp_cq = kzalloc(sizeof(struct lpfc_queue *), - GFP_KERNEL); - if (!phba->sli4_hba.fcp_cq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2577 Failed allocate memory for fast-path " - "CQ record array\n"); - goto out_free_els_cq; - } - fcp_cqidx = 0; - do { - qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, - phba->sli4_hba.cq_ecount); - if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0499 Failed allocate fast-path FCP " - "CQ (%d)\n", fcp_cqidx); - goto out_free_fcp_cq; - } - phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc; - } while (++fcp_cqidx < phba->cfg_fcp_eq_count); /* Create Mailbox Command Queue */ - phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; - phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, phba->sli4_hba.mq_ecount); if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0505 Failed allocate slow-path MQ\n"); - goto out_free_fcp_cq; + goto out_error; } phba->sli4_hba.mbx_wq = qdesc; /* - * Create all the Work Queues (WQs) + * Create ELS Work Queues */ - phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; - phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; /* Create slow-path ELS Work Queue */ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, @@ -6468,36 +7185,13 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0504 Failed allocate slow-path ELS WQ\n"); - goto out_free_mbx_wq; + goto out_error; } phba->sli4_hba.els_wq = qdesc; - /* Create fast-path FCP Work Queue(s) */ - phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * - phba->cfg_fcp_wq_count), GFP_KERNEL); - if (!phba->sli4_hba.fcp_wq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2578 Failed allocate memory for fast-path " - "WQ record array\n"); - goto out_free_els_wq; - } - for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { - qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, - phba->sli4_hba.wq_ecount); - if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0503 Failed allocate fast-path FCP " - "WQ (%d)\n", fcp_wqidx); - goto out_free_fcp_wq; - } - phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc; - } - /* * Create Receive Queue (RQ) */ - phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; - phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; /* Create Receive Queue for header */ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, @@ -6505,7 +7199,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0506 Failed allocate receive HRQ\n"); - goto out_free_fcp_wq; + goto out_error; } phba->sli4_hba.hdr_rq = qdesc; @@ -6515,52 +7209,17 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0507 Failed allocate receive DRQ\n"); - goto out_free_hdr_rq; + goto out_error; } phba->sli4_hba.dat_rq = qdesc; + /* Create the Queues needed for Flash Optimized Fabric operations */ + if (phba->cfg_fof) + lpfc_fof_queue_create(phba); return 0; -out_free_hdr_rq: - lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); - phba->sli4_hba.hdr_rq = NULL; -out_free_fcp_wq: - for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) { - lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]); - phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL; - } - kfree(phba->sli4_hba.fcp_wq); - phba->sli4_hba.fcp_wq = 
NULL; -out_free_els_wq: - lpfc_sli4_queue_free(phba->sli4_hba.els_wq); - phba->sli4_hba.els_wq = NULL; -out_free_mbx_wq: - lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); - phba->sli4_hba.mbx_wq = NULL; -out_free_fcp_cq: - for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) { - lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]); - phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; - } - kfree(phba->sli4_hba.fcp_cq); - phba->sli4_hba.fcp_cq = NULL; -out_free_els_cq: - lpfc_sli4_queue_free(phba->sli4_hba.els_cq); - phba->sli4_hba.els_cq = NULL; -out_free_mbx_cq: - lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); - phba->sli4_hba.mbx_cq = NULL; -out_free_fp_eq: - for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) { - lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]); - phba->sli4_hba.fp_eq[fcp_eqidx] = NULL; - } - kfree(phba->sli4_hba.fp_eq); - phba->sli4_hba.fp_eq = NULL; -out_free_sp_eq: - lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); - phba->sli4_hba.sp_eq = NULL; out_error: + lpfc_sli4_queue_destroy(phba); return -ENOMEM; } @@ -6579,58 +7238,89 @@ out_error: void lpfc_sli4_queue_destroy(struct lpfc_hba *phba) { - int fcp_qidx; + int idx; + + if (phba->cfg_fof) + lpfc_fof_queue_destroy(phba); + + if (phba->sli4_hba.hba_eq != NULL) { + /* Release HBA event queue */ + for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { + if (phba->sli4_hba.hba_eq[idx] != NULL) { + lpfc_sli4_queue_free( + phba->sli4_hba.hba_eq[idx]); + phba->sli4_hba.hba_eq[idx] = NULL; + } + } + kfree(phba->sli4_hba.hba_eq); + phba->sli4_hba.hba_eq = NULL; + } + + if (phba->sli4_hba.fcp_cq != NULL) { + /* Release FCP completion queue */ + for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { + if (phba->sli4_hba.fcp_cq[idx] != NULL) { + lpfc_sli4_queue_free( + phba->sli4_hba.fcp_cq[idx]); + phba->sli4_hba.fcp_cq[idx] = NULL; + } + } + kfree(phba->sli4_hba.fcp_cq); + phba->sli4_hba.fcp_cq = NULL; + } + + if (phba->sli4_hba.fcp_wq != NULL) { + /* Release FCP work queue */ + for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { + if (phba->sli4_hba.fcp_wq[idx] != NULL) { + lpfc_sli4_queue_free( + phba->sli4_hba.fcp_wq[idx]); + phba->sli4_hba.fcp_wq[idx] = NULL; + } + } + kfree(phba->sli4_hba.fcp_wq); + phba->sli4_hba.fcp_wq = NULL; + } + + /* Release FCP CQ mapping array */ + if (phba->sli4_hba.fcp_cq_map != NULL) { + kfree(phba->sli4_hba.fcp_cq_map); + phba->sli4_hba.fcp_cq_map = NULL; + } /* Release mailbox command work queue */ - lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); - phba->sli4_hba.mbx_wq = NULL; + if (phba->sli4_hba.mbx_wq != NULL) { + lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); + phba->sli4_hba.mbx_wq = NULL; + } /* Release ELS work queue */ - lpfc_sli4_queue_free(phba->sli4_hba.els_wq); - phba->sli4_hba.els_wq = NULL; - - /* Release FCP work queue */ - if (phba->sli4_hba.fcp_wq != NULL) - for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; - fcp_qidx++) - lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]); - kfree(phba->sli4_hba.fcp_wq); - phba->sli4_hba.fcp_wq = NULL; + if (phba->sli4_hba.els_wq != NULL) { + lpfc_sli4_queue_free(phba->sli4_hba.els_wq); + phba->sli4_hba.els_wq = NULL; + } /* Release unsolicited receive queue */ - lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); - phba->sli4_hba.hdr_rq = NULL; - lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); - phba->sli4_hba.dat_rq = NULL; + if (phba->sli4_hba.hdr_rq != NULL) { + lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); + phba->sli4_hba.hdr_rq = NULL; + } + if (phba->sli4_hba.dat_rq != NULL) { + lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); + phba->sli4_hba.dat_rq = NULL; + } 
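[Editor's note] The create/destroy rework above replaces the long chain of out_free_* unwind labels with a single out_error path that calls the destroy routine, which in turn tolerates NULL pointers. The sketch below shows that shape with one EQ/CQ/WQ triplet allocated per IO channel; the types and allocators are placeholders, not the driver's.

#include <stdlib.h>

struct queue { int id; };

struct hba {
	int nchan;
	struct queue **hba_eq, **fcp_cq, **fcp_wq;
};

static struct queue *queue_alloc(void) { return calloc(1, sizeof(struct queue)); }
static void queues_destroy(struct hba *h);    /* tolerates NULLs */

static int queues_create(struct hba *h)
{
	h->hba_eq = calloc(h->nchan, sizeof(*h->hba_eq));
	h->fcp_cq = calloc(h->nchan, sizeof(*h->fcp_cq));
	h->fcp_wq = calloc(h->nchan, sizeof(*h->fcp_wq));
	if (!h->hba_eq || !h->fcp_cq || !h->fcp_wq)
		goto out_error;

	for (int i = 0; i < h->nchan; i++) {
		if (!(h->hba_eq[i] = queue_alloc()) ||
		    !(h->fcp_cq[i] = queue_alloc()) ||
		    !(h->fcp_wq[i] = queue_alloc()))
			goto out_error;
	}
	return 0;

out_error:
	queues_destroy(h);    /* single cleanup path, mirroring the patch */
	return -1;
}

static void queues_destroy(struct hba *h)
{
	for (int i = 0; h->hba_eq && i < h->nchan; i++) free(h->hba_eq[i]);
	for (int i = 0; h->fcp_cq && i < h->nchan; i++) free(h->fcp_cq[i]);
	for (int i = 0; h->fcp_wq && i < h->nchan; i++) free(h->fcp_wq[i]);
	free(h->hba_eq); free(h->fcp_cq); free(h->fcp_wq);
	h->hba_eq = h->fcp_cq = h->fcp_wq = NULL;
}

int main(void)
{
	struct hba h = { .nchan = 4 };

	if (queues_create(&h) == 0)
		queues_destroy(&h);
	return 0;
}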
/* Release ELS complete queue */ - lpfc_sli4_queue_free(phba->sli4_hba.els_cq); - phba->sli4_hba.els_cq = NULL; + if (phba->sli4_hba.els_cq != NULL) { + lpfc_sli4_queue_free(phba->sli4_hba.els_cq); + phba->sli4_hba.els_cq = NULL; + } /* Release mailbox command complete queue */ - lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); - phba->sli4_hba.mbx_cq = NULL; - - /* Release FCP response complete queue */ - fcp_qidx = 0; - if (phba->sli4_hba.fcp_cq != NULL) - do - lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]); - while (++fcp_qidx < phba->cfg_fcp_eq_count); - kfree(phba->sli4_hba.fcp_cq); - phba->sli4_hba.fcp_cq = NULL; - - /* Release fast-path event queue */ - if (phba->sli4_hba.fp_eq != NULL) - for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; - fcp_qidx++) - lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]); - kfree(phba->sli4_hba.fp_eq); - phba->sli4_hba.fp_eq = NULL; - - /* Release slow-path event queue */ - lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); - phba->sli4_hba.sp_eq = NULL; + if (phba->sli4_hba.mbx_cq != NULL) { + lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); + phba->sli4_hba.mbx_cq = NULL; + } return; } @@ -6650,61 +7340,171 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba) int lpfc_sli4_queue_setup(struct lpfc_hba *phba) { + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring; int rc = -ENOMEM; int fcp_eqidx, fcp_cqidx, fcp_wqidx; int fcp_cq_index = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + LPFC_MBOXQ_t *mboxq; + uint32_t length; - /* - * Set up Event Queues (EQs) - */ - - /* Set up slow-path event queue */ - if (!phba->sli4_hba.sp_eq) { + /* Check for dual-ULP support */ + mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0520 Slow-path EQ not allocated\n"); - goto out_error; + "3249 Unable to allocate memory for " + "QUERY_FW_CFG mailbox command\n"); + return -ENOMEM; } - rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq, - LPFC_SP_DEF_IMAX); - if (rc) { + length = (sizeof(struct lpfc_mbx_query_fw_config) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_QUERY_FW_CFG, + length, LPFC_SLI4_MBX_EMBED); + + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + + shdr = (union lpfc_sli4_cfg_shdr *) + &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0521 Failed setup of slow-path EQ: " - "rc = 0x%x\n", rc); + "3250 QUERY_FW_CFG mailbox failed with status " + "x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + if (rc != MBX_TIMEOUT) + mempool_free(mboxq, phba->mbox_mem_pool); + rc = -ENXIO; goto out_error; } + + phba->sli4_hba.fw_func_mode = + mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; + phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; + phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2583 Slow-path EQ setup: queue-id=%d\n", - phba->sli4_hba.sp_eq->queue_id); + "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " + "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, + phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); + + if (rc != MBX_TIMEOUT) + mempool_free(mboxq, phba->mbox_mem_pool); + + /* + * Set up HBA Event Queues (EQs) + */ - /* Set 
up fast-path event queue */ - if (phba->cfg_fcp_eq_count && !phba->sli4_hba.fp_eq) { + /* Set up HBA event queue */ + if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3147 Fast-path EQs not allocated\n"); rc = -ENOMEM; - goto out_destroy_sp_eq; + goto out_error; } - for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { - if (!phba->sli4_hba.fp_eq[fcp_eqidx]) { + for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) { + if (!phba->sli4_hba.hba_eq[fcp_eqidx]) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0522 Fast-path EQ (%d) not " "allocated\n", fcp_eqidx); rc = -ENOMEM; - goto out_destroy_fp_eq; + goto out_destroy_hba_eq; } - rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx], - phba->cfg_fcp_imax); + rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx], + (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel)); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0523 Failed setup of fast-path EQ " "(%d), rc = 0x%x\n", fcp_eqidx, rc); - goto out_destroy_fp_eq; + goto out_destroy_hba_eq; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2584 Fast-path EQ setup: " + "2584 HBA EQ setup: " "queue[%d]-id=%d\n", fcp_eqidx, - phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id); + phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id); } + /* Set up fast-path FCP Response Complete Queue */ + if (!phba->sli4_hba.fcp_cq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3148 Fast-path FCP CQ array not " + "allocated\n"); + rc = -ENOMEM; + goto out_destroy_hba_eq; + } + + for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) { + if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0526 Fast-path FCP CQ (%d) not " + "allocated\n", fcp_cqidx); + rc = -ENOMEM; + goto out_destroy_fcp_cq; + } + rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], + phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0527 Failed setup of fast-path FCP " + "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc); + goto out_destroy_fcp_cq; + } + + /* Setup fcp_cq_map for fast lookup */ + phba->sli4_hba.fcp_cq_map[fcp_cqidx] = + phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2588 FCP CQ setup: cq[%d]-id=%d, " + "parent seq[%d]-id=%d\n", + fcp_cqidx, + phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, + fcp_cqidx, + phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id); + } + + /* Set up fast-path FCP Work Queue */ + if (!phba->sli4_hba.fcp_wq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3149 Fast-path FCP WQ array not " + "allocated\n"); + rc = -ENOMEM; + goto out_destroy_fcp_cq; + } + + for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) { + if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0534 Fast-path FCP WQ (%d) not " + "allocated\n", fcp_wqidx); + rc = -ENOMEM; + goto out_destroy_fcp_wq; + } + rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], + phba->sli4_hba.fcp_cq[fcp_wqidx], + LPFC_FCP); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0535 Failed setup of fast-path FCP " + "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); + goto out_destroy_fcp_wq; + } + + /* Bind this WQ to the next FCP ring */ + pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx]; + pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx]; + phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2591 FCP WQ setup: 
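[Editor's note] In the EQ setup above, each per-channel EQ is now created with cfg_fcp_imax divided by cfg_fcp_io_channel, i.e. the per-port interrupt budget is split evenly across the IO channels (treat the exact units of cfg_fcp_imax as an assumption here). A tiny sketch of that division follows; both values are example inputs.

#include <stdio.h>

int main(void)
{
	unsigned int cfg_fcp_imax = 50000;   /* example module parameter value */
	unsigned int io_channels  = 4;       /* example cfg_fcp_io_channel     */

	/* Each HBA EQ gets an equal share of the interrupt budget. */
	for (unsigned int eq = 0; eq < io_channels; eq++)
		printf("EQ %u: imax %u\n", eq, cfg_fcp_imax / io_channels);
	return 0;
}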
wq[%d]-id=%d, " + "parent cq[%d]-id=%d\n", + fcp_wqidx, + phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, + fcp_cq_index, + phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id); + } /* * Set up Complete Queues (CQs) */ @@ -6714,20 +7514,20 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0528 Mailbox CQ not allocated\n"); rc = -ENOMEM; - goto out_destroy_fp_eq; + goto out_destroy_fcp_wq; } - rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq, - LPFC_MCQ, LPFC_MBOX); + rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, + phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0529 Failed setup of slow-path mailbox CQ: " "rc = 0x%x\n", rc); - goto out_destroy_fp_eq; + goto out_destroy_fcp_wq; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", phba->sli4_hba.mbx_cq->queue_id, - phba->sli4_hba.sp_eq->queue_id); + phba->sli4_hba.hba_eq[0]->queue_id); /* Set up slow-path ELS Complete Queue */ if (!phba->sli4_hba.els_cq) { @@ -6736,8 +7536,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) rc = -ENOMEM; goto out_destroy_mbx_cq; } - rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq, - LPFC_WCQ, LPFC_ELS); + rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, + phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0531 Failed setup of slow-path ELS CQ: " @@ -6747,52 +7547,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", phba->sli4_hba.els_cq->queue_id, - phba->sli4_hba.sp_eq->queue_id); - - /* Set up fast-path FCP Response Complete Queue */ - if (!phba->sli4_hba.fcp_cq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3148 Fast-path FCP CQ array not " - "allocated\n"); - rc = -ENOMEM; - goto out_destroy_els_cq; - } - fcp_cqidx = 0; - do { - if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0526 Fast-path FCP CQ (%d) not " - "allocated\n", fcp_cqidx); - rc = -ENOMEM; - goto out_destroy_fcp_cq; - } - if (phba->cfg_fcp_eq_count) - rc = lpfc_cq_create(phba, - phba->sli4_hba.fcp_cq[fcp_cqidx], - phba->sli4_hba.fp_eq[fcp_cqidx], - LPFC_WCQ, LPFC_FCP); - else - rc = lpfc_cq_create(phba, - phba->sli4_hba.fcp_cq[fcp_cqidx], - phba->sli4_hba.sp_eq, - LPFC_WCQ, LPFC_FCP); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0527 Failed setup of fast-path FCP " - "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc); - goto out_destroy_fcp_cq; - } - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2588 FCP CQ setup: cq[%d]-id=%d, " - "parent %seq[%d]-id=%d\n", - fcp_cqidx, - phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, - (phba->cfg_fcp_eq_count) ? "" : "sp_", - fcp_cqidx, - (phba->cfg_fcp_eq_count) ? 
- phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id : - phba->sli4_hba.sp_eq->queue_id); - } while (++fcp_cqidx < phba->cfg_fcp_eq_count); + phba->sli4_hba.hba_eq[0]->queue_id); /* * Set up all the Work Queues (WQs) @@ -6803,7 +7558,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0538 Slow-path MQ not allocated\n"); rc = -ENOMEM; - goto out_destroy_fcp_cq; + goto out_destroy_els_cq; } rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, phba->sli4_hba.mbx_cq, LPFC_MBOX); @@ -6811,7 +7566,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0539 Failed setup of slow-path MQ: " "rc = 0x%x\n", rc); - goto out_destroy_fcp_cq; + goto out_destroy_els_cq; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", @@ -6833,49 +7588,17 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) "rc = 0x%x\n", rc); goto out_destroy_mbx_wq; } + + /* Bind this WQ to the ELS ring */ + pring = &psli->ring[LPFC_ELS_RING]; + pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq; + phba->sli4_hba.els_cq->pring = pring; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", phba->sli4_hba.els_wq->queue_id, phba->sli4_hba.els_cq->queue_id); - /* Set up fast-path FCP Work Queue */ - if (!phba->sli4_hba.fcp_wq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3149 Fast-path FCP WQ array not " - "allocated\n"); - rc = -ENOMEM; - goto out_destroy_els_wq; - } - for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { - if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0534 Fast-path FCP WQ (%d) not " - "allocated\n", fcp_wqidx); - rc = -ENOMEM; - goto out_destroy_fcp_wq; - } - rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], - phba->sli4_hba.fcp_cq[fcp_cq_index], - LPFC_FCP); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0535 Failed setup of fast-path FCP " - "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); - goto out_destroy_fcp_wq; - } - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2591 FCP WQ setup: wq[%d]-id=%d, " - "parent cq[%d]-id=%d\n", - fcp_wqidx, - phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, - fcp_cq_index, - phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); - /* Round robin FCP Work Queue's Completion Queue assignment */ - if (phba->cfg_fcp_eq_count) - fcp_cq_index = ((fcp_cq_index + 1) % - phba->cfg_fcp_eq_count); - } - /* * Create Receive Queue (RQ) */ @@ -6883,7 +7606,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0540 Receive Queue not allocated\n"); rc = -ENOMEM; - goto out_destroy_fcp_wq; + goto out_destroy_els_wq; } lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ); @@ -6904,27 +7627,37 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) phba->sli4_hba.hdr_rq->queue_id, phba->sli4_hba.dat_rq->queue_id, phba->sli4_hba.els_cq->queue_id); + + if (phba->cfg_fof) { + rc = lpfc_fof_queue_setup(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0549 Failed setup of FOF Queues: " + "rc = 0x%x\n", rc); + goto out_destroy_els_rq; + } + } return 0; -out_destroy_fcp_wq: - for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) - lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); +out_destroy_els_rq: + lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); out_destroy_els_wq: lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); out_destroy_mbx_wq: lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); -out_destroy_fcp_cq: - for (--fcp_cqidx; 
fcp_cqidx >= 0; fcp_cqidx--) - lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); out_destroy_els_cq: lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); out_destroy_mbx_cq: lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); -out_destroy_fp_eq: +out_destroy_fcp_wq: + for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) + lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); +out_destroy_fcp_cq: + for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) + lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); +out_destroy_hba_eq: for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) - lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]); -out_destroy_sp_eq: - lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); + lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]); out_error: return rc; } @@ -6946,6 +7679,9 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba) { int fcp_qidx; + /* Unset the queues created for Flash Optimized Fabric operations */ + if (phba->cfg_fof) + lpfc_fof_queue_destroy(phba); /* Unset mailbox command work queue */ lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); /* Unset ELS work queue */ @@ -6953,27 +7689,27 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba) /* Unset unsolicited receive queue */ lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); /* Unset FCP work queue */ - for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) - lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); + if (phba->sli4_hba.fcp_wq) { + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; + fcp_qidx++) + lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); + } /* Unset mailbox command complete queue */ lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); /* Unset ELS complete queue */ lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); /* Unset FCP response complete queue */ if (phba->sli4_hba.fcp_cq) { - fcp_qidx = 0; - do { + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; + fcp_qidx++) lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); - } while (++fcp_qidx < phba->cfg_fcp_eq_count); } /* Unset fast-path event queue */ - if (phba->sli4_hba.fp_eq) { - for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; + if (phba->sli4_hba.hba_eq) { + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; fcp_qidx++) - lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); + lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]); } - /* Unset slow-path event queue */ - lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); } /** @@ -7163,6 +7899,7 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) uint32_t rdy_chk, num_resets = 0, reset_again = 0; union lpfc_sli4_cfg_shdr *shdr; struct lpfc_register reg_data; + uint16_t devid; if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); switch (if_type) { @@ -7209,7 +7946,9 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) LPFC_SLIPORT_INIT_PORT); writel(reg_data.word0, phba->sli4_hba.u.if_type2. CTRLregaddr); - + /* flush */ + pci_read_config_word(phba->pcidev, + PCI_DEVICE_ID, &devid); /* * Poll the Port Status Register and wait for RDY for * up to 10 seconds. If the port doesn't respond, treat @@ -7223,19 +7962,17 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) rc = -ENODEV; goto out; } - if (bf_get(lpfc_sliport_status_rdy, ®_data)) - break; - if (bf_get(lpfc_sliport_status_rn, ®_data)) { + if (bf_get(lpfc_sliport_status_rn, ®_data)) reset_again++; + if (bf_get(lpfc_sliport_status_rdy, ®_data)) break; - } } /* * If the port responds to the init request with * reset needed, delay for a bit and restart the loop. 
*/ - if (reset_again) { + if (reset_again && (rdy_chk < 1000)) { msleep(10); reset_again = 0; continue; @@ -7249,10 +7986,11 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) phba->work_status[1] = readl( phba->sli4_hba.u.if_type2.ERR2regaddr); lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2890 Port Error Detected " - "during Port Reset: " + "2890 Port error detected during port " + "reset(%d): wait_tmo:%d ms, " "port status reg 0x%x, " "error 1=0x%x, error 2=0x%x\n", + num_resets, rdy_chk*10, reg_data.word0, phba->work_status[0], phba->work_status[1]); @@ -7276,81 +8014,15 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) out: /* Catch the not-ready port failure after a port reset. */ - if (num_resets >= MAX_IF_TYPE_2_RESETS) - rc = -ENODEV; - - return rc; -} - -/** - * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands - * @phba: pointer to lpfc hba data structure. - * @cnt: number of nop mailbox commands to send. - * - * This routine is invoked to send a number @cnt of NOP mailbox command and - * wait for each command to complete. - * - * Return: the number of NOP mailbox command completed. - **/ -static int -lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt) -{ - LPFC_MBOXQ_t *mboxq; - int length, cmdsent; - uint32_t mbox_tmo; - uint32_t rc = 0; - uint32_t shdr_status, shdr_add_status; - union lpfc_sli4_cfg_shdr *shdr; - - if (cnt == 0) { - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "2518 Requested to send 0 NOP mailbox cmd\n"); - return cnt; - } - - mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mboxq) { + if (num_resets >= MAX_IF_TYPE_2_RESETS) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2519 Unable to allocate memory for issuing " - "NOP mailbox command\n"); - return 0; - } - - /* Set up NOP SLI4_CONFIG mailbox-ioctl command */ - length = (sizeof(struct lpfc_mbx_nop) - - sizeof(struct lpfc_sli4_cfg_mhdr)); - lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, - LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED); - - for (cmdsent = 0; cmdsent < cnt; cmdsent++) { - if (!phba->sli4_hba.intr_enable) - rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); - else { - mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); - rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); - } - if (rc == MBX_TIMEOUT) - break; - /* Check return status */ - shdr = (union lpfc_sli4_cfg_shdr *) - &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, - &shdr->response); - if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "2520 NOP mailbox command failed " - "status x%x add_status x%x mbx " - "status x%x\n", shdr_status, - shdr_add_status, rc); - break; - } + "3317 HBA not functional: IP Reset Failed " + "after (%d) retries, try: " + "echo fw_reset > board_mode\n", num_resets); + rc = -ENODEV; } - if (rc != MBX_TIMEOUT) - mempool_free(mboxq, phba->mbox_mem_pool); - - return cmdsent; + return rc; } /** @@ -7413,9 +8085,9 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) * particular PCI BARs regions is dependent on the type of * SLI4 device. 
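[Editor's note] The if_type 2 port-reset change above reorders the status checks (note "reset needed" before testing RDY), bounds the retry on the poll counter, and adds the "3317 HBA not functional" hint when all resets are exhausted. The user-space sketch below models that poll/retry structure with stubbed register access; counts, delays and names are illustrative.

#include <stdio.h>
#include <unistd.h>

#define MAX_RESETS     3
#define MAX_RDY_CHECKS 1000

struct port_status { int rdy; int rn; };

/* Stubbed "hardware": the port comes ready a few polls after a reset. */
static int polls_until_ready;
static void issue_init_port(void) { polls_until_ready = 3; }
static struct port_status read_port_status(void)
{
	struct port_status st = { .rdy = (polls_until_ready-- <= 0), .rn = 0 };
	return st;
}

static int port_reset_poll(void)
{
	for (int resets = 0; resets < MAX_RESETS; resets++) {
		int reset_again = 0;
		int rdy_chk;

		issue_init_port();
		for (rdy_chk = 0; rdy_chk < MAX_RDY_CHECKS; rdy_chk++) {
			struct port_status st = read_port_status();
			if (st.rn)          /* note "reset needed" first...   */
				reset_again++;
			if (st.rdy)         /* ...then test RDY, as the patch */
				break;      /*    reorders it                 */
			usleep(10 * 1000);  /* kernel code uses msleep(10)    */
		}

		/* Port flagged "reset needed" before going ready: retry,
		 * but only while the poll did not time out. */
		if (reset_again && rdy_chk < MAX_RDY_CHECKS) {
			usleep(10 * 1000);
			continue;
		}
		if (rdy_chk < MAX_RDY_CHECKS)
			return 0;           /* port is ready */
		break;
	}
	fprintf(stderr, "port not ready after %d resets; "
		"try: echo fw_reset > board_mode\n", MAX_RESETS);
	return -1;
}

int main(void)
{
	return port_reset_poll() ? 1 : 0;
}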
*/ - if (pci_resource_start(pdev, 0)) { - phba->pci_bar0_map = pci_resource_start(pdev, 0); - bar0map_len = pci_resource_len(pdev, 0); + if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { + phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); + bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); /* * Map SLI4 PCI Config Space Register base to a kernel virtual @@ -7429,6 +8101,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) "registers.\n"); goto out; } + phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; /* Set up BAR0 PCI config space register memory map */ lpfc_sli4_bar0_register_memmap(phba, if_type); } else { @@ -7451,13 +8124,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) } if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && - (pci_resource_start(pdev, 2))) { + (pci_resource_start(pdev, PCI_64BIT_BAR2))) { /* * Map SLI4 if type 0 HBA Control Register base to a kernel * virtual address and setup the registers. */ - phba->pci_bar1_map = pci_resource_start(pdev, 2); - bar1map_len = pci_resource_len(pdev, 2); + phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); + bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); phba->sli4_hba.ctrl_regs_memmap_p = ioremap(phba->pci_bar1_map, bar1map_len); if (!phba->sli4_hba.ctrl_regs_memmap_p) { @@ -7465,17 +8138,18 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) "ioremap failed for SLI4 HBA control registers.\n"); goto out_iounmap_conf; } + phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p; lpfc_sli4_bar1_register_memmap(phba); } if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && - (pci_resource_start(pdev, 4))) { + (pci_resource_start(pdev, PCI_64BIT_BAR4))) { /* * Map SLI4 if type 0 HBA Doorbell Register base to a kernel * virtual address and setup the registers. */ - phba->pci_bar2_map = pci_resource_start(pdev, 4); - bar2map_len = pci_resource_len(pdev, 4); + phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); + bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); phba->sli4_hba.drbl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); if (!phba->sli4_hba.drbl_regs_memmap_p) { @@ -7483,6 +8157,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) "ioremap failed for SLI4 HBA doorbell registers.\n"); goto out_iounmap_ctrl; } + phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); if (error) goto out_iounmap_all; @@ -7813,6 +8488,287 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba) } /** + * lpfc_find_next_cpu - Find next available CPU that matches the phys_id + * @phba: pointer to lpfc hba data structure. + * + * Find next available CPU to use for IRQ to CPU affinity. + */ +static int +lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id) +{ + struct lpfc_vector_map_info *cpup; + int cpu; + + cpup = phba->sli4_hba.cpu_map; + for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { + /* CPU must be online */ + if (cpu_online(cpu)) { + if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && + (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) && + (cpup->phys_id == phys_id)) { + return cpu; + } + } + cpup++; + } + + /* + * If we get here, we have used ALL CPUs for the specific + * phys_id. Now we need to clear out lpfc_used_cpu and start + * reusing CPUs. 
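[Editor's note] lpfc_find_next_cpu() above picks the next online CPU on the requested physical socket that has not yet been handed an IRQ, and once the socket is exhausted it clears the "used" markers and starts reusing CPUs. The toy model below captures that two-pass search; the data layout and seed values are made up for illustration.

#include <stdio.h>

#define NCPUS          8
#define MAP_EMPTY   0xffff

struct cpu_info { int online; int phys_id; int irq; };

static struct cpu_info cpu_map[NCPUS];
static int used_cpu[NCPUS];

static int find_next_cpu(int phys_id)
{
	for (int pass = 0; pass < 2; pass++) {
		for (int cpu = 0; cpu < NCPUS; cpu++) {
			struct cpu_info *c = &cpu_map[cpu];

			if (c->online && c->phys_id == phys_id &&
			    c->irq == MAP_EMPTY &&
			    (pass == 1 || used_cpu[cpu] == MAP_EMPTY))
				return cpu;
		}
		/* First pass exhausted the socket: forget its used markers
		 * and try again, allowing CPUs to be reused. */
		for (int cpu = 0; cpu < NCPUS; cpu++)
			if (used_cpu[cpu] == phys_id)
				used_cpu[cpu] = MAP_EMPTY;
	}
	return MAP_EMPTY;
}

int main(void)
{
	for (int cpu = 0; cpu < NCPUS; cpu++) {
		cpu_map[cpu] = (struct cpu_info){ 1, cpu / 4, MAP_EMPTY };
		used_cpu[cpu] = MAP_EMPTY;
	}
	printf("next CPU on socket 0: %d\n", find_next_cpu(0));
	return 0;
}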
+ */ + + for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { + if (lpfc_used_cpu[cpu] == phys_id) + lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY; + } + + cpup = phba->sli4_hba.cpu_map; + for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { + /* CPU must be online */ + if (cpu_online(cpu)) { + if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && + (cpup->phys_id == phys_id)) { + return cpu; + } + } + cpup++; + } + return LPFC_VECTOR_MAP_EMPTY; +} + +/** + * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors + * @phba: pointer to lpfc hba data structure. + * @vectors: number of HBA vectors + * + * Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector + * affinization across multple physical CPUs (numa nodes). + * In addition, this routine will assign an IO channel for each CPU + * to use when issuing I/Os. + */ +static int +lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) +{ + int i, idx, saved_chann, used_chann, cpu, phys_id; + int max_phys_id, min_phys_id; + int num_io_channel, first_cpu, chan; + struct lpfc_vector_map_info *cpup; +#ifdef CONFIG_X86 + struct cpuinfo_x86 *cpuinfo; +#endif + struct cpumask *mask; + uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1]; + + /* If there is no mapping, just return */ + if (!phba->cfg_fcp_cpu_map) + return 1; + + /* Init cpu_map array */ + memset(phba->sli4_hba.cpu_map, 0xff, + (sizeof(struct lpfc_vector_map_info) * + phba->sli4_hba.num_present_cpu)); + + max_phys_id = 0; + min_phys_id = 0xff; + phys_id = 0; + num_io_channel = 0; + first_cpu = LPFC_VECTOR_MAP_EMPTY; + + /* Update CPU map with physical id and core id of each CPU */ + cpup = phba->sli4_hba.cpu_map; + for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { +#ifdef CONFIG_X86 + cpuinfo = &cpu_data(cpu); + cpup->phys_id = cpuinfo->phys_proc_id; + cpup->core_id = cpuinfo->cpu_core_id; +#else + /* No distinction between CPUs for other platforms */ + cpup->phys_id = 0; + cpup->core_id = 0; +#endif + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3328 CPU physid %d coreid %d\n", + cpup->phys_id, cpup->core_id); + + if (cpup->phys_id > max_phys_id) + max_phys_id = cpup->phys_id; + if (cpup->phys_id < min_phys_id) + min_phys_id = cpup->phys_id; + cpup++; + } + + phys_id = min_phys_id; + /* Now associate the HBA vectors with specific CPUs */ + for (idx = 0; idx < vectors; idx++) { + cpup = phba->sli4_hba.cpu_map; + cpu = lpfc_find_next_cpu(phba, phys_id); + if (cpu == LPFC_VECTOR_MAP_EMPTY) { + + /* Try for all phys_id's */ + for (i = 1; i < max_phys_id; i++) { + phys_id++; + if (phys_id > max_phys_id) + phys_id = min_phys_id; + cpu = lpfc_find_next_cpu(phba, phys_id); + if (cpu == LPFC_VECTOR_MAP_EMPTY) + continue; + goto found; + } + + /* Use round robin for scheduling */ + phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN; + chan = 0; + cpup = phba->sli4_hba.cpu_map; + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + cpup->channel_id = chan; + cpup++; + chan++; + if (chan >= phba->cfg_fcp_io_channel) + chan = 0; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3329 Cannot set affinity:" + "Error mapping vector %d (%d)\n", + idx, vectors); + return 0; + } +found: + cpup += cpu; + if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP) + lpfc_used_cpu[cpu] = phys_id; + + /* Associate vector with selected CPU */ + cpup->irq = phba->sli4_hba.msix_entries[idx].vector; + + /* Associate IO channel with selected CPU */ + cpup->channel_id = idx; + num_io_channel++; + + if (first_cpu == LPFC_VECTOR_MAP_EMPTY) + first_cpu = cpu; + + /* Now affinitize to the selected 
CPU */ + mask = &cpup->maskbits; + cpumask_clear(mask); + cpumask_set_cpu(cpu, mask); + i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx]. + vector, mask); + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3330 Set Affinity: CPU %d channel %d " + "irq %d (%x)\n", + cpu, cpup->channel_id, + phba->sli4_hba.msix_entries[idx].vector, i); + + /* Spread vector mapping across multple physical CPU nodes */ + phys_id++; + if (phys_id > max_phys_id) + phys_id = min_phys_id; + } + + /* + * Finally fill in the IO channel for any remaining CPUs. + * At this point, all IO channels have been assigned to a specific + * MSIx vector, mapped to a specific CPU. + * Base the remaining IO channel assigned, to IO channels already + * assigned to other CPUs on the same phys_id. + */ + for (i = min_phys_id; i <= max_phys_id; i++) { + /* + * If there are no io channels already mapped to + * this phys_id, just round robin thru the io_channels. + * Setup chann[] for round robin. + */ + for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) + chann[idx] = idx; + + saved_chann = 0; + used_chann = 0; + + /* + * First build a list of IO channels already assigned + * to this phys_id before reassigning the same IO + * channels to the remaining CPUs. + */ + cpup = phba->sli4_hba.cpu_map; + cpu = first_cpu; + cpup += cpu; + for (idx = 0; idx < phba->sli4_hba.num_present_cpu; + idx++) { + if (cpup->phys_id == i) { + /* + * Save any IO channels that are + * already mapped to this phys_id. + */ + if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) { + chann[saved_chann] = + cpup->channel_id; + saved_chann++; + goto out; + } + + /* See if we are using round-robin */ + if (saved_chann == 0) + saved_chann = + phba->cfg_fcp_io_channel; + + /* Associate next IO channel with CPU */ + cpup->channel_id = chann[used_chann]; + num_io_channel++; + used_chann++; + if (used_chann == saved_chann) + used_chann = 0; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3331 Set IO_CHANN " + "CPU %d channel %d\n", + idx, cpup->channel_id); + } +out: + cpu++; + if (cpu >= phba->sli4_hba.num_present_cpu) { + cpup = phba->sli4_hba.cpu_map; + cpu = 0; + } else { + cpup++; + } + } + } + + if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) { + cpup = phba->sli4_hba.cpu_map; + for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) { + if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) { + cpup->channel_id = 0; + num_io_channel++; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3332 Assign IO_CHANN " + "CPU %d channel %d\n", + idx, cpup->channel_id); + } + cpup++; + } + } + + /* Sanity check */ + if (num_io_channel != phba->sli4_hba.num_present_cpu) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3333 Set affinity mismatch:" + "%d chann != %d cpus: %d vectors\n", + num_io_channel, phba->sli4_hba.num_present_cpu, + vectors); + + /* Enable using cpu affinity for scheduling */ + phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU; + return 1; +} + + +/** * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device * @phba: pointer to lpfc hba data structure. 
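Underneath the bookkeeping, the affinity step above amounts to pointing one IRQ at one CPU through an affinity hint. In isolation, and with generic names, that single step looks like the following sketch:

#include <linux/interrupt.h>
#include <linux/cpumask.h>

/* Illustrative only: hint that IRQ 'irq' should be serviced by 'cpu'. */
static int hint_irq_to_cpu(unsigned int irq, int cpu, struct cpumask *mask)
{
        cpumask_clear(mask);
        cpumask_set_cpu(cpu, mask);
        return irq_set_affinity_hint(irq, mask);  /* pass NULL to clear the hint */
}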
* @@ -7838,11 +8794,15 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) int vectors, rc, index; /* Set up MSI-X multi-message vectors */ - for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) + for (index = 0; index < phba->cfg_fcp_io_channel; index++) phba->sli4_hba.msix_entries[index].entry = index; /* Configure MSI-X capability structure */ - vectors = phba->sli4_hba.cfg_eqn; + vectors = phba->cfg_fcp_io_channel; + if (phba->cfg_fof) { + phba->sli4_hba.msix_entries[index].entry = index; + vectors++; + } enable_msix_vectors: rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, vectors); @@ -7862,33 +8822,28 @@ enable_msix_vectors: "message=%d\n", index, phba->sli4_hba.msix_entries[index].vector, phba->sli4_hba.msix_entries[index].entry); - /* - * Assign MSI-X vectors to interrupt handlers - */ - if (vectors > 1) - rc = request_irq(phba->sli4_hba.msix_entries[0].vector, - &lpfc_sli4_sp_intr_handler, IRQF_SHARED, - LPFC_SP_DRIVER_HANDLER_NAME, phba); - else - /* All Interrupts need to be handled by one EQ */ - rc = request_irq(phba->sli4_hba.msix_entries[0].vector, - &lpfc_sli4_intr_handler, IRQF_SHARED, - LPFC_DRIVER_NAME, phba); - if (rc) { - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "0485 MSI-X slow-path request_irq failed " - "(%d)\n", rc); - goto msi_fail_out; - } - /* The rest of the vector(s) are associated to fast-path handler(s) */ - for (index = 1; index < vectors; index++) { - phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1; - phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba; - rc = request_irq(phba->sli4_hba.msix_entries[index].vector, - &lpfc_sli4_fp_intr_handler, IRQF_SHARED, - LPFC_FP_DRIVER_HANDLER_NAME, - &phba->sli4_hba.fcp_eq_hdl[index - 1]); + /* Assign MSI-X vectors to interrupt handlers */ + for (index = 0; index < vectors; index++) { + memset(&phba->sli4_hba.handler_name[index], 0, 16); + sprintf((char *)&phba->sli4_hba.handler_name[index], + LPFC_DRIVER_HANDLER_NAME"%d", index); + + phba->sli4_hba.fcp_eq_hdl[index].idx = index; + phba->sli4_hba.fcp_eq_hdl[index].phba = phba; + atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1); + if (phba->cfg_fof && (index == (vectors - 1))) + rc = request_irq( + phba->sli4_hba.msix_entries[index].vector, + &lpfc_sli4_fof_intr_handler, IRQF_SHARED, + (char *)&phba->sli4_hba.handler_name[index], + &phba->sli4_hba.fcp_eq_hdl[index]); + else + rc = request_irq( + phba->sli4_hba.msix_entries[index].vector, + &lpfc_sli4_hba_intr_handler, IRQF_SHARED, + (char *)&phba->sli4_hba.handler_name[index], + &phba->sli4_hba.fcp_eq_hdl[index]); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0486 MSI-X fast-path (%d) " @@ -7896,18 +8851,29 @@ enable_msix_vectors: goto cfg_fail_out; } } - phba->sli4_hba.msix_vec_nr = vectors; + if (phba->cfg_fof) + vectors--; + + if (vectors != phba->cfg_fcp_io_channel) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3238 Reducing IO channels to match number of " + "MSI-X vectors, requested %d got %d\n", + phba->cfg_fcp_io_channel, vectors); + phba->cfg_fcp_io_channel = vectors; + } + + lpfc_sli4_set_affinity(phba, vectors); return rc; cfg_fail_out: /* free the irq already requested */ - for (--index; index >= 1; index--) - free_irq(phba->sli4_hba.msix_entries[index - 1].vector, - &phba->sli4_hba.fcp_eq_hdl[index - 1]); - - /* free the irq already requested */ - free_irq(phba->sli4_hba.msix_entries[0].vector, phba); + for (--index; index >= 0; index--) { + irq_set_affinity_hint(phba->sli4_hba.msix_entries[index]. 
+ vector, NULL); + free_irq(phba->sli4_hba.msix_entries[index].vector, + &phba->sli4_hba.fcp_eq_hdl[index]); + } msi_fail_out: /* Unconfigure MSI-X capability structure */ @@ -7928,12 +8894,16 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba) int index; /* Free up MSI-X multi-message vectors */ - free_irq(phba->sli4_hba.msix_entries[0].vector, phba); - - for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++) + for (index = 0; index < phba->cfg_fcp_io_channel; index++) { + irq_set_affinity_hint(phba->sli4_hba.msix_entries[index]. + vector, NULL); free_irq(phba->sli4_hba.msix_entries[index].vector, - &phba->sli4_hba.fcp_eq_hdl[index - 1]); - + &phba->sli4_hba.fcp_eq_hdl[index]); + } + if (phba->cfg_fof) { + free_irq(phba->sli4_hba.msix_entries[index].vector, + &phba->sli4_hba.fcp_eq_hdl[index]); + } /* Disable MSI-X */ pci_disable_msix(phba->pcidev); @@ -7978,11 +8948,15 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba) return rc; } - for (index = 0; index < phba->cfg_fcp_eq_count; index++) { + for (index = 0; index < phba->cfg_fcp_io_channel; index++) { phba->sli4_hba.fcp_eq_hdl[index].idx = index; phba->sli4_hba.fcp_eq_hdl[index].phba = phba; } + if (phba->cfg_fof) { + phba->sli4_hba.fcp_eq_hdl[index].idx = index; + phba->sli4_hba.fcp_eq_hdl[index].phba = phba; + } return 0; } @@ -8058,10 +9032,18 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) /* Indicate initialization to INTx mode */ phba->intr_type = INTx; intr_mode = 0; - for (index = 0; index < phba->cfg_fcp_eq_count; + for (index = 0; index < phba->cfg_fcp_io_channel; index++) { phba->sli4_hba.fcp_eq_hdl[index].idx = index; phba->sli4_hba.fcp_eq_hdl[index].phba = phba; + atomic_set(&phba->sli4_hba.fcp_eq_hdl[index]. + fcp_eq_in_use, 1); + } + if (phba->cfg_fof) { + phba->sli4_hba.fcp_eq_hdl[index].idx = index; + phba->sli4_hba.fcp_eq_hdl[index].phba = phba; + atomic_set(&phba->sli4_hba.fcp_eq_hdl[index]. + fcp_eq_in_use, 1); } } } @@ -8112,6 +9094,9 @@ lpfc_unset_hba(struct lpfc_hba *phba) vport->load_flag |= FC_UNLOADING; spin_unlock_irq(shost->host_lock); + kfree(phba->vpi_bmask); + kfree(phba->vpi_ids); + lpfc_stop_hba_timers(phba); phba->pport->work_port_events = 0; @@ -8126,37 +9111,6 @@ lpfc_unset_hba(struct lpfc_hba *phba) } /** - * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to unset the HBA device initialization steps to - * a device with SLI-4 interface spec. - **/ -static void -lpfc_sli4_unset_hba(struct lpfc_hba *phba) -{ - struct lpfc_vport *vport = phba->pport; - struct Scsi_Host *shost = lpfc_shost_from_vport(vport); - - spin_lock_irq(shost->host_lock); - vport->load_flag |= FC_UNLOADING; - spin_unlock_irq(shost->host_lock); - - phba->pport->work_port_events = 0; - - /* Stop the SLI4 device port */ - lpfc_stop_port(phba); - - lpfc_sli4_disable_intr(phba); - - /* Reset SLI4 HBA FCoE function */ - lpfc_pci_function_reset(phba); - lpfc_sli4_queue_destroy(phba); - - return; -} - -/** * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy * @phba: Pointer to HBA context object. 
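The MSI-X hunks above follow the usual pattern of this kernel era: request a number of vectors, retry with however many the device can grant, then request_irq() each vector and unwind on failure. A condensed, hypothetical version of that pattern, with the handler, name string, and cookie as placeholders:

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t demo_handler(int irq, void *cookie)
{
        return IRQ_HANDLED;
}

/* Illustrative only: enable up to 'want' MSI-X vectors and attach a handler. */
static int demo_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
                            int want, void *cookie)
{
        int i, rc;

        for (i = 0; i < want; i++)
                entries[i].entry = i;

        rc = pci_enable_msix(pdev, entries, want);
        while (rc > 0) {        /* fewer vectors available: retry with that many */
                want = rc;
                rc = pci_enable_msix(pdev, entries, want);
        }
        if (rc)
                return rc;

        for (i = 0; i < want; i++) {
                rc = request_irq(entries[i].vector, demo_handler, IRQF_SHARED,
                                 "demo-msix", cookie);
                if (rc)
                        goto unwind;
        }
        return 0;

unwind:
        while (--i >= 0)
                free_irq(entries[i].vector, cookie);
        pci_disable_msix(pdev);
        return rc;
}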
* @@ -8401,10 +9355,12 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters); + sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); + sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, mbx_sli4_parameters); sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, @@ -8436,7 +9392,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) * 0 - driver can claim the device * negative value - driver can not claim the device **/ -static int __devinit +static int lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) { struct lpfc_hba *phba; @@ -8603,7 +9559,7 @@ out_free_phba: * removed from PCI bus, it performs all the necessary cleanup for the HBA * device to be removed from the PCI subsystem properly. **/ -static void __devexit +static void lpfc_pci_remove_one_s3(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); @@ -8622,8 +9578,11 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev) /* Release all the vports against this physical port */ vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + if (vports[i]->port_type == LPFC_PHYSICAL_PORT) + continue; fc_vport_terminate(vports[i]->fc_vport); + } lpfc_destroy_vport_work_array(phba, vports); /* Remove FC host and then SCSI host with the physical port */ @@ -8644,6 +9603,9 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev) /* Final cleanup of txcmplq and reset the HBA */ lpfc_sli_brdrestart(phba); + kfree(phba->vpi_bmask); + kfree(phba->vpi_ids); + lpfc_stop_hba_timers(phba); spin_lock_irq(&phba->hbalock); list_del_init(&vport->listentry); @@ -8658,7 +9620,6 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev) /* Disable interrupt */ lpfc_sli_disable_intr(phba); - pci_set_drvdata(pdev, NULL); scsi_host_put(shost); /* @@ -8716,7 +9677,7 @@ lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) "0473 PCI device Power Management suspend.\n"); /* Bring down the device */ - lpfc_offline_prep(phba); + lpfc_offline_prep(phba, LPFC_MBX_WAIT); lpfc_offline(phba); kthread_stop(phba->worker_thread); @@ -8813,9 +9774,6 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev) static void lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) { - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring; - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2723 PCI channel I/O abort preparing for recovery\n"); @@ -8823,8 +9781,7 @@ lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) * There may be errored I/Os through HBA, abort all I/Os on txcmplq * and let the SCSI mid-layer to retry them to recover. 
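The suspend hunk above only changes how the device is quiesced (lpfc_offline_prep now takes a wait flag); the surrounding legacy PCI power-management flow remains the familiar quiesce, save state, drop to D3hot sequence. Sketched generically, with the quiesce step left as a comment:

#include <linux/pci.h>

/* Illustrative only: the usual shape of a legacy .suspend hook. */
static int demo_pci_suspend(struct pci_dev *pdev, pm_message_t msg)
{
        /* 1. Quiesce the device (the driver's offline/prep step goes here). */
        /* 2. Save PCI config space and drop to D3hot. */
        pci_save_state(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
}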
*/ - pring = &psli->ring[psli->fcp_ring]; - lpfc_sli_abort_iocb_ring(phba, pring); + lpfc_sli_abort_fcp_rings(phba); } /** @@ -8842,20 +9799,20 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) "2710 PCI channel disable preparing for reset\n"); /* Block any management I/Os to the device */ - lpfc_block_mgmt_io(phba); + lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); /* Block all SCSI devices' I/Os on the host */ lpfc_scsi_dev_block(phba); + /* Flush all driver's outstanding SCSI I/Os as we are to reset */ + lpfc_sli_flush_fcp_rings(phba); + /* stop all timers */ lpfc_stop_hba_timers(phba); /* Disable interrupt and pci device */ lpfc_sli_disable_intr(phba); pci_disable_device(phba->pcidev); - - /* Flush all driver's outstanding SCSI I/Os as we are to reset */ - lpfc_sli_flush_fcp_rings(phba); } /** @@ -8986,7 +9943,7 @@ lpfc_io_slot_reset_s3(struct pci_dev *pdev) phba->intr_mode = intr_mode; /* Take device offline, it will perform cleanup */ - lpfc_offline_prep(phba); + lpfc_offline_prep(phba, LPFC_MBX_WAIT); lpfc_offline(phba); lpfc_sli_brdrestart(phba); @@ -9040,31 +9997,40 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) return 50; else if (max_xri <= 1024) return 100; - else + else if (max_xri <= 1536) return 150; + else if (max_xri <= 2048) + return 200; + else + return 250; } else return 0; } /** * lpfc_write_firmware - attempt to write a firmware image to the port - * @phba: pointer to lpfc hba data structure. * @fw: pointer to firmware image returned from request_firmware. + * @phba: pointer to lpfc hba data structure. * - * returns the number of bytes written if write is successful. - * returns a negative error value if there were errors. - * returns 0 if firmware matches currently active firmware on port. **/ -int -lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw) +static void +lpfc_write_firmware(const struct firmware *fw, void *context) { - char fwrev[32]; - struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data; + struct lpfc_hba *phba = (struct lpfc_hba *)context; + char fwrev[FW_REV_STR_SIZE]; + struct lpfc_grp_hdr *image; struct list_head dma_buffer_list; int i, rc = 0; struct lpfc_dmabuf *dmabuf, *next; uint32_t offset = 0, temp_offset = 0; + /* It can be null in no-wait mode, sanity check */ + if (!fw) { + rc = -ENXIO; + goto out; + } + image = (struct lpfc_grp_hdr *)fw->data; + INIT_LIST_HEAD(&dma_buffer_list); if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) || (bf_get_be32(lpfc_grp_hdr_file_type, image) != @@ -9077,12 +10043,13 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw) be32_to_cpu(image->magic_number), bf_get_be32(lpfc_grp_hdr_file_type, image), bf_get_be32(lpfc_grp_hdr_id, image)); - return -EINVAL; + rc = -EINVAL; + goto release_out; } lpfc_decode_firmware_rev(phba, fwrev, 1); if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3023 Updating Firmware. 
Current Version:%s " + "3023 Updating Firmware, Current Version:%s " "New Version:%s\n", fwrev, image->revision); for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { @@ -9090,7 +10057,7 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw) GFP_KERNEL); if (!dmabuf) { rc = -ENOMEM; - goto out; + goto release_out; } dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, @@ -9099,7 +10066,7 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw) if (!dmabuf->virt) { kfree(dmabuf); rc = -ENOMEM; - goto out; + goto release_out; } list_add_tail(&dmabuf->list, &dma_buffer_list); } @@ -9119,23 +10086,61 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw) } rc = lpfc_wr_object(phba, &dma_buffer_list, (fw->size - offset), &offset); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3024 Firmware update failed. " - "%d\n", rc); - goto out; - } + if (rc) + goto release_out; } rc = offset; } -out: + +release_out: list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { list_del(&dmabuf->list); dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, dmabuf->virt, dmabuf->phys); kfree(dmabuf); } - return rc; + release_firmware(fw); +out: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3024 Firmware update done: %d.\n", rc); + return; +} + +/** + * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade + * @phba: pointer to lpfc hba data structure. + * + * This routine is called to perform Linux generic firmware upgrade on device + * that supports such feature. + **/ +int +lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade) +{ + uint8_t file_name[ELX_MODEL_NAME_SIZE]; + int ret; + const struct firmware *fw; + + /* Only supported on SLI4 interface type 2 for now */ + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != + LPFC_SLI_INTF_IF_TYPE_2) + return -EPERM; + + snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName); + + if (fw_upgrade == INT_FW_UPGRADE) { + ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, + file_name, &phba->pcidev->dev, + GFP_KERNEL, (void *)phba, + lpfc_write_firmware); + } else if (fw_upgrade == RUN_FW_UPGRADE) { + ret = request_firmware(&fw, file_name, &phba->pcidev->dev); + if (!ret) + lpfc_write_firmware(fw, (void *)phba); + } else { + ret = -EINVAL; + } + + return ret; } /** @@ -9156,18 +10161,15 @@ out: * 0 - driver can claim the device * negative value - driver can not claim the device **/ -static int __devinit +static int lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) { struct lpfc_hba *phba; struct lpfc_vport *vport = NULL; struct Scsi_Host *shost = NULL; - int error; + int error, ret; uint32_t cfg_mode, intr_mode; - int mcnt; - int adjusted_fcp_eq_count; - const struct firmware *fw; - uint8_t file_name[16]; + int adjusted_fcp_io_channel; /* Allocate memory for HBA structure */ phba = lpfc_hba_alloc(pdev); @@ -9255,74 +10257,41 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ /* Now, trying to enable interrupt and bring up the device */ cfg_mode = phba->cfg_use_msi; - while (true) { - /* Put device to a known state before enabling interrupt */ - lpfc_stop_port(phba); - /* Configure and enable interrupt */ - intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); - if (intr_mode == LPFC_INTR_ERROR) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0426 Failed to enable interrupt.\n"); - 
error = -ENODEV; - goto out_free_sysfs_attr; - } - /* Default to single EQ for non-MSI-X */ - if (phba->intr_type != MSIX) - adjusted_fcp_eq_count = 0; - else if (phba->sli4_hba.msix_vec_nr < - phba->cfg_fcp_eq_count + 1) - adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1; - else - adjusted_fcp_eq_count = phba->cfg_fcp_eq_count; - phba->cfg_fcp_eq_count = adjusted_fcp_eq_count; - /* Set up SLI-4 HBA */ - if (lpfc_sli4_hba_setup(phba)) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1421 Failed to set up hba\n"); - error = -ENODEV; - goto out_disable_intr; - } - /* Send NOP mbx cmds for non-INTx mode active interrupt test */ - if (intr_mode != 0) - mcnt = lpfc_sli4_send_nop_mbox_cmds(phba, - LPFC_ACT_INTR_CNT); - - /* Check active interrupts received only for MSI/MSI-X */ - if (intr_mode == 0 || - phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) { - /* Log the current active interrupt mode */ - phba->intr_mode = intr_mode; - lpfc_log_intr_mode(phba, intr_mode); - break; - } - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0451 Configure interrupt mode (%d) " - "failed active interrupt test.\n", - intr_mode); - /* Unset the previous SLI-4 HBA setup. */ - /* - * TODO: Is this operation compatible with IF TYPE 2 - * devices? All port state is deleted and cleared. - */ - lpfc_sli4_unset_hba(phba); - /* Try next level of interrupt mode */ - cfg_mode = --intr_mode; + /* Put device to a known state before enabling interrupt */ + lpfc_stop_port(phba); + /* Configure and enable interrupt */ + intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); + if (intr_mode == LPFC_INTR_ERROR) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0426 Failed to enable interrupt.\n"); + error = -ENODEV; + goto out_free_sysfs_attr; + } + /* Default to single EQ for non-MSI-X */ + if (phba->intr_type != MSIX) + adjusted_fcp_io_channel = 1; + else + adjusted_fcp_io_channel = phba->cfg_fcp_io_channel; + phba->cfg_fcp_io_channel = adjusted_fcp_io_channel; + /* Set up SLI-4 HBA */ + if (lpfc_sli4_hba_setup(phba)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1421 Failed to set up hba\n"); + error = -ENODEV; + goto out_disable_intr; } + /* Log the current active interrupt mode */ + phba->intr_mode = intr_mode; + lpfc_log_intr_mode(phba, intr_mode); + /* Perform post initialization setup */ lpfc_post_init_setup(phba); - /* check for firmware upgrade or downgrade (if_type 2 only) */ - if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == - LPFC_SLI_INTF_IF_TYPE_2) { - snprintf(file_name, 16, "%s.grp", phba->ModelName); - error = request_firmware(&fw, file_name, &phba->pcidev->dev); - if (!error) { - lpfc_write_firmware(phba, fw); - release_firmware(fw); - } - } + /* check for firmware upgrade or downgrade */ + if (phba->cfg_request_firmware_upgrade) + ret = lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); /* Check if there are static vports to be created. */ lpfc_create_static_vport(phba); @@ -9360,7 +10329,7 @@ out_free_phba: * removed from PCI bus, it performs all the necessary cleanup for the HBA * device to be removed from the PCI subsystem properly. 
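The firmware handling above moves from a blocking request_firmware() call inside probe to request_firmware_nowait() with lpfc_write_firmware() as the completion callback, so probe no longer stalls waiting for user space to supply the image. The bare asynchronous pattern looks roughly as follows; the callback body and the "demo.grp" file name are placeholders, not the driver's:

#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/module.h>

static void demo_fw_cont(const struct firmware *fw, void *context)
{
        struct device *dev = context;

        if (!fw) {              /* the image may be absent in nowait mode */
                dev_err(dev, "firmware not found\n");
                return;
        }
        /* ... consume fw->data / fw->size ... */
        release_firmware(fw);
}

static int demo_fw_kickoff(struct device *dev)
{
        /* Returns immediately; demo_fw_cont() runs when the image shows up. */
        return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
                                       "demo.grp", dev, GFP_KERNEL,
                                       dev, demo_fw_cont);
}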
**/ -static void __devexit +static void lpfc_pci_remove_one_s4(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); @@ -9380,8 +10349,11 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev) /* Release all the vports against this physical port */ vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + if (vports[i]->port_type == LPFC_PHYSICAL_PORT) + continue; fc_vport_terminate(vports[i]->fc_vport); + } lpfc_destroy_vport_work_array(phba, vports); /* Remove FC host and then SCSI host with the physical port */ @@ -9407,6 +10379,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev) * buffers are released to their corresponding pools here. */ lpfc_scsi_free(phba); + lpfc_sli4_driver_resource_unset(phba); /* Unmap adapter Control and Doorbell registers */ @@ -9453,7 +10426,7 @@ lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg) "2843 PCI device Power Management suspend.\n"); /* Bring down the device */ - lpfc_offline_prep(phba); + lpfc_offline_prep(phba, LPFC_MBX_WAIT); lpfc_offline(phba); kthread_stop(phba->worker_thread); @@ -9551,17 +10524,13 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev) static void lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) { - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring; - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2828 PCI channel I/O abort preparing for recovery\n"); /* * There may be errored I/Os through HBA, abort all I/Os on txcmplq * and let the SCSI mid-layer to retry them to recover. */ - pring = &psli->ring[psli->fcp_ring]; - lpfc_sli_abort_iocb_ring(phba, pring); + lpfc_sli_abort_fcp_rings(phba); } /** @@ -9579,11 +10548,14 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) "2826 PCI channel disable preparing for reset\n"); /* Block any management I/Os to the device */ - lpfc_block_mgmt_io(phba); + lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); /* Block all SCSI devices' I/Os on the host */ lpfc_scsi_dev_block(phba); + /* Flush all driver's outstanding SCSI I/Os as we are to reset */ + lpfc_sli_flush_fcp_rings(phba); + /* stop all timers */ lpfc_stop_hba_timers(phba); @@ -9591,9 +10563,6 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) lpfc_sli4_disable_intr(phba); lpfc_sli4_queue_destroy(phba); pci_disable_device(phba->pcidev); - - /* Flush all driver's outstanding SCSI I/Os as we are to reset */ - lpfc_sli_flush_fcp_rings(phba); } /** @@ -9752,7 +10721,7 @@ lpfc_io_resume_s4(struct pci_dev *pdev) */ if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { /* Perform device reset */ - lpfc_offline_prep(phba); + lpfc_offline_prep(phba, LPFC_MBX_WAIT); lpfc_offline(phba); lpfc_sli_brdrestart(phba); /* Bring the device back online */ @@ -9782,7 +10751,7 @@ lpfc_io_resume_s4(struct pci_dev *pdev) * 0 - driver can claim the device * negative value - driver can not claim the device **/ -static int __devinit +static int lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) { int rc; @@ -9810,7 +10779,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) * remove routine, which will perform all the necessary cleanup for the * device to be removed from the PCI subsystem properly. 
**/ -static void __devexit +static void lpfc_pci_remove_one(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); @@ -10012,6 +10981,168 @@ lpfc_io_resume(struct pci_dev *pdev) return; } +/** + * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter + * @phba: pointer to lpfc hba data structure. + * + * This routine checks to see if OAS is supported for this adapter. If + * supported, the configure Flash Optimized Fabric flag is set. Otherwise, + * the enable oas flag is cleared and the pool created for OAS device data + * is destroyed. + * + **/ +void +lpfc_sli4_oas_verify(struct lpfc_hba *phba) +{ + + if (!phba->cfg_EnableXLane) + return; + + if (phba->sli4_hba.pc_sli4_params.oas_supported) { + phba->cfg_fof = 1; + } else { + phba->cfg_fof = 0; + if (phba->device_data_mem_pool) + mempool_destroy(phba->device_data_mem_pool); + phba->device_data_mem_pool = NULL; + } + + return; +} + +/** + * lpfc_fof_queue_setup - Set up all the fof queues + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up all the fof queues for the FC HBA + * operation. + * + * Return codes + * 0 - successful + * -ENOMEM - No available memory + **/ +int +lpfc_fof_queue_setup(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + int rc; + + rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX); + if (rc) + return -ENOMEM; + + if (phba->cfg_fof) { + + rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq, + phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP); + if (rc) + goto out_oas_cq; + + rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq, + phba->sli4_hba.oas_cq, LPFC_FCP); + if (rc) + goto out_oas_wq; + + phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING]; + phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING]; + } + + return 0; + +out_oas_wq: + lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq); +out_oas_cq: + lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq); + return rc; + +} + +/** + * lpfc_fof_queue_create - Create all the fof queues + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to allocate all the fof queues for the FC HBA + * operation. For each SLI4 queue type, the parameters such as queue entry + * count (queue depth) shall be taken from the module parameter. For now, + * we just use some constant number as place holder. + * + * Return codes + * 0 - successful + * -ENOMEM - No availble memory + * -EIO - The mailbox failed to complete successfully. + **/ +int +lpfc_fof_queue_create(struct lpfc_hba *phba) +{ + struct lpfc_queue *qdesc; + + /* Create FOF EQ */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, + phba->sli4_hba.eq_ecount); + if (!qdesc) + goto out_error; + + phba->sli4_hba.fof_eq = qdesc; + + if (phba->cfg_fof) { + + /* Create OAS CQ */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount); + if (!qdesc) + goto out_error; + + phba->sli4_hba.oas_cq = qdesc; + + /* Create OAS WQ */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, + phba->sli4_hba.wq_ecount); + if (!qdesc) + goto out_error; + + phba->sli4_hba.oas_wq = qdesc; + + } + return 0; + +out_error: + lpfc_fof_queue_destroy(phba); + return -ENOMEM; +} + +/** + * lpfc_fof_queue_destroy - Destroy all the fof queues + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to release all the SLI4 queues with the FC HBA + * operation. 
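lpfc_fof_queue_create() above uses the usual allocate-everything-or-unwind idiom, where the destroy helper tolerates partially built state. Stripped of the driver specifics, the shape is roughly the sketch below; the struct and the use of kzalloc() are stand-ins for the driver's queue allocator:

#include <linux/slab.h>
#include <linux/errno.h>

struct demo_queues {
        void *eq;
        void *cq;
        void *wq;
};

static void demo_queues_destroy(struct demo_queues *q)
{
        kfree(q->wq); q->wq = NULL;
        kfree(q->cq); q->cq = NULL;
        kfree(q->eq); q->eq = NULL;
}

static int demo_queues_create(struct demo_queues *q)
{
        q->eq = kzalloc(64, GFP_KERNEL);
        if (!q->eq)
                goto out_error;
        q->cq = kzalloc(64, GFP_KERNEL);
        if (!q->cq)
                goto out_error;
        q->wq = kzalloc(64, GFP_KERNEL);
        if (!q->wq)
                goto out_error;
        return 0;

out_error:
        demo_queues_destroy(q); /* kfree(NULL) is a no-op, so partial unwinds are safe */
        return -ENOMEM;
}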
+ * + * Return codes + * 0 - successful + **/ +int +lpfc_fof_queue_destroy(struct lpfc_hba *phba) +{ + /* Release FOF Event queue */ + if (phba->sli4_hba.fof_eq != NULL) { + lpfc_sli4_queue_free(phba->sli4_hba.fof_eq); + phba->sli4_hba.fof_eq = NULL; + } + + /* Release OAS Completion queue */ + if (phba->sli4_hba.oas_cq != NULL) { + lpfc_sli4_queue_free(phba->sli4_hba.oas_cq); + phba->sli4_hba.oas_cq = NULL; + } + + /* Release OAS Work queue */ + if (phba->sli4_hba.oas_wq != NULL) { + lpfc_sli4_queue_free(phba->sli4_hba.oas_wq); + phba->sli4_hba.oas_wq = NULL; + } + return 0; +} + static struct pci_device_id lpfc_id_table[] = { {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, PCI_ANY_ID, PCI_ANY_ID, }, @@ -10103,12 +11234,16 @@ static struct pci_device_id lpfc_id_table[] = { PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF, PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF, + PCI_ANY_ID, PCI_ANY_ID, }, { 0 } }; MODULE_DEVICE_TABLE(pci, lpfc_id_table); -static struct pci_error_handlers lpfc_err_handler = { +static const struct pci_error_handlers lpfc_err_handler = { .error_detected = lpfc_io_error_detected, .slot_reset = lpfc_io_slot_reset, .resume = lpfc_io_resume, @@ -10118,12 +11253,22 @@ static struct pci_driver lpfc_driver = { .name = LPFC_DRIVER_NAME, .id_table = lpfc_id_table, .probe = lpfc_pci_probe_one, - .remove = __devexit_p(lpfc_pci_remove_one), + .remove = lpfc_pci_remove_one, .suspend = lpfc_pci_suspend_one, .resume = lpfc_pci_resume_one, .err_handler = &lpfc_err_handler, }; +static const struct file_operations lpfc_mgmt_fop = { + .owner = THIS_MODULE, +}; + +static struct miscdevice lpfc_mgmt_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "lpfcmgmt", + .fops = &lpfc_mgmt_fop, +}; + /** * lpfc_init - lpfc module initialization routine * @@ -10139,11 +11284,17 @@ static struct pci_driver lpfc_driver = { static int __init lpfc_init(void) { + int cpu; int error = 0; printk(LPFC_MODULE_DESC "\n"); printk(LPFC_COPYRIGHT "\n"); + error = misc_register(&lpfc_mgmt_dev); + if (error) + printk(KERN_ERR "Could not register lpfcmgmt device, " + "misc_register returned with status %d", error); + if (lpfc_enable_npiv) { lpfc_transport_functions.vport_create = lpfc_vport_create; lpfc_transport_functions.vport_delete = lpfc_vport_delete; @@ -10160,6 +11311,13 @@ lpfc_init(void) return -ENOMEM; } } + + /* Initialize in case vector mapping is needed */ + lpfc_used_cpu = NULL; + lpfc_present_cpu = 0; + for_each_present_cpu(cpu) + lpfc_present_cpu++; + error = pci_register_driver(&lpfc_driver); if (error) { fc_release_transport(lpfc_transport_template); @@ -10180,6 +11338,7 @@ lpfc_init(void) static void __exit lpfc_exit(void) { + misc_deregister(&lpfc_mgmt_dev); pci_unregister_driver(&lpfc_driver); fc_release_transport(lpfc_transport_template); if (lpfc_enable_npiv) @@ -10197,6 +11356,7 @@ lpfc_exit(void) (1L << _dump_buf_dif_order), _dump_buf_dif); free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); } + kfree(lpfc_used_cpu); } module_init(lpfc_init); diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index baf53e6c2bd..2a4e5d21eab 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h @@ -37,6 +37,7 @@ #define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ #define LOG_FIP 0x00020000 /* FIP events */ #define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */ +#define LOG_SCSI_CMD 0x00080000 /* ALL 
SCSI commands */ #define LOG_ALL_MSG 0xffffffff /* LOG all messages */ #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 20336f09fb3..1f292e29d56 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2013 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -92,7 +92,7 @@ lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, memset(mp->virt, 0, LPFC_BPL_SIZE); INIT_LIST_HEAD(&mp->list); /* save address for completion */ - pmb->context2 = (uint8_t *) mp; + pmb->context1 = (uint8_t *)mp; mb->un.varWords[3] = putPaddrLow(mp->phys); mb->un.varWords[4] = putPaddrHigh(mp->phys); mb->un.varDmp.sli4_length = sizeof(struct static_vport_info); @@ -178,7 +178,8 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) mb->mbxOwner = OWN_HOST; mb->un.varDmp.cv = 1; mb->un.varDmp.type = DMP_NV_PARAMS; - mb->un.varDmp.entry_index = 0; + if (phba->sli_rev < LPFC_SLI_REV4) + mb->un.varDmp.entry_index = 0; mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID; mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE; mb->un.varDmp.co = 0; @@ -361,7 +362,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) /* NEW_FEATURE * SLI-2, Coalescing Response Feature. */ - if (phba->cfg_cr_delay) { + if (phba->cfg_cr_delay && (phba->sli_rev < LPFC_SLI_REV4)) { mb->un.varCfgLnk.cr = 1; mb->un.varCfgLnk.ci = 1; mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay; @@ -377,7 +378,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) mb->un.varCfgLnk.crtov = phba->fc_crtov; mb->un.varCfgLnk.citov = phba->fc_citov; - if (phba->cfg_ack0) + if (phba->cfg_ack0 && (phba->sli_rev < LPFC_SLI_REV4)) mb->un.varCfgLnk.ack0_enable = 1; mb->mbxCommand = MBX_CONFIG_LINK; @@ -950,44 +951,47 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba) for (i = 0; i < psli->num_rings; i++) { pring = &psli->ring[i]; - pring->sizeCiocb = phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE: + pring->sli.sli3.sizeCiocb = + phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE : SLI2_IOCB_CMD_SIZE; - pring->sizeRiocb = phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE: + pring->sli.sli3.sizeRiocb = + phba->sli_rev == 3 ? 
SLI3_IOCB_RSP_SIZE : SLI2_IOCB_RSP_SIZE; /* A ring MUST have both cmd and rsp entries defined to be valid */ - if ((pring->numCiocb == 0) || (pring->numRiocb == 0)) { + if ((pring->sli.sli3.numCiocb == 0) || + (pring->sli.sli3.numRiocb == 0)) { pcbp->rdsc[i].cmdEntries = 0; pcbp->rdsc[i].rspEntries = 0; pcbp->rdsc[i].cmdAddrHigh = 0; pcbp->rdsc[i].rspAddrHigh = 0; pcbp->rdsc[i].cmdAddrLow = 0; pcbp->rdsc[i].rspAddrLow = 0; - pring->cmdringaddr = NULL; - pring->rspringaddr = NULL; + pring->sli.sli3.cmdringaddr = NULL; + pring->sli.sli3.rspringaddr = NULL; continue; } /* Command ring setup for ring */ - pring->cmdringaddr = (void *)&phba->IOCBs[iocbCnt]; - pcbp->rdsc[i].cmdEntries = pring->numCiocb; + pring->sli.sli3.cmdringaddr = (void *)&phba->IOCBs[iocbCnt]; + pcbp->rdsc[i].cmdEntries = pring->sli.sli3.numCiocb; offset = (uint8_t *) &phba->IOCBs[iocbCnt] - (uint8_t *) phba->slim2p.virt; pdma_addr = phba->slim2p.phys + offset; pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr); pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr); - iocbCnt += pring->numCiocb; + iocbCnt += pring->sli.sli3.numCiocb; /* Response ring setup for ring */ - pring->rspringaddr = (void *) &phba->IOCBs[iocbCnt]; + pring->sli.sli3.rspringaddr = (void *) &phba->IOCBs[iocbCnt]; - pcbp->rdsc[i].rspEntries = pring->numRiocb; + pcbp->rdsc[i].rspEntries = pring->sli.sli3.numRiocb; offset = (uint8_t *)&phba->IOCBs[iocbCnt] - (uint8_t *)phba->slim2p.virt; pdma_addr = phba->slim2p.phys + offset; pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr); pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr); - iocbCnt += pring->numRiocb; + iocbCnt += pring->sli.sli3.numRiocb; } } @@ -1609,12 +1613,15 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) switch (mbox->mbxCommand) { case MBX_WRITE_NV: /* 0x03 */ + case MBX_DUMP_MEMORY: /* 0x17 */ case MBX_UPDATE_CFG: /* 0x1B */ case MBX_DOWN_LOAD: /* 0x1C */ case MBX_DEL_LD_ENTRY: /* 0x1D */ + case MBX_WRITE_VPARMS: /* 0x32 */ case MBX_LOAD_AREA: /* 0x81 */ case MBX_WRITE_WWN: /* 0x98 */ case MBX_LOAD_EXP_ROM: /* 0x9C */ + case MBX_ACCESS_VDATA: /* 0xA5 */ return LPFC_MBOX_TMO_FLASH_CMD; case MBX_SLI4_CONFIG: /* 0x9b */ subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq); @@ -1625,11 +1632,17 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) case LPFC_MBOX_OPCODE_WRITE_OBJECT: case LPFC_MBOX_OPCODE_READ_OBJECT_LIST: case LPFC_MBOX_OPCODE_DELETE_OBJECT: - case LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG: case LPFC_MBOX_OPCODE_GET_PROFILE_LIST: case LPFC_MBOX_OPCODE_SET_ACT_PROFILE: + case LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG: case LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG: case LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG: + case LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES: + case LPFC_MBOX_OPCODE_SEND_ACTIVATION: + case LPFC_MBOX_OPCODE_RESET_LICENSES: + case LPFC_MBOX_OPCODE_SET_BOOT_CONFIG: + case LPFC_MBOX_OPCODE_GET_VPD_DATA: + case LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG: return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO; } } @@ -2114,33 +2127,44 @@ void lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys) { struct lpfc_mbx_reg_vfi *reg_vfi; + struct lpfc_hba *phba = vport->phba; memset(mbox, 0, sizeof(*mbox)); reg_vfi = &mbox->u.mqe.un.reg_vfi; bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI); bf_set(lpfc_reg_vfi_vp, reg_vfi, 1); bf_set(lpfc_reg_vfi_vfi, reg_vfi, - vport->phba->sli4_hba.vfi_ids[vport->vfi]); - bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi); - bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->phba->vpi_ids[vport->vpi]); + 
phba->sli4_hba.vfi_ids[vport->vfi]); + bf_set(lpfc_reg_vfi_fcfi, reg_vfi, phba->fcf.fcfi); + bf_set(lpfc_reg_vfi_vpi, reg_vfi, phba->vpi_ids[vport->vpi]); memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name)); reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]); reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]); - reg_vfi->e_d_tov = vport->phba->fc_edtov; - reg_vfi->r_a_tov = vport->phba->fc_ratov; + reg_vfi->e_d_tov = phba->fc_edtov; + reg_vfi->r_a_tov = phba->fc_ratov; reg_vfi->bde.addrHigh = putPaddrHigh(phys); reg_vfi->bde.addrLow = putPaddrLow(phys); reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam); reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID); + + /* Only FC supports upd bit */ + if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) && + (vport->fc_flag & FC_VFI_REGISTERED) && + (!phba->fc_topology_changed)) { + bf_set(lpfc_reg_vfi_vp, reg_vfi, 0); + bf_set(lpfc_reg_vfi_upd, reg_vfi, 1); + } lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX, "3134 Register VFI, mydid:x%x, fcfi:%d, " - " vfi:%d, vpi:%d, fc_pname:%x%x\n", + " vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x" + " port_state:x%x topology chg:%d\n", vport->fc_myDID, - vport->phba->fcf.fcfi, - vport->phba->sli4_hba.vfi_ids[vport->vfi], - vport->phba->vpi_ids[vport->vpi], - reg_vfi->wwn[0], reg_vfi->wwn[1]); + phba->fcf.fcfi, + phba->sli4_hba.vfi_ids[vport->vfi], + phba->vpi_ids[vport->vpi], + reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag, + vport->port_state, phba->fc_topology_changed); } /** diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index ade763d3930..3fa65338d3f 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -38,10 +38,29 @@ #include "lpfc_scsi.h" #include "lpfc.h" #include "lpfc_crtn.h" +#include "lpfc_logmsg.h" #define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */ #define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */ +#define LPFC_DEVICE_DATA_POOL_SIZE 64 /* max elements in device data pool */ +int +lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) { + size_t bytes; + int max_xri = phba->sli4_hba.max_cfg_param.max_xri; + + if (max_xri <= 0) + return -ENOMEM; + bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) * + sizeof(unsigned long); + phba->cfg_rrq_xri_bitmap_sz = bytes; + phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE, + bytes); + if (!phba->active_rrq_pool) + return -ENOMEM; + else + return 0; +} /** * lpfc_mem_alloc - create and allocate all PCI and memory pools @@ -64,18 +83,26 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; int i; - if (phba->sli_rev == LPFC_SLI_REV4) + if (phba->sli_rev == LPFC_SLI_REV4) { + /* Calculate alignment */ + if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) + i = phba->cfg_sg_dma_buf_size; + else + i = SLI4_PAGE_SIZE; + phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", phba->pcidev, phba->cfg_sg_dma_buf_size, - phba->cfg_sg_dma_buf_size, + i, 0); - else + } else { phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", phba->pcidev, phba->cfg_sg_dma_buf_size, align, 0); + } + if (!phba->lpfc_scsi_dma_buf_pool) goto fail; @@ -138,6 +165,16 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) phba->lpfc_drb_pool = NULL; } + if (phba->cfg_EnableXLane) { + phba->device_data_mem_pool = mempool_create_kmalloc_pool( + LPFC_DEVICE_DATA_POOL_SIZE, + sizeof(struct lpfc_device_data)); + if (!phba->device_data_mem_pool) + goto fail_free_hrb_pool; + } else { + phba->device_data_mem_pool = NULL; + } + return 0; fail_free_hrb_pool: pci_pool_destroy(phba->lpfc_hrb_pool); @@ -180,6 +217,7 @@ lpfc_mem_free(struct lpfc_hba *phba) { int i; struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; + struct lpfc_device_data *device_data; /* Free HBQ pools */ lpfc_sli_hbqbuf_free_all(phba); @@ -194,9 +232,17 @@ lpfc_mem_free(struct lpfc_hba *phba) pci_pool_destroy(phba->lpfc_hbq_pool); phba->lpfc_hbq_pool = NULL; + if (phba->rrq_pool) + mempool_destroy(phba->rrq_pool); + phba->rrq_pool = NULL; + /* Free NLP memory pool */ mempool_destroy(phba->nlp_mem_pool); phba->nlp_mem_pool = NULL; + if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) { + mempool_destroy(phba->active_rrq_pool); + phba->active_rrq_pool = NULL; + } /* Free mbox memory pool */ mempool_destroy(phba->mbox_mem_pool); @@ -215,6 +261,19 @@ lpfc_mem_free(struct lpfc_hba *phba) pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); phba->lpfc_scsi_dma_buf_pool = NULL; + /* Free Device Data memory pool */ + if (phba->device_data_mem_pool) { + /* Ensure all objects have been returned to the pool */ + while (!list_empty(&phba->luns)) { + device_data = list_first_entry(&phba->luns, + struct lpfc_device_data, + listentry); + list_del(&device_data->listentry); + mempool_free(device_data, phba->device_data_mem_pool); + } + mempool_destroy(phba->device_data_mem_pool); + } + phba->device_data_mem_pool = NULL; return; } diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index e8bb0055994..c342f6afd74 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c 
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2013 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -48,6 +48,10 @@ static int lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_name *nn, struct lpfc_name *pn) { + /* First, we MUST have a RPI registered */ + if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) + return 0; + /* Compare the ADISC rsp WWNN / WWPN matches our internal node * table entry for that node. */ @@ -199,8 +203,6 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, int lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { - LIST_HEAD(completions); - LIST_HEAD(txcmplq_completions); LIST_HEAD(abort_list); struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; @@ -212,33 +214,27 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) "Data: x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); - + /* Clean up all fabric IOs first.*/ lpfc_fabric_abort_nport(ndlp); - /* First check the txq */ + /* + * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list + * of all ELS IOs that need an ABTS. The IOs need to stay on the + * txcmplq so that the abort operation completes them successfully. + */ spin_lock_irq(&phba->hbalock); - list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { - /* Check to see if iocb matches the nport we are looking for */ - if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) { - /* It matches, so deque and call compl with anp error */ - list_move_tail(&iocb->list, &completions); - pring->txq_cnt--; - } - } - - /* Next check the txcmplq */ - list_splice_init(&pring->txcmplq, &txcmplq_completions); - spin_unlock_irq(&phba->hbalock); - - list_for_each_entry_safe(iocb, next_iocb, &txcmplq_completions, list) { - /* Check to see if iocb matches the nport we are looking for */ + if (phba->sli_rev == LPFC_SLI_REV4) + spin_lock(&pring->ring_lock); + list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { + /* Add to abort_list on on NDLP match. */ if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) list_add_tail(&iocb->dlist, &abort_list); } - spin_lock_irq(&phba->hbalock); - list_splice(&txcmplq_completions, &pring->txcmplq); + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring->ring_lock); spin_unlock_irq(&phba->hbalock); + /* Abort the targeted IOs and remove them from the abort list. 
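The reworked lpfc_els_abort() above collects matching IOCBs from the ring queues while holding the lock, then finishes them with the lock dropped. A generic sketch of that collect-then-complete pattern, with the entry type and the match test as stand-ins:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_io {
        struct list_head list;
        int target_id;
};

/* Illustrative only: move all entries for 'id' off 'queue', then complete
 * them without holding the lock across the completion calls.
 */
static void demo_abort_for_target(spinlock_t *lock, struct list_head *queue, int id)
{
        struct demo_io *io, *next;
        LIST_HEAD(abort_list);

        spin_lock_irq(lock);
        list_for_each_entry_safe(io, next, queue, list) {
                if (io->target_id == id)
                        list_move_tail(&io->list, &abort_list);
        }
        spin_unlock_irq(lock);

        list_for_each_entry_safe(io, next, &abort_list, list) {
                list_del_init(&io->list);
                /* complete or abort 'io' here */
        }
}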
*/ list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) { spin_lock_irq(&phba->hbalock); list_del_init(&iocb->dlist); @@ -246,9 +242,28 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) spin_unlock_irq(&phba->hbalock); } + INIT_LIST_HEAD(&abort_list); + + /* Now process the txq */ + spin_lock_irq(&phba->hbalock); + if (phba->sli_rev == LPFC_SLI_REV4) + spin_lock(&pring->ring_lock); + + list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { + /* Check to see if iocb matches the nport we are looking for */ + if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) { + list_del_init(&iocb->list); + list_add_tail(&iocb->list, &abort_list); + } + } + + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring->ring_lock); + spin_unlock_irq(&phba->hbalock); + /* Cancel all the IOCBs from the completions list */ - lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, - IOERR_SLI_ABORTED); + lpfc_sli_cancel_iocbs(phba, &abort_list, + IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); return 0; @@ -329,9 +344,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* PLOGI chkparm OK */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n", + "0114 PLOGI chkparm OK Data: x%x x%x x%x " + "x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, - ndlp->nlp_rpi); + ndlp->nlp_rpi, vport->port_state, + vport->fc_flag); if (vport->cfg_fcp_class == 2 && sp->cls2.classValid) ndlp->nlp_fcp_info |= CLASS2; @@ -363,8 +380,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return 1; } + /* Check for Nport to NPort pt2pt protocol */ if ((vport->fc_flag & FC_PT2PT) && !(vport->fc_flag & FC_PT2PT_PLOGI)) { + /* rcv'ed PLOGI decides what our NPortId will be */ vport->fc_myDID = icmd->un.rcvels.parmRo; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -378,6 +397,13 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, mempool_free(mbox, phba->mbox_mem_pool); goto out; } + /* + * For SLI4, the VFI/VPI are registered AFTER the + * Nport with the higher WWPN sends us a PLOGI with + * our assigned NPortId. + */ + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_issue_reg_vfi(vport); lpfc_can_disctmo(vport); } @@ -385,6 +411,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (!mbox) goto out; + /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */ + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_unreg_rpi(vport, ndlp); + rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID, (uint8_t *) sp, mbox, ndlp->nlp_rpi); if (rc) { @@ -432,11 +462,15 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, spin_unlock_irq(shost->host_lock); stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD; stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; - lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, + rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, mbox); + if (rc) + mempool_free(mbox, phba->mbox_mem_pool); return 1; } - lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox); + rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox); + if (rc) + mempool_free(mbox, phba->mbox_mem_pool); return 1; out: stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; @@ -445,11 +479,43 @@ out: return 0; } +/** + * lpfc_mbx_cmpl_resume_rpi - Resume RPI completion routine + * @phba: pointer to lpfc hba data structure. 
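One small fix above checks the return of lpfc_els_rsp_reject()/lpfc_els_rsp_acc() and releases the mailbox back to its pool on failure instead of leaking it. The general allocate, hand off, free-on-failure shape, with a stand-in pool and send function:

#include <linux/mempool.h>
#include <linux/slab.h>

/* Illustrative only: give the buffer back to its pool if the send fails. */
static int demo_send_with_mbox(mempool_t *pool, int (*send)(void *mbox))
{
        void *mbox = mempool_alloc(pool, GFP_KERNEL);
        int rc;

        if (!mbox)
                return -ENOMEM;

        rc = send(mbox);        /* on success the completion path frees mbox */
        if (rc)
                mempool_free(mbox, pool);
        return rc;
}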
+ * @mboxq: pointer to mailbox object + * + * This routine is invoked to issue a completion to a rcv'ed + * ADISC or PDISC after the paused RPI has been resumed. + **/ +static void +lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct lpfc_vport *vport; + struct lpfc_iocbq *elsiocb; + struct lpfc_nodelist *ndlp; + uint32_t cmd; + + elsiocb = (struct lpfc_iocbq *)mboxq->context1; + ndlp = (struct lpfc_nodelist *) mboxq->context2; + vport = mboxq->vport; + cmd = elsiocb->drvrTimeout; + + if (cmd == ELS_CMD_ADISC) { + lpfc_els_rsp_adisc_acc(vport, elsiocb, ndlp); + } else { + lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb, + ndlp, NULL); + } + kfree(elsiocb); + mempool_free(mboxq, phba->mbox_mem_pool); +} + static int lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_iocbq *cmdiocb) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_iocbq *elsiocb; struct lpfc_dmabuf *pcmd; struct serv_parm *sp; struct lpfc_name *pnn, *ppn; @@ -475,12 +541,43 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, icmd = &cmdiocb->iocb; if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) { + + /* + * As soon as we send ACC, the remote NPort can + * start sending us data. Thus, for SLI4 we must + * resume the RPI before the ACC goes out. + */ + if (vport->phba->sli_rev == LPFC_SLI_REV4) { + elsiocb = kmalloc(sizeof(struct lpfc_iocbq), + GFP_KERNEL); + if (elsiocb) { + + /* Save info from cmd IOCB used in rsp */ + memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb, + sizeof(struct lpfc_iocbq)); + + /* Save the ELS cmd */ + elsiocb->drvrTimeout = cmd; + + lpfc_sli4_resume_rpi(ndlp, + lpfc_mbx_cmpl_resume_rpi, elsiocb); + goto out; + } + } + if (cmd == ELS_CMD_ADISC) { lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp); } else { - lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, - NULL); + lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, + ndlp, NULL); } +out: + /* If we are authenticated, move to the proper state */ + if (ndlp->nlp_type & NLP_FCP_TARGET) + lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); + else + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + return 1; } /* Reject this request because invalid parameters */ @@ -491,7 +588,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); /* 1 sec timeout */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; @@ -548,7 +645,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * If there are other active VLinks present, * re-instantiate the Vlink using FDISC. 
*/ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -565,7 +663,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { /* Only try to re-login if this is NOT a Fabric Node */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -603,11 +702,15 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + ndlp->nlp_flag &= ~NLP_FIRSTBURST; if (npr->prliType == PRLI_FCP_TYPE) { if (npr->initiatorFunc) ndlp->nlp_type |= NLP_FCP_INITIATOR; - if (npr->targetFunc) + if (npr->targetFunc) { ndlp->nlp_type |= NLP_FCP_TARGET; + if (npr->writeXferRdyDis) + ndlp->nlp_flag |= NLP_FIRSTBURST; + } if (npr->Retry) ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; } @@ -886,7 +989,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } /* Put ndlp in npr state set plogi timer for 1 sec */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -1031,6 +1134,13 @@ out: "0261 Cannot Register NameServer login\n"); } + /* + ** In case the node reference counter does not go to zero, ensure that + ** the stale state for the node is not processed. + */ + + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DEFER_RM; spin_unlock_irq(shost->host_lock); @@ -1213,7 +1323,8 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, if ((irsp->ulpStatus) || (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) { /* 1 sec timeout */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -1229,7 +1340,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, } if (phba->sli_rev == LPFC_SLI_REV4) { - rc = lpfc_sli4_resume_rpi(ndlp); + rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL); if (rc) { /* Stay in state and retry. 
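The hunks above replace open-coded "jiffies + HZ" arithmetic with msecs_to_jiffies(1000). A minimal userspace sketch of why the two give the same one-second delay while the latter states the intent in milliseconds; the HZ value of 250 and the round-up formula are assumptions for illustration only, the in-kernel helper is more involved.

#include <stdio.h>

#define HZ 250                          /* assumed tick rate for the example */

static unsigned long msecs_to_jiffies(unsigned int ms)
{
	/* crude round-up model of the kernel helper */
	return ((unsigned long)ms * HZ + 999) / 1000;
}

int main(void)
{
	printf("literal HZ             -> %d jiffies\n", HZ);
	printf("msecs_to_jiffies(1000) -> %lu jiffies\n", msecs_to_jiffies(1000));
	return 0;                       /* both lines print 250 */
}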
*/ ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; @@ -1419,7 +1530,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, } /* Put ndlp in npr state set plogi timer for 1 sec */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -1580,12 +1692,16 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Check out PRLI rsp */ ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + ndlp->nlp_flag &= ~NLP_FIRSTBURST; if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) && (npr->prliType == PRLI_FCP_TYPE)) { if (npr->initiatorFunc) ndlp->nlp_type |= NLP_FCP_INITIATOR; - if (npr->targetFunc) + if (npr->targetFunc) { ndlp->nlp_type |= NLP_FCP_TARGET; + if (npr->writeXferRdyDis) + ndlp->nlp_flag |= NLP_FIRSTBURST; + } if (npr->Retry) ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; } @@ -1694,6 +1810,117 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport, } static uint32_t +lpfc_rcv_plogi_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; + struct ls_rjt stat; + + memset(&stat, 0, sizeof(struct ls_rjt)); + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_prli_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; + struct ls_rjt stat; + + memset(&stat, 0, sizeof(struct ls_rjt)); + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; + + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag &= NLP_LOGO_ACC; + spin_unlock_irq(shost->host_lock); + lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_padisc_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; + struct ls_rjt stat; + + memset(&stat, 0, sizeof(struct ls_rjt)); + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_rcv_prlo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; + struct ls_rjt stat; + + memset(&stat, 0, sizeof(struct ls_rjt)); + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + 
ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); + spin_unlock_irq(shost->host_lock); + lpfc_disc_set_adisc(vport, ndlp); + return ndlp->nlp_state; +} + +static uint32_t +lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + /* + * Take no action. If a LOGO is outstanding, then possibly DevLoss has + * timed out and is calling for Device Remove. In this case, the LOGO + * must be allowed to complete in state LOGO_ISSUE so that the rpi + * and other NLP flags are correctly cleaned up. + */ + return ndlp->nlp_state; +} + +static uint32_t +lpfc_device_recov_logo_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + /* + * Device Recovery events have no meaning for a node with a LOGO + * outstanding. The LOGO has to complete first and handle the + * node from that point. + */ + return ndlp->nlp_state; +} + +static uint32_t lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { @@ -1944,7 +2171,8 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) { - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; ndlp->nlp_flag &= ~NLP_NPR_ADISC; @@ -1964,13 +2192,16 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { struct lpfc_iocbq *cmdiocb, *rspiocb; IOCB_t *irsp; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->context_un.rsp_iocb; irsp = &rspiocb->iocb; if (irsp->ulpStatus) { + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DEFER_RM; + spin_unlock_irq(shost->host_lock); return NLP_STE_FREED_NODE; } return ndlp->nlp_state; @@ -1999,6 +2230,8 @@ lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + /* For the fabric port just clear the fc flags. 
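The new LOGO_ISSUE handlers above reject incoming ELS traffic and ignore device-remove/recovery events until the outstanding LOGO completes; the table wiring follows in the next hunk. A small userspace model of the state-by-event function-pointer dispatch this relies on; every name and value here is invented for the example, not a driver symbol.

#include <stdio.h>

enum { ST_LOGO_ISSUE, NSTATES };                /* invented state */
enum { EV_RCV_PLOGI, EV_DEVICE_RM, NEVENTS };   /* invented events */

static int reject_while_logo(int state, int event)
{
	(void)event;
	puts("LS_RJT: unable to perform command while LOGO outstanding");
	return state;                   /* state is unchanged */
}

static int ignore_event(int state, int event)
{
	(void)event;
	puts("no action until the LOGO completes");
	return state;
}

static int (*action[NSTATES * NEVENTS])(int, int) = {
	[ST_LOGO_ISSUE * NEVENTS + EV_RCV_PLOGI] = reject_while_logo,
	[ST_LOGO_ISSUE * NEVENTS + EV_DEVICE_RM] = ignore_event,
};

int main(void)
{
	int state = ST_LOGO_ISSUE;

	state = action[state * NEVENTS + EV_RCV_PLOGI](state, EV_RCV_PLOGI);
	state = action[state * NEVENTS + EV_DEVICE_RM](state, EV_DEVICE_RM);
	return state == ST_LOGO_ISSUE ? 0 : 1;
}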
*/ if (ndlp->nlp_DID == Fabric_DID) { spin_lock_irq(shost->host_lock); vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); @@ -2213,6 +2446,20 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT]) lpfc_device_rm_prli_issue, /* DEVICE_RM */ lpfc_device_recov_prli_issue, /* DEVICE_RECOVERY */ + lpfc_rcv_plogi_logo_issue, /* RCV_PLOGI LOGO_ISSUE */ + lpfc_rcv_prli_logo_issue, /* RCV_PRLI */ + lpfc_rcv_logo_logo_issue, /* RCV_LOGO */ + lpfc_rcv_padisc_logo_issue, /* RCV_ADISC */ + lpfc_rcv_padisc_logo_issue, /* RCV_PDISC */ + lpfc_rcv_prlo_logo_issue, /* RCV_PRLO */ + lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */ + lpfc_disc_illegal, /* CMPL_PRLI */ + lpfc_cmpl_logo_logo_issue, /* CMPL_LOGO */ + lpfc_disc_illegal, /* CMPL_ADISC */ + lpfc_disc_illegal, /* CMPL_REG_LOGIN */ + lpfc_device_rm_logo_issue, /* DEVICE_RM */ + lpfc_device_recov_logo_issue, /* DEVICE_RECOVERY */ + lpfc_rcv_plogi_unmap_node, /* RCV_PLOGI UNMAPPED_NODE */ lpfc_rcv_prli_unmap_node, /* RCV_PRLI */ lpfc_rcv_logo_unmap_node, /* RCV_LOGO */ diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index c60f5d0b386..2df11daad85 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2011 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -24,6 +24,8 @@ #include <linux/export.h> #include <linux/delay.h> #include <asm/unaligned.h> +#include <linux/crc-t10dif.h> +#include <net/checksum.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -39,8 +41,8 @@ #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" -#include "lpfc_scsi.h" #include "lpfc.h" +#include "lpfc_scsi.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" @@ -48,16 +50,16 @@ #define LPFC_RESET_WAIT 2 #define LPFC_ABORT_WAIT 2 -int _dump_buf_done; +int _dump_buf_done = 1; static char *dif_op_str[] = { - "SCSI_PROT_NORMAL", - "SCSI_PROT_READ_INSERT", - "SCSI_PROT_WRITE_STRIP", - "SCSI_PROT_READ_STRIP", - "SCSI_PROT_WRITE_INSERT", - "SCSI_PROT_READ_PASS", - "SCSI_PROT_WRITE_PASS", + "PROT_NORMAL", + "PROT_READ_INSERT", + "PROT_WRITE_STRIP", + "PROT_READ_STRIP", + "PROT_WRITE_INSERT", + "PROT_READ_PASS", + "PROT_WRITE_PASS", }; struct scsi_dif_tuple { @@ -66,10 +68,23 @@ struct scsi_dif_tuple { __be32 ref_tag; /* Target LBA or indirect LBA */ }; +static struct lpfc_rport_data * +lpfc_rport_data_from_scsi_device(struct scsi_device *sdev) +{ + struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata; + + if (vport->phba->cfg_fof) + return ((struct lpfc_device_data *)sdev->hostdata)->rport_data; + else + return (struct lpfc_rport_data *)sdev->hostdata; +} + static void lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); static void lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); +static int +lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc); static void lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd) @@ -128,6 +143,30 @@ lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd) } } +static inline unsigned +lpfc_cmd_blksize(struct scsi_cmnd *sc) +{ + return sc->device->sector_size; +} + +#define 
LPFC_CHECK_PROTECT_GUARD 1 +#define LPFC_CHECK_PROTECT_REF 2 +static inline unsigned +lpfc_cmd_protect(struct scsi_cmnd *sc, int flag) +{ + return 1; +} + +static inline unsigned +lpfc_cmd_guard_csum(struct scsi_cmnd *sc) +{ + if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF) + return 0; + if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP) + return 1; + return 0; +} + /** * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge. * @phba: Pointer to HBA object. @@ -276,9 +315,27 @@ lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) unsigned long new_queue_depth, old_queue_depth; old_queue_depth = sdev->queue_depth; - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); + + switch (reason) { + case SCSI_QDEPTH_DEFAULT: + /* change request from sysfs, fall through */ + case SCSI_QDEPTH_RAMP_UP: + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); + break; + case SCSI_QDEPTH_QFULL: + if (scsi_track_queue_full(sdev, qdepth) == 0) + return sdev->queue_depth; + + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, + "0711 detected queue full - lun queue " + "depth adjusted to %d.\n", sdev->queue_depth); + break; + default: + return -EOPNOTSUPP; + } + new_queue_depth = sdev->queue_depth; - rdata = sdev->hostdata; + rdata = lpfc_rport_data_from_scsi_device(sdev); if (rdata) lpfc_send_sdev_queuedepth_change_event(phba, vport, rdata->pnode, sdev->lun, @@ -288,6 +345,26 @@ lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) } /** + * lpfc_change_queue_type() - Change a device's scsi tag queuing type + * @sdev: Pointer the scsi device whose queue depth is to change + * @tag_type: Identifier for queue tag type + */ +static int +lpfc_change_queue_type(struct scsi_device *sdev, int tag_type) +{ + if (sdev->tagged_supported) { + scsi_set_tag_type(sdev, tag_type); + if (tag_type) + scsi_activate_tcq(sdev, sdev->queue_depth); + else + scsi_deactivate_tcq(sdev, sdev->queue_depth); + } else + tag_type = 0; + + return tag_type; +} + +/** * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread * @phba: The Hba for which this call is being executed. * @@ -329,50 +406,6 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba) } /** - * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread - * @phba: The Hba for which this call is being executed. - * - * This routine post WORKER_RAMP_UP_QUEUE event for @phba vport. This routine - * post at most 1 event every 5 minute after last_ramp_up_time or - * last_rsrc_error_time. This routine wakes up worker thread of @phba - * to process WORKER_RAM_DOWN_EVENT event. - * - * This routine should be called with no lock held. 
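The reworked queue-depth callback above dispatches on the reason code: sysfs/default and ramp-up requests simply adopt the requested depth, queue-full events go through queue-full tracking and may leave the depth unchanged, anything else is refused. A hedged userspace sketch of that shape; the enum, helper name and the "never grow on QFULL" simplification are invented for illustration.

#include <stdio.h>

enum qdepth_reason { QD_DEFAULT, QD_RAMP_UP, QD_QFULL, QD_OTHER };

static int adjust_depth(int cur, int requested, enum qdepth_reason why)
{
	switch (why) {
	case QD_DEFAULT:                /* sysfs request, fall through */
	case QD_RAMP_UP:
		return requested;
	case QD_QFULL:
		/* the driver consults scsi_track_queue_full() here; model it
		 * as "never grow on a queue-full event" */
		return requested < cur ? requested : cur;
	default:
		return -1;              /* -EOPNOTSUPP in the driver */
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       adjust_depth(30, 32, QD_RAMP_UP),   /* 32 */
	       adjust_depth(30, 16, QD_QFULL),     /* 16 */
	       adjust_depth(30, 32, QD_OTHER));    /* -1 */
	return 0;
}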
- **/ -static inline void -lpfc_rampup_queue_depth(struct lpfc_vport *vport, - uint32_t queue_depth) -{ - unsigned long flags; - struct lpfc_hba *phba = vport->phba; - uint32_t evt_posted; - atomic_inc(&phba->num_cmd_success); - - if (vport->cfg_lun_queue_depth <= queue_depth) - return; - spin_lock_irqsave(&phba->hbalock, flags); - if (time_before(jiffies, - phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) || - time_before(jiffies, - phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) { - spin_unlock_irqrestore(&phba->hbalock, flags); - return; - } - phba->last_ramp_up_time = jiffies; - spin_unlock_irqrestore(&phba->hbalock, flags); - - spin_lock_irqsave(&phba->pport->work_port_lock, flags); - evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE; - if (!evt_posted) - phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE; - spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); - - if (!evt_posted) - lpfc_worker_wake_up(phba); - return; -} - -/** * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler * @phba: The Hba for which this call is being executed. * @@ -393,6 +426,14 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) num_rsrc_err = atomic_read(&phba->num_rsrc_err); num_cmd_success = atomic_read(&phba->num_cmd_success); + /* + * The error and success command counters are global per + * driver instance. If another handler has already + * operated on this error event, just exit. + */ + if (num_rsrc_err == 0) + return; + vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { @@ -416,41 +457,6 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) } /** - * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler - * @phba: The Hba for which this call is being executed. - * - * This routine is called to process WORKER_RAMP_UP_QUEUE event for worker - * thread.This routine increases queue depth for all scsi device on each vport - * associated with @phba by 1. This routine also sets @phba num_rsrc_err and - * num_cmd_success to zero. - **/ -void -lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) -{ - struct lpfc_vport **vports; - struct Scsi_Host *shost; - struct scsi_device *sdev; - int i; - - vports = lpfc_create_vport_work_array(phba); - if (vports != NULL) - for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { - shost = lpfc_shost_from_vport(vports[i]); - shost_for_each_device(sdev, shost) { - if (vports[i]->cfg_lun_queue_depth <= - sdev->queue_depth) - continue; - lpfc_change_queue_depth(sdev, - sdev->queue_depth+1, - SCSI_QDEPTH_RAMP_UP); - } - } - lpfc_destroy_vport_work_array(phba, vports); - atomic_set(&phba->num_rsrc_err, 0); - atomic_set(&phba->num_cmd_success, 0); -} - -/** * lpfc_scsi_dev_block - set all scsi hosts to block state * @phba: Pointer to HBA context object. 
* @@ -506,7 +512,16 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) dma_addr_t pdma_phys_fcp_rsp; dma_addr_t pdma_phys_bpl; uint16_t iotag; - int bcnt; + int bcnt, bpl_size; + + bpl_size = phba->cfg_sg_dma_buf_size - + (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n", + num_to_alloc, phba->cfg_sg_dma_buf_size, + (int)sizeof(struct fcp_cmnd), + (int)sizeof(struct fcp_rsp), bpl_size); for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); @@ -682,7 +697,8 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, rrq_empty = list_empty(&phba->active_rrq_list); spin_unlock_irqrestore(&phba->hbalock, iflag); if (ndlp) { - lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1); + lpfc_set_rrq_active(phba, ndlp, + psb->cur_iocbq.sli4_lxritag, rxid, 1); lpfc_sli4_abts_err_handler(phba, ndlp, axri); } lpfc_release_scsi_buf_s4(phba, psb); @@ -703,7 +719,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); psb->exch_busy = 0; spin_unlock_irqrestore(&phba->hbalock, iflag); - if (pring->txq_cnt) + if (!list_empty(&pring->txq)) lpfc_worker_wake_up(phba); return; @@ -712,72 +728,168 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, } /** - * lpfc_sli4_repost_scsi_sgl_list - Repsot the Scsi buffers sgl pages as block + * lpfc_sli4_post_scsi_sgl_list - Psot blocks of scsi buffer sgls from a list * @phba: pointer to lpfc hba data structure. + * @post_sblist: pointer to the scsi buffer list. * - * This routine walks the list of scsi buffers that have been allocated and - * repost them to the HBA by using SGL block post. This is needed after a - * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine - * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list - * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers. + * This routine walks a list of scsi buffers that was passed in. It attempts + * to construct blocks of scsi buffer sgls which contains contiguous xris and + * uses the non-embedded SGL block post mailbox commands to post to the port. + * For single SCSI buffer sgl with non-contiguous xri, if any, it shall use + * embedded SGL post mailbox command for posting. The @post_sblist passed in + * must be local list, thus no lock is needed when manipulate the list. * - * Returns: 0 = success, non-zero failure. + * Returns: 0 = failure, non-zero number of successfully posted buffers. 
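The routine documented above builds posting blocks only out of buffers whose XRIs are contiguous, falls back to a single embedded post when a hole appears, and caps each block at the non-embedded mailbox limit. A simplified, runnable model of that grouping rule, not the driver's exact bookkeeping; the block limit of 8 and the helper name are assumptions for the example.

#include <stdio.h>

#define SGL_BLOCK_MAX 8         /* stand-in for LPFC_NEMBED_MBOX_SGL_CNT */

static void post_block(const int *xri, int n)
{
	printf("post %d sgl(s): xri %d..%d\n", n, xri[0], xri[n - 1]);
}

int main(void)
{
	int xris[] = { 10, 11, 12, 14, 15, 16, 17, 18, 19, 20, 21, 22, 40 };
	int n = sizeof(xris) / sizeof(xris[0]);
	int start = 0;

	for (int i = 1; i <= n; i++) {
		int hole = (i == n) || (xris[i] != xris[i - 1] + 1);
		int full = (i - start) == SGL_BLOCK_MAX;

		if (hole || full) {     /* close the current posting block */
			post_block(&xris[start], i - start);
			start = i;
		}
	}
	return 0;
}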
**/ int -lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba) +lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba, + struct list_head *post_sblist, int sb_count) { - struct lpfc_scsi_buf *psb; - int index, status, bcnt = 0, rcnt = 0, rc = 0; - LIST_HEAD(sblist); - - for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) { - psb = phba->sli4_hba.lpfc_scsi_psb_array[index]; - if (psb) { - /* Remove from SCSI buffer list */ - list_del(&psb->list); - /* Add it to a local SCSI buffer list */ - list_add_tail(&psb->list, &sblist); - if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) { - bcnt = rcnt; - rcnt = 0; + struct lpfc_scsi_buf *psb, *psb_next; + int status, sgl_size; + int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; + dma_addr_t pdma_phys_bpl1; + int last_xritag = NO_XRI; + LIST_HEAD(prep_sblist); + LIST_HEAD(blck_sblist); + LIST_HEAD(scsi_sblist); + + /* sanity check */ + if (sb_count <= 0) + return -EINVAL; + + sgl_size = phba->cfg_sg_dma_buf_size - + (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + + list_for_each_entry_safe(psb, psb_next, post_sblist, list) { + list_del_init(&psb->list); + block_cnt++; + if ((last_xritag != NO_XRI) && + (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) { + /* a hole in xri block, form a sgl posting block */ + list_splice_init(&prep_sblist, &blck_sblist); + post_cnt = block_cnt - 1; + /* prepare list for next posting block */ + list_add_tail(&psb->list, &prep_sblist); + block_cnt = 1; + } else { + /* prepare list for next posting block */ + list_add_tail(&psb->list, &prep_sblist); + /* enough sgls for non-embed sgl mbox command */ + if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { + list_splice_init(&prep_sblist, &blck_sblist); + post_cnt = block_cnt; + block_cnt = 0; } - } else - /* A hole present in the XRI array, need to skip */ - bcnt = rcnt; + } + num_posting++; + last_xritag = psb->cur_iocbq.sli4_xritag; - if (index == phba->sli4_hba.scsi_xri_cnt - 1) - /* End of XRI array for SCSI buffer, complete */ - bcnt = rcnt; + /* end of repost sgl list condition for SCSI buffers */ + if (num_posting == sb_count) { + if (post_cnt == 0) { + /* last sgl posting block */ + list_splice_init(&prep_sblist, &blck_sblist); + post_cnt = block_cnt; + } else if (block_cnt == 1) { + /* last single sgl with non-contiguous xri */ + if (sgl_size > SGL_PAGE_SIZE) + pdma_phys_bpl1 = psb->dma_phys_bpl + + SGL_PAGE_SIZE; + else + pdma_phys_bpl1 = 0; + status = lpfc_sli4_post_sgl(phba, + psb->dma_phys_bpl, + pdma_phys_bpl1, + psb->cur_iocbq.sli4_xritag); + if (status) { + /* failure, put on abort scsi list */ + psb->exch_busy = 1; + } else { + /* success, put on SCSI buffer list */ + psb->exch_busy = 0; + psb->status = IOSTAT_SUCCESS; + num_posted++; + } + /* success, put on SCSI buffer sgl list */ + list_add_tail(&psb->list, &scsi_sblist); + } + } - /* Continue until collect up to a nembed page worth of sgls */ - if (bcnt == 0) + /* continue until a nembed page worth of sgls */ + if (post_cnt == 0) continue; - /* Now, post the SCSI buffer list sgls as a block */ - if (!phba->sli4_hba.extents_in_use) - status = lpfc_sli4_post_scsi_sgl_block(phba, - &sblist, - bcnt); - else - status = lpfc_sli4_post_scsi_sgl_blk_ext(phba, - &sblist, - bcnt); - /* Reset SCSI buffer count for next round of posting */ - bcnt = 0; - while (!list_empty(&sblist)) { - list_remove_head(&sblist, psb, struct lpfc_scsi_buf, - list); + + /* post block of SCSI buffer list sgls */ + status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist, + post_cnt); + + /* don't reset xirtag due to hole in 
xri block */ + if (block_cnt == 0) + last_xritag = NO_XRI; + + /* reset SCSI buffer post count for next round of posting */ + post_cnt = 0; + + /* put posted SCSI buffer-sgl posted on SCSI buffer sgl list */ + while (!list_empty(&blck_sblist)) { + list_remove_head(&blck_sblist, psb, + struct lpfc_scsi_buf, list); if (status) { - /* Put this back on the abort scsi list */ + /* failure, put on abort scsi list */ psb->exch_busy = 1; - rc++; } else { + /* success, put on SCSI buffer list */ psb->exch_busy = 0; psb->status = IOSTAT_SUCCESS; + num_posted++; } - /* Put it back into the SCSI buffer list */ - lpfc_release_scsi_buf_s4(phba, psb); + list_add_tail(&psb->list, &scsi_sblist); } } + /* Push SCSI buffers with sgl posted to the availble list */ + while (!list_empty(&scsi_sblist)) { + list_remove_head(&scsi_sblist, psb, + struct lpfc_scsi_buf, list); + lpfc_release_scsi_buf_s4(phba, psb); + } + return num_posted; +} + +/** + * lpfc_sli4_repost_scsi_sgl_list - Repsot all the allocated scsi buffer sgls + * @phba: pointer to lpfc hba data structure. + * + * This routine walks the list of scsi buffers that have been allocated and + * repost them to the port by using SGL block post. This is needed after a + * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine + * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list + * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers. + * + * Returns: 0 = success, non-zero failure. + **/ +int +lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba) +{ + LIST_HEAD(post_sblist); + int num_posted, rc = 0; + + /* get all SCSI buffers need to repost to a local list */ + spin_lock_irq(&phba->scsi_buf_list_get_lock); + spin_lock(&phba->scsi_buf_list_put_lock); + list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist); + list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist); + spin_unlock(&phba->scsi_buf_list_put_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); + + /* post the list of scsi buffer sgls to port if available */ + if (!list_empty(&post_sblist)) { + num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist, + phba->sli4_hba.scsi_xri_cnt); + /* failed to post any scsi buffer, return error */ + if (num_posted == 0) + rc = -EIO; + } return rc; } @@ -786,12 +898,13 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba) * @vport: The virtual port for which this call being executed. * @num_to_allocate: The requested number of buffers to allocate. * - * This routine allocates a scsi buffer for device with SLI-4 interface spec, + * This routine allocates scsi buffers for device with SLI-4 interface spec, * the scsi buffer contains all the necessary information needed to initiate - * a SCSI I/O. + * a SCSI I/O. After allocating up to @num_to_allocate SCSI buffers and put + * them on a list, it post them to the port by using SGL block post. * * Return codes: - * int - number of scsi buffers that were allocated. + * int - number of scsi buffers that were allocated and posted. * 0 = failure, less than num_to_alloc is a partial failure. 
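Both the repost path above and the allocation paths further down rely on the new split get/put buffer lists: consumers pop from the get list and only splice the put list in, under both locks, when the get side runs dry, so the allocate and free paths stay off each other's lock most of the time. A minimal userspace model of that two-lock pool (compile with -lpthread); every name here is invented, and the real driver preserves FIFO order with list_splice rather than the LIFO push used below.

#include <pthread.h>
#include <stdio.h>

struct buf { struct buf *next; int id; };

static struct buf *get_list, *put_list;
static pthread_mutex_t get_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t put_lock = PTHREAD_MUTEX_INITIALIZER;

static void buf_free(struct buf *b)
{
	pthread_mutex_lock(&put_lock);          /* free path: put lock only */
	b->next = put_list;
	put_list = b;
	pthread_mutex_unlock(&put_lock);
}

static struct buf *buf_alloc(void)
{
	struct buf *b;

	pthread_mutex_lock(&get_lock);
	if (!get_list) {                /* get side dry: splice put side in */
		pthread_mutex_lock(&put_lock);
		get_list = put_list;
		put_list = NULL;
		pthread_mutex_unlock(&put_lock);
	}
	b = get_list;
	if (b)
		get_list = b->next;
	pthread_mutex_unlock(&get_lock);
	return b;
}

int main(void)
{
	struct buf a = { .id = 1 }, b = { .id = 2 };
	struct buf *x, *y;

	buf_free(&a);
	buf_free(&b);
	x = buf_alloc();
	y = buf_alloc();
	printf("%d %d\n", x->id, y->id);        /* 2 1 with this LIFO model */
	return 0;
}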
**/ static int @@ -803,23 +916,31 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) IOCB_t *iocb; dma_addr_t pdma_phys_fcp_cmd; dma_addr_t pdma_phys_fcp_rsp; - dma_addr_t pdma_phys_bpl, pdma_phys_bpl1; - uint16_t iotag, last_xritag = NO_XRI, lxri = 0; - int status = 0, index; - int bcnt; - int non_sequential_xri = 0; - LIST_HEAD(sblist); + dma_addr_t pdma_phys_bpl; + uint16_t iotag, lxri = 0; + int bcnt, num_posted, sgl_size; + LIST_HEAD(prep_sblist); + LIST_HEAD(post_sblist); + LIST_HEAD(scsi_sblist); + + sgl_size = phba->cfg_sg_dma_buf_size - + (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n", + num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size, + (int)sizeof(struct fcp_cmnd), + (int)sizeof(struct fcp_rsp)); for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); if (!psb) break; - /* - * Get memory from the pci pool to map the virt space to pci bus - * space for an I/O. The DMA buffer includes space for the - * struct fcp_cmnd, struct fcp_rsp and the number of bde's - * necessary to support the sg_tablesize. + * Get memory from the pci pool to map the virt space to + * pci bus space for an I/O. The DMA buffer includes space + * for the struct fcp_cmnd, struct fcp_rsp and the number + * of bde's necessary to support the sg_tablesize. */ psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL, &psb->dma_handle); @@ -827,19 +948,21 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) kfree(psb); break; } - - /* Initialize virtual ptrs to dma_buf region. */ memset(psb->data, 0, phba->cfg_sg_dma_buf_size); - /* Allocate iotag for psb->cur_iocbq. */ - iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); - if (iotag == 0) { + /* + * 4K Page alignment is CRITICAL to BlockGuard, double check + * to be sure. + */ + if (phba->cfg_enable_bg && (((unsigned long)(psb->data) & + (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { pci_pool_free(phba->lpfc_scsi_dma_buf_pool, - psb->data, psb->dma_handle); + psb->data, psb->dma_handle); kfree(psb); break; } + lxri = lpfc_sli4_next_xritag(phba); if (lxri == NO_XRI) { pci_pool_free(phba->lpfc_scsi_dma_buf_pool, @@ -847,36 +970,37 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) kfree(psb); break; } + + /* Allocate iotag for psb->cur_iocbq. */ + iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); + if (iotag == 0) { + pci_pool_free(phba->lpfc_scsi_dma_buf_pool, + psb->data, psb->dma_handle); + kfree(psb); + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + "3368 Failed to allocated IOTAG for" + " XRI:0x%x\n", lxri); + lpfc_sli4_free_xri(phba, lxri); + break; + } psb->cur_iocbq.sli4_lxritag = lxri; psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; - if (last_xritag != NO_XRI - && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) { - non_sequential_xri = 1; - } else - list_add_tail(&psb->list, &sblist); - last_xritag = psb->cur_iocbq.sli4_xritag; - - index = phba->sli4_hba.scsi_xri_cnt++; psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; - psb->fcp_bpl = psb->data; - psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size) - - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + psb->fcp_cmnd = (psb->data + sgl_size); psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd + sizeof(struct fcp_cmnd)); /* Initialize local short-hand pointers. 
*/ sgl = (struct sli4_sge *)psb->fcp_bpl; pdma_phys_bpl = psb->dma_handle; - pdma_phys_fcp_cmd = - (psb->dma_handle + phba->cfg_sg_dma_buf_size) - - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size); pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); /* - * The first two bdes are the FCP_CMD and FCP_RSP. The balance - * are sg list bdes. Initialize the first two and leave the - * rest for queuecommand. + * The first two bdes are the FCP_CMD and FCP_RSP. + * The balance are sg list bdes. Initialize the + * first two and leave the rest for queuecommand. */ sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); @@ -911,62 +1035,27 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) iocb->ulpBdeCount = 1; iocb->ulpLe = 1; iocb->ulpClass = CLASS3; - psb->cur_iocbq.context1 = psb; - if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) - pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE; - else - pdma_phys_bpl1 = 0; + psb->cur_iocbq.context1 = psb; psb->dma_phys_bpl = pdma_phys_bpl; - phba->sli4_hba.lpfc_scsi_psb_array[index] = psb; - if (non_sequential_xri) { - status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl, - pdma_phys_bpl1, - psb->cur_iocbq.sli4_xritag); - if (status) { - /* Put this back on the abort scsi list */ - psb->exch_busy = 1; - } else { - psb->exch_busy = 0; - psb->status = IOSTAT_SUCCESS; - } - /* Put it back into the SCSI buffer list */ - lpfc_release_scsi_buf_s4(phba, psb); - break; - } - } - if (bcnt) { - if (!phba->sli4_hba.extents_in_use) - status = lpfc_sli4_post_scsi_sgl_block(phba, - &sblist, - bcnt); - else - status = lpfc_sli4_post_scsi_sgl_blk_ext(phba, - &sblist, - bcnt); - - if (status) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "3021 SCSI SGL post error %d\n", - status); - bcnt = 0; - } - /* Reset SCSI buffer count for next round of posting */ - while (!list_empty(&sblist)) { - list_remove_head(&sblist, psb, struct lpfc_scsi_buf, - list); - if (status) { - /* Put this back on the abort scsi list */ - psb->exch_busy = 1; - } else { - psb->exch_busy = 0; - psb->status = IOSTAT_SUCCESS; - } - /* Put it back into the SCSI buffer list */ - lpfc_release_scsi_buf_s4(phba, psb); - } + + /* add the scsi buffer to a post list */ + list_add_tail(&psb->list, &post_sblist); + spin_lock_irq(&phba->scsi_buf_list_get_lock); + phba->sli4_hba.scsi_xri_cnt++; + spin_unlock_irq(&phba->scsi_buf_list_get_lock); } + lpfc_printf_log(phba, KERN_INFO, LOG_BG, + "3021 Allocate %d out of %d requested new SCSI " + "buffers\n", bcnt, num_to_alloc); + + /* post the list of scsi buffer sgls to port if available */ + if (!list_empty(&post_sblist)) + num_posted = lpfc_sli4_post_scsi_sgl_list(phba, + &post_sblist, bcnt); + else + num_posted = 0; - return bcnt + non_sequential_xri; + return num_posted; } /** @@ -1002,17 +1091,22 @@ static struct lpfc_scsi_buf* lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { struct lpfc_scsi_buf * lpfc_cmd = NULL; - struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list; + struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get; unsigned long iflag = 0; - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); - list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list); - if (lpfc_cmd) { - lpfc_cmd->seg_cnt = 0; - lpfc_cmd->nonsg_phys = 0; - lpfc_cmd->prot_seg_cnt = 0; + spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag); + list_remove_head(scsi_buf_list_get, 
lpfc_cmd, struct lpfc_scsi_buf, + list); + if (!lpfc_cmd) { + spin_lock(&phba->scsi_buf_list_put_lock); + list_splice(&phba->lpfc_scsi_buf_list_put, + &phba->lpfc_scsi_buf_list_get); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); + list_remove_head(scsi_buf_list_get, lpfc_cmd, + struct lpfc_scsi_buf, list); + spin_unlock(&phba->scsi_buf_list_put_lock); } - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag); return lpfc_cmd; } /** @@ -1029,29 +1123,40 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) static struct lpfc_scsi_buf* lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { - struct lpfc_scsi_buf *lpfc_cmd ; + struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next; unsigned long iflag = 0; int found = 0; - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); - list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list, - list) { + spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag); + list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, + &phba->lpfc_scsi_buf_list_get, list) { if (lpfc_test_rrq_active(phba, ndlp, - lpfc_cmd->cur_iocbq.sli4_xritag)) + lpfc_cmd->cur_iocbq.sli4_lxritag)) continue; list_del(&lpfc_cmd->list); found = 1; - lpfc_cmd->seg_cnt = 0; - lpfc_cmd->nonsg_phys = 0; - lpfc_cmd->prot_seg_cnt = 0; break; } - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, - iflag); + if (!found) { + spin_lock(&phba->scsi_buf_list_put_lock); + list_splice(&phba->lpfc_scsi_buf_list_put, + &phba->lpfc_scsi_buf_list_get); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); + spin_unlock(&phba->scsi_buf_list_put_lock); + list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, + &phba->lpfc_scsi_buf_list_get, list) { + if (lpfc_test_rrq_active( + phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag)) + continue; + list_del(&lpfc_cmd->list); + found = 1; + break; + } + } + spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag); if (!found) return NULL; - else - return lpfc_cmd; + return lpfc_cmd; } /** * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA @@ -1083,10 +1188,15 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) { unsigned long iflag = 0; - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); + psb->seg_cnt = 0; + psb->nonsg_phys = 0; + psb->prot_seg_cnt = 0; + + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); psb->pCmd = NULL; - list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + psb->cur_iocbq.iocb_flag = LPFC_IO_FCP; + list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); } /** @@ -1104,6 +1214,10 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) { unsigned long iflag = 0; + psb->seg_cnt = 0; + psb->nonsg_phys = 0; + psb->prot_seg_cnt = 0; + if (psb->exch_busy) { spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); @@ -1113,11 +1227,11 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); } else { - - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); psb->pCmd = NULL; - list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + psb->cur_iocbq.iocb_flag = LPFC_IO_FCP; + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); + list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put); + 
spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); } } @@ -1191,6 +1305,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) "dma_map_sg. Config %d, seg_cnt %d\n", __func__, phba->cfg_sg_seg_cnt, lpfc_cmd->seg_cnt); + lpfc_cmd->seg_cnt = 0; scsi_dma_unmap(scsi_cmnd); return 1; } @@ -1273,39 +1388,52 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) return 0; } -static inline unsigned -lpfc_cmd_blksize(struct scsi_cmnd *sc) -{ - return sc->device->sector_size; -} - #ifdef CONFIG_SCSI_LPFC_DEBUG_FS -/* - * Given a scsi cmnd, determine the BlockGuard tags to be used with it + +/* Return if if error injection is detected by Initiator */ +#define BG_ERR_INIT 0x1 +/* Return if if error injection is detected by Target */ +#define BG_ERR_TGT 0x2 +/* Return if if swapping CSUM<-->CRC is required for error injection */ +#define BG_ERR_SWAP 0x10 +/* Return if disabling Guard/Ref/App checking is required for error injection */ +#define BG_ERR_CHECK 0x20 + +/** + * lpfc_bg_err_inject - Determine if we should inject an error + * @phba: The Hba for which this call is being executed. * @sc: The SCSI command to examine * @reftag: (out) BlockGuard reference tag for transmitted data * @apptag: (out) BlockGuard application tag for transmitted data * @new_guard (in) Value to replace CRC with if needed * - * Returns (1) if error injection was performed, (0) otherwise - */ + * Returns BG_ERR_* bit mask or 0 if request ignored + **/ static int lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, uint32_t *reftag, uint16_t *apptag, uint32_t new_guard) { struct scatterlist *sgpe; /* s/g prot entry */ struct scatterlist *sgde; /* s/g data entry */ - struct scsi_dif_tuple *src; + struct lpfc_scsi_buf *lpfc_cmd = NULL; + struct scsi_dif_tuple *src = NULL; + struct lpfc_nodelist *ndlp; + struct lpfc_rport_data *rdata; uint32_t op = scsi_get_prot_op(sc); uint32_t blksize; uint32_t numblks; sector_t lba; int rc = 0; + int blockoff = 0; if (op == SCSI_PROT_NORMAL) return 0; + sgpe = scsi_prot_sglist(sc); + sgde = scsi_sglist(sc); lba = scsi_get_lba(sc); + + /* First check if we need to match the LBA */ if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) { blksize = lpfc_cmd_blksize(sc); numblks = (scsi_bufflen(sc) + blksize - 1) / blksize; @@ -1314,167 +1442,404 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, if ((phba->lpfc_injerr_lba < lba) || (phba->lpfc_injerr_lba >= (lba + numblks))) return 0; + if (sgpe) { + blockoff = phba->lpfc_injerr_lba - lba; + numblks = sg_dma_len(sgpe) / + sizeof(struct scsi_dif_tuple); + if (numblks < blockoff) + blockoff = numblks; + } } - sgpe = scsi_prot_sglist(sc); - sgde = scsi_sglist(sc); + /* Next check if we need to match the remote NPortID or WWPN */ + rdata = lpfc_rport_data_from_scsi_device(sc->device); + if (rdata && rdata->pnode) { + ndlp = rdata->pnode; + + /* Make sure we have the right NPortID if one is specified */ + if (phba->lpfc_injerr_nportid && + (phba->lpfc_injerr_nportid != ndlp->nlp_DID)) + return 0; - /* Should we change the Reference Tag */ - if (reftag) { /* - * If we are SCSI_PROT_WRITE_STRIP, the protection data is - * being stripped from the wire, thus it doesn't matter. + * Make sure we have the right WWPN if one is specified. + * wwn[0] should be a non-zero NAA in a good WWPN. 
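The injection logic above only fires when the I/O covers the configured LBA and, when one is set, matches the remote NPortID (and WWPN). A small userspace sketch of that gating, assuming an invented filter struct and sentinel value; the real driver additionally decrements per-tag counters and clears the whole filter once they reach zero.

#include <stdint.h>
#include <stdio.h>

#define INJ_LBA_OFF UINT64_MAX          /* "no LBA configured" sentinel */

struct inj_filter {
	uint64_t lba;                   /* first LBA to corrupt */
	uint32_t nportid;               /* 0 = match any target */
};

static int should_inject(const struct inj_filter *f,
			 uint64_t io_lba, uint32_t io_blocks,
			 uint32_t target_nportid)
{
	if (f->lba != INJ_LBA_OFF &&
	    (f->lba < io_lba || f->lba >= io_lba + io_blocks))
		return 0;               /* I/O does not cover the chosen LBA */
	if (f->nportid && f->nportid != target_nportid)
		return 0;               /* wrong remote port */
	return 1;
}

int main(void)
{
	struct inj_filter f = { .lba = 100, .nportid = 0x011200 };

	printf("%d %d\n",
	       should_inject(&f, 96, 8, 0x011200),      /* 1: covers LBA 100 */
	       should_inject(&f, 96, 8, 0x011300));     /* 0: wrong NPortID */
	return 0;
}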
*/ - if ((op == SCSI_PROT_WRITE_PASS) || - (op == SCSI_PROT_WRITE_INSERT)) { - if (phba->lpfc_injerr_wref_cnt) { + if (phba->lpfc_injerr_wwpn.u.wwn[0] && + (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn, + sizeof(struct lpfc_name)) != 0)) + return 0; + } + + /* Setup a ptr to the protection data if the SCSI host provides it */ + if (sgpe) { + src = (struct scsi_dif_tuple *)sg_virt(sgpe); + src += blockoff; + lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble; + } + + /* Should we change the Reference Tag */ + if (reftag) { + if (phba->lpfc_injerr_wref_cnt) { + switch (op) { + case SCSI_PROT_WRITE_PASS: + if (src) { + /* + * For WRITE_PASS, force the error + * to be sent on the wire. It should + * be detected by the Target. + * If blockoff != 0 error will be + * inserted in middle of the IO. + */ + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9076 BLKGRD: Injecting reftag error: " + "write lba x%lx + x%x oldrefTag x%x\n", + (unsigned long)lba, blockoff, + be32_to_cpu(src->ref_tag)); + + /* + * Save the old ref_tag so we can + * restore it on completion. + */ + if (lpfc_cmd) { + lpfc_cmd->prot_data_type = + LPFC_INJERR_REFTAG; + lpfc_cmd->prot_data_segment = + src; + lpfc_cmd->prot_data = + src->ref_tag; + } + src->ref_tag = cpu_to_be32(0xDEADBEEF); + phba->lpfc_injerr_wref_cnt--; + if (phba->lpfc_injerr_wref_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + rc = BG_ERR_TGT | BG_ERR_CHECK; + + break; + } + /* Drop thru */ + case SCSI_PROT_WRITE_INSERT: + /* + * For WRITE_INSERT, force the error + * to be sent on the wire. It should be + * detected by the Target. + */ /* DEADBEEF will be the reftag on the wire */ *reftag = 0xDEADBEEF; phba->lpfc_injerr_wref_cnt--; - phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; - rc = 1; + if (phba->lpfc_injerr_wref_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + rc = BG_ERR_TGT | BG_ERR_CHECK; lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9081 BLKGRD: Injecting reftag error: " + "9078 BLKGRD: Injecting reftag error: " "write lba x%lx\n", (unsigned long)lba); + break; + case SCSI_PROT_WRITE_STRIP: + /* + * For WRITE_STRIP and WRITE_PASS, + * force the error on data + * being copied from SLI-Host to SLI-Port. + */ + *reftag = 0xDEADBEEF; + phba->lpfc_injerr_wref_cnt--; + if (phba->lpfc_injerr_wref_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + rc = BG_ERR_INIT; + + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9077 BLKGRD: Injecting reftag error: " + "write lba x%lx\n", (unsigned long)lba); + break; } - } else { - if (phba->lpfc_injerr_rref_cnt) { + } + if (phba->lpfc_injerr_rref_cnt) { + switch (op) { + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_READ_STRIP: + case SCSI_PROT_READ_PASS: + /* + * For READ_STRIP and READ_PASS, force the + * error on data being read off the wire. It + * should force an IO error to the driver. 
+ */ *reftag = 0xDEADBEEF; phba->lpfc_injerr_rref_cnt--; - phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; - rc = 1; + if (phba->lpfc_injerr_rref_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + rc = BG_ERR_INIT; lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9076 BLKGRD: Injecting reftag error: " + "9079 BLKGRD: Injecting reftag error: " "read lba x%lx\n", (unsigned long)lba); + break; } } } /* Should we change the Application Tag */ if (apptag) { - /* - * If we are SCSI_PROT_WRITE_STRIP, the protection data is - * being stripped from the wire, thus it doesn't matter. - */ - if ((op == SCSI_PROT_WRITE_PASS) || - (op == SCSI_PROT_WRITE_INSERT)) { - if (phba->lpfc_injerr_wapp_cnt) { + if (phba->lpfc_injerr_wapp_cnt) { + switch (op) { + case SCSI_PROT_WRITE_PASS: + if (src) { + /* + * For WRITE_PASS, force the error + * to be sent on the wire. It should + * be detected by the Target. + * If blockoff != 0 error will be + * inserted in middle of the IO. + */ + + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9080 BLKGRD: Injecting apptag error: " + "write lba x%lx + x%x oldappTag x%x\n", + (unsigned long)lba, blockoff, + be16_to_cpu(src->app_tag)); + /* + * Save the old app_tag so we can + * restore it on completion. + */ + if (lpfc_cmd) { + lpfc_cmd->prot_data_type = + LPFC_INJERR_APPTAG; + lpfc_cmd->prot_data_segment = + src; + lpfc_cmd->prot_data = + src->app_tag; + } + src->app_tag = cpu_to_be16(0xDEAD); + phba->lpfc_injerr_wapp_cnt--; + if (phba->lpfc_injerr_wapp_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + rc = BG_ERR_TGT | BG_ERR_CHECK; + break; + } + /* Drop thru */ + case SCSI_PROT_WRITE_INSERT: + /* + * For WRITE_INSERT, force the + * error to be sent on the wire. It should be + * detected by the Target. + */ /* DEAD will be the apptag on the wire */ *apptag = 0xDEAD; phba->lpfc_injerr_wapp_cnt--; - phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; - rc = 1; + if (phba->lpfc_injerr_wapp_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + rc = BG_ERR_TGT | BG_ERR_CHECK; lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9077 BLKGRD: Injecting apptag error: " + "0813 BLKGRD: Injecting apptag error: " "write lba x%lx\n", (unsigned long)lba); + break; + case SCSI_PROT_WRITE_STRIP: + /* + * For WRITE_STRIP and WRITE_PASS, + * force the error on data + * being copied from SLI-Host to SLI-Port. + */ + *apptag = 0xDEAD; + phba->lpfc_injerr_wapp_cnt--; + if (phba->lpfc_injerr_wapp_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + rc = BG_ERR_INIT; + + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "0812 BLKGRD: Injecting apptag error: " + "write lba x%lx\n", (unsigned long)lba); + break; } - } else { - if (phba->lpfc_injerr_rapp_cnt) { + } + if (phba->lpfc_injerr_rapp_cnt) { + switch (op) { + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_READ_STRIP: + case SCSI_PROT_READ_PASS: + /* + * For READ_STRIP and READ_PASS, force the + * error on data being read off the wire. It + * should force an IO error to the driver. 
+ */ *apptag = 0xDEAD; phba->lpfc_injerr_rapp_cnt--; - phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; - rc = 1; + if (phba->lpfc_injerr_rapp_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + rc = BG_ERR_INIT; lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9078 BLKGRD: Injecting apptag error: " + "0814 BLKGRD: Injecting apptag error: " "read lba x%lx\n", (unsigned long)lba); + break; } } } + /* Should we change the Guard Tag */ + if (new_guard) { + if (phba->lpfc_injerr_wgrd_cnt) { + switch (op) { + case SCSI_PROT_WRITE_PASS: + rc = BG_ERR_CHECK; + /* Drop thru */ + + case SCSI_PROT_WRITE_INSERT: + /* + * For WRITE_INSERT, force the + * error to be sent on the wire. It should be + * detected by the Target. + */ + phba->lpfc_injerr_wgrd_cnt--; + if (phba->lpfc_injerr_wgrd_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } - /* - * If we are SCSI_PROT_WRITE_INSERT, the protection data is - * being on the wire is being fully generated on the HBA. - * The host cannot change it or force an error. - */ - if (((op == SCSI_PROT_WRITE_STRIP) || - (op == SCSI_PROT_WRITE_PASS)) && - phba->lpfc_injerr_wgrd_cnt) { - if (sgpe) { - src = (struct scsi_dif_tuple *)sg_virt(sgpe); - /* - * Just inject an error in the first - * prot block. - */ - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9079 BLKGRD: Injecting guard error: " - "write lba x%lx oldGuard x%x refTag x%x\n", - (unsigned long)lba, src->guard_tag, - src->ref_tag); + rc |= BG_ERR_TGT | BG_ERR_SWAP; + /* Signals the caller to swap CRC->CSUM */ - src->guard_tag = (uint16_t)new_guard; - phba->lpfc_injerr_wgrd_cnt--; - phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; - rc = 1; + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "0817 BLKGRD: Injecting guard error: " + "write lba x%lx\n", (unsigned long)lba); + break; + case SCSI_PROT_WRITE_STRIP: + /* + * For WRITE_STRIP and WRITE_PASS, + * force the error on data + * being copied from SLI-Host to SLI-Port. + */ + phba->lpfc_injerr_wgrd_cnt--; + if (phba->lpfc_injerr_wgrd_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } - } else { - blksize = lpfc_cmd_blksize(sc); - /* - * Jump past the first data block - * and inject an error in the - * prot data. The prot data is already - * embedded after the regular data. - */ - src = (struct scsi_dif_tuple *) - (sg_virt(sgde) + blksize); + rc = BG_ERR_INIT | BG_ERR_SWAP; + /* Signals the caller to swap CRC->CSUM */ - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9080 BLKGRD: Injecting guard error: " - "write lba x%lx oldGuard x%x refTag x%x\n", - (unsigned long)lba, src->guard_tag, - src->ref_tag); - - src->guard_tag = (uint16_t)new_guard; - phba->lpfc_injerr_wgrd_cnt--; - phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; - rc = 1; + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "0816 BLKGRD: Injecting guard error: " + "write lba x%lx\n", (unsigned long)lba); + break; + } + } + if (phba->lpfc_injerr_rgrd_cnt) { + switch (op) { + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_READ_STRIP: + case SCSI_PROT_READ_PASS: + /* + * For READ_STRIP and READ_PASS, force the + * error on data being read off the wire. It + * should force an IO error to the driver. 
+ */ + phba->lpfc_injerr_rgrd_cnt--; + if (phba->lpfc_injerr_rgrd_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + + rc = BG_ERR_INIT | BG_ERR_SWAP; + /* Signals the caller to swap CRC->CSUM */ + + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "0818 BLKGRD: Injecting guard error: " + "read lba x%lx\n", (unsigned long)lba); + } } } + return rc; } #endif -/* - * Given a scsi cmnd, determine the BlockGuard opcodes to be used with it +/** + * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with + * the specified SCSI command. + * @phba: The Hba for which this call is being executed. * @sc: The SCSI command to examine * @txopt: (out) BlockGuard operation for transmitted data * @rxopt: (out) BlockGuard operation for received data * * Returns: zero on success; non-zero if tx and/or rx op cannot be determined * - */ + **/ static int lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, uint8_t *txop, uint8_t *rxop) { - uint8_t guard_type = scsi_host_get_guard(sc->device->host); uint8_t ret = 0; - if (guard_type == SHOST_DIX_GUARD_IP) { + if (lpfc_cmd_guard_csum(sc)) { switch (scsi_get_prot_op(sc)) { case SCSI_PROT_READ_INSERT: case SCSI_PROT_WRITE_STRIP: - *txop = BG_OP_IN_CSUM_OUT_NODIF; *rxop = BG_OP_IN_NODIF_OUT_CSUM; + *txop = BG_OP_IN_CSUM_OUT_NODIF; break; case SCSI_PROT_READ_STRIP: case SCSI_PROT_WRITE_INSERT: - *txop = BG_OP_IN_NODIF_OUT_CRC; *rxop = BG_OP_IN_CRC_OUT_NODIF; + *txop = BG_OP_IN_NODIF_OUT_CRC; break; case SCSI_PROT_READ_PASS: case SCSI_PROT_WRITE_PASS: - *txop = BG_OP_IN_CSUM_OUT_CRC; *rxop = BG_OP_IN_CRC_OUT_CSUM; + *txop = BG_OP_IN_CSUM_OUT_CRC; break; case SCSI_PROT_NORMAL: @@ -1490,20 +1855,20 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, switch (scsi_get_prot_op(sc)) { case SCSI_PROT_READ_STRIP: case SCSI_PROT_WRITE_INSERT: - *txop = BG_OP_IN_NODIF_OUT_CRC; *rxop = BG_OP_IN_CRC_OUT_NODIF; + *txop = BG_OP_IN_NODIF_OUT_CRC; break; case SCSI_PROT_READ_PASS: case SCSI_PROT_WRITE_PASS: - *txop = BG_OP_IN_CRC_OUT_CRC; *rxop = BG_OP_IN_CRC_OUT_CRC; + *txop = BG_OP_IN_CRC_OUT_CRC; break; case SCSI_PROT_READ_INSERT: case SCSI_PROT_WRITE_STRIP: - *txop = BG_OP_IN_CRC_OUT_NODIF; *rxop = BG_OP_IN_NODIF_OUT_CRC; + *txop = BG_OP_IN_CRC_OUT_NODIF; break; case SCSI_PROT_NORMAL: @@ -1519,8 +1884,87 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, return ret; } -/* - * This function sets up buffer list for protection groups of +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS +/** + * lpfc_bg_err_opcodes - reDetermine the BlockGuard opcodes to be used with + * the specified SCSI command in order to force a guard tag error. + * @phba: The Hba for which this call is being executed. 
+ * @sc: The SCSI command to examine + * @txopt: (out) BlockGuard operation for transmitted data + * @rxopt: (out) BlockGuard operation for received data + * + * Returns: zero on success; non-zero if tx and/or rx op cannot be determined + * + **/ +static int +lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, + uint8_t *txop, uint8_t *rxop) +{ + uint8_t ret = 0; + + if (lpfc_cmd_guard_csum(sc)) { + switch (scsi_get_prot_op(sc)) { + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_WRITE_STRIP: + *rxop = BG_OP_IN_NODIF_OUT_CRC; + *txop = BG_OP_IN_CRC_OUT_NODIF; + break; + + case SCSI_PROT_READ_STRIP: + case SCSI_PROT_WRITE_INSERT: + *rxop = BG_OP_IN_CSUM_OUT_NODIF; + *txop = BG_OP_IN_NODIF_OUT_CSUM; + break; + + case SCSI_PROT_READ_PASS: + case SCSI_PROT_WRITE_PASS: + *rxop = BG_OP_IN_CSUM_OUT_CRC; + *txop = BG_OP_IN_CRC_OUT_CSUM; + break; + + case SCSI_PROT_NORMAL: + default: + break; + + } + } else { + switch (scsi_get_prot_op(sc)) { + case SCSI_PROT_READ_STRIP: + case SCSI_PROT_WRITE_INSERT: + *rxop = BG_OP_IN_CSUM_OUT_NODIF; + *txop = BG_OP_IN_NODIF_OUT_CSUM; + break; + + case SCSI_PROT_READ_PASS: + case SCSI_PROT_WRITE_PASS: + *rxop = BG_OP_IN_CSUM_OUT_CSUM; + *txop = BG_OP_IN_CSUM_OUT_CSUM; + break; + + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_WRITE_STRIP: + *rxop = BG_OP_IN_NODIF_OUT_CSUM; + *txop = BG_OP_IN_CSUM_OUT_NODIF; + break; + + case SCSI_PROT_NORMAL: + default: + break; + } + } + + return ret; +} +#endif + +/** + * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data + * @phba: The Hba for which this call is being executed. + * @sc: pointer to scsi command we're working on + * @bpl: pointer to buffer list for protection groups + * @datacnt: number of segments of data that have been dma mapped + * + * This function sets up BPL buffer list for protection groups of * type LPFC_PG_TYPE_NO_DIF * * This is usually used when the HBA is instructed to generate @@ -1539,12 +1983,11 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, * |more Data BDE's ... (opt)| * +-------------------------+ * - * @sc: pointer to scsi command we're working on - * @bpl: pointer to buffer list for protection groups - * @datacnt: number of segments of data that have been dma mapped * * Note: Data s/g buffers have been dma mapped - */ + * + * Returns the number of BDEs added to the BPL. 
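The setup routines that follow consume the injection routine's return value as a bit mask: BG_ERR_SWAP asks for the tx/rx opcodes to be re-derived so the guard type no longer matches, and BG_ERR_CHECK clears the checking bit so the corrupted tags reach the wire. A trivial sketch of that consumption; the flag values mirror the defines earlier in this patch, everything else is illustrative.

#include <stdio.h>

#define BG_ERR_INIT	0x1     /* detected by the initiator */
#define BG_ERR_TGT	0x2     /* detected by the target */
#define BG_ERR_SWAP	0x10    /* swap CSUM<->CRC opcodes */
#define BG_ERR_CHECK	0x20    /* disable guard/ref/app checking */

int main(void)
{
	int rc = BG_ERR_TGT | BG_ERR_CHECK;     /* e.g. a WRITE_INSERT reftag case */
	int checking = 1;
	int swap_opcodes = 0;

	if (rc & BG_ERR_SWAP)
		swap_opcodes = 1;       /* caller re-derives txop/rxop */
	if (rc & BG_ERR_CHECK)
		checking = 0;           /* PDE6/DISEED check bits left clear */

	printf("checking=%d swap=%d\n", checking, swap_opcodes);
	return 0;
}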
+ **/ static int lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct ulp_bde64 *bpl, int datasegcnt) @@ -1555,6 +1998,10 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, dma_addr_t physaddr; int i = 0, num_bde = 0, status; int datadir = sc->sc_data_direction; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + uint32_t rc; +#endif + uint32_t checking = 1; uint32_t reftag; unsigned blksize; uint8_t txop, rxop; @@ -1565,11 +2012,16 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, /* extract some info from the scsi command for pde*/ blksize = lpfc_cmd_blksize(sc); - reftag = scsi_get_lba(sc) & 0xffffffff; + reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS - /* reftag is the only error we can inject here */ - lpfc_bg_err_inject(phba, sc, &reftag, 0, 0); + rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); + if (rc) { + if (rc & BG_ERR_SWAP) + lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); + if (rc & BG_ERR_CHECK) + checking = 0; + } #endif /* setup PDE5 with what we have */ @@ -1591,9 +2043,21 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); bf_set(pde6_optx, pde6, txop); bf_set(pde6_oprx, pde6, rxop); + + /* + * We only need to check the data on READs, for WRITEs + * protection data is automatically generated, not checked. + */ if (datadir == DMA_FROM_DEVICE) { - bf_set(pde6_ce, pde6, 1); - bf_set(pde6_re, pde6, 1); + if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) + bf_set(pde6_ce, pde6, checking); + else + bf_set(pde6_ce, pde6, 0); + + if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF)) + bf_set(pde6_re, pde6, checking); + else + bf_set(pde6_re, pde6, 0); } bf_set(pde6_ai, pde6, 1); bf_set(pde6_ae, pde6, 0); @@ -1627,9 +2091,16 @@ out: return num_bde; } -/* - * This function sets up buffer list for protection groups of - * type LPFC_PG_TYPE_DIF_BUF +/** + * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data + * @phba: The Hba for which this call is being executed. + * @sc: pointer to scsi command we're working on + * @bpl: pointer to buffer list for protection groups + * @datacnt: number of segments of data that have been dma mapped + * @protcnt: number of segment of protection data that have been dma mapped + * + * This function sets up BPL buffer list for protection groups of + * type LPFC_PG_TYPE_DIF * * This is usually used when DIFs are in their own buffers, * separate from the data. The HBA can then by instructed @@ -1654,14 +2125,11 @@ out: * | ... | * +-------------------------+ * - * @sc: pointer to scsi command we're working on - * @bpl: pointer to buffer list for protection groups - * @datacnt: number of segments of data that have been dma mapped - * @protcnt: number of segment of protection data that have been dma mapped - * * Note: It is assumed that both data and protection s/g buffers have been * mapped for DMA - */ + * + * Returns the number of BDEs added to the BPL. 
+ **/ static int lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct ulp_bde64 *bpl, int datacnt, int protcnt) @@ -1681,6 +2149,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, int datadir = sc->sc_data_direction; unsigned char pgdone = 0, alldone = 0; unsigned blksize; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + uint32_t rc; +#endif + uint32_t checking = 1; uint32_t reftag; uint8_t txop, rxop; int num_bde = 0; @@ -1701,15 +2173,24 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, /* extract some info from the scsi command */ blksize = lpfc_cmd_blksize(sc); - reftag = scsi_get_lba(sc) & 0xffffffff; + reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS - /* reftag / guard tag are the only errors we can inject here */ - lpfc_bg_err_inject(phba, sc, &reftag, 0, 0xDEAD); + rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); + if (rc) { + if (rc & BG_ERR_SWAP) + lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); + if (rc & BG_ERR_CHECK) + checking = 0; + } #endif split_offset = 0; do { + /* Check to see if we ran out of space */ + if (num_bde >= (phba->cfg_total_seg_cnt - 2)) + return num_bde + 3; + /* setup PDE5 with what we have */ pde5 = (struct lpfc_pde5 *) bpl; memset(pde5, 0, sizeof(struct lpfc_pde5)); @@ -1729,8 +2210,17 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); bf_set(pde6_optx, pde6, txop); bf_set(pde6_oprx, pde6, rxop); - bf_set(pde6_ce, pde6, 1); - bf_set(pde6_re, pde6, 1); + + if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) + bf_set(pde6_ce, pde6, checking); + else + bf_set(pde6_ce, pde6, 0); + + if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF)) + bf_set(pde6_re, pde6, checking); + else + bf_set(pde6_re, pde6, 0); + bf_set(pde6_ai, pde6, 1); bf_set(pde6_ae, pde6, 0); bf_set(pde6_apptagval, pde6, 0); @@ -1778,6 +2268,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, pgdone = 0; subtotal = 0; /* total bytes processed for current prot grp */ while (!pgdone) { + /* Check to see if we ran out of space */ + if (num_bde >= phba->cfg_total_seg_cnt) + return num_bde + 1; + if (!sgde) { lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9065 BLKGRD:%s Invalid data segment\n", @@ -1852,13 +2346,405 @@ out: return num_bde; } -/* +/** + * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data + * @phba: The Hba for which this call is being executed. + * @sc: pointer to scsi command we're working on + * @sgl: pointer to buffer list for protection groups + * @datacnt: number of segments of data that have been dma mapped + * + * This function sets up SGL buffer list for protection groups of + * type LPFC_PG_TYPE_NO_DIF + * + * This is usually used when the HBA is instructed to generate + * DIFs and insert them into data stream (or strip DIF from + * incoming data stream) + * + * The buffer list consists of just one protection group described + * below: + * +-------------------------+ + * start of prot group --> | DI_SEED | + * +-------------------------+ + * | Data SGE | + * +-------------------------+ + * |more Data SGE's ... (opt)| + * +-------------------------+ + * + * + * Note: Data s/g buffers have been dma mapped + * + * Returns the number of SGEs added to the SGL. 
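The while (!pgdone) loop above is the core of the LPFC_PG_TYPE_DIF_BUF layout: every protection group covers protgrp_bytes of data, data segments are consumed until the group is full, and a segment that straddles a group boundary is split, with the tail carried into the next group through split_offset. The stand-alone model below reproduces only that carving with made-up segment sizes; it ignores the PDE bookkeeping and the 4 KB boundary handling of the protection buffer itself.

#include <stdio.h>

/* Carve data segments into per-group BDE lengths the way the loop above
 * does: fill each protection group, splitting a segment that crosses a
 * group boundary and carrying the remainder via split_offset. */
void carve_prot_groups(const unsigned *seg, int nseg, unsigned protgrp_bytes)
{
        unsigned split_offset = 0, subtotal = 0;
        int i = 0, grp = 0;

        while (i < nseg) {
                unsigned remainder = seg[i] - split_offset;
                unsigned dma_len;

                if (subtotal + remainder <= protgrp_bytes) {
                        dma_len = remainder;                 /* whole buffer fits */
                        split_offset = 0;
                } else {
                        dma_len = protgrp_bytes - subtotal;  /* split here */
                        split_offset += dma_len;
                }
                subtotal += dma_len;
                printf("group %d: %u bytes from segment %d\n", grp, dma_len, i);

                if (subtotal == protgrp_bytes) {             /* group complete */
                        grp++;
                        subtotal = 0;
                }
                if (!split_offset)
                        i++;                                 /* next data segment */
        }
}

int main(void)
{
        unsigned segs[] = { 6144, 2048, 4096 };              /* 12 KiB of data */
        carve_prot_groups(segs, 3, 4096);                    /* 4 KiB per prot group */
        return 0;
}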
+ **/ +static int +lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, + struct sli4_sge *sgl, int datasegcnt) +{ + struct scatterlist *sgde = NULL; /* s/g data entry */ + struct sli4_sge_diseed *diseed = NULL; + dma_addr_t physaddr; + int i = 0, num_sge = 0, status; + uint32_t reftag; + unsigned blksize; + uint8_t txop, rxop; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + uint32_t rc; +#endif + uint32_t checking = 1; + uint32_t dma_len; + uint32_t dma_offset = 0; + + status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); + if (status) + goto out; + + /* extract some info from the scsi command for pde*/ + blksize = lpfc_cmd_blksize(sc); + reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */ + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); + if (rc) { + if (rc & BG_ERR_SWAP) + lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); + if (rc & BG_ERR_CHECK) + checking = 0; + } +#endif + + /* setup DISEED with what we have */ + diseed = (struct sli4_sge_diseed *) sgl; + memset(diseed, 0, sizeof(struct sli4_sge_diseed)); + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED); + + /* Endianness conversion if necessary */ + diseed->ref_tag = cpu_to_le32(reftag); + diseed->ref_tag_tran = diseed->ref_tag; + + /* + * We only need to check the data on READs, for WRITEs + * protection data is automatically generated, not checked. + */ + if (sc->sc_data_direction == DMA_FROM_DEVICE) { + if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) + bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); + else + bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); + + if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF)) + bf_set(lpfc_sli4_sge_dif_re, diseed, checking); + else + bf_set(lpfc_sli4_sge_dif_re, diseed, 0); + } + + /* setup DISEED with the rest of the info */ + bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); + bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); + + bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); + bf_set(lpfc_sli4_sge_dif_me, diseed, 0); + + /* Endianness conversion if necessary for DISEED */ + diseed->word2 = cpu_to_le32(diseed->word2); + diseed->word3 = cpu_to_le32(diseed->word3); + + /* advance bpl and increment sge count */ + num_sge++; + sgl++; + + /* assumption: caller has already run dma_map_sg on command data */ + scsi_for_each_sg(sc, sgde, datasegcnt, i) { + physaddr = sg_dma_address(sgde); + dma_len = sg_dma_len(sgde); + sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); + sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); + if ((i + 1) == datasegcnt) + bf_set(lpfc_sli4_sge_last, sgl, 1); + else + bf_set(lpfc_sli4_sge_last, sgl, 0); + bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); + + sgl->sge_len = cpu_to_le32(dma_len); + dma_offset += dma_len; + + sgl++; + num_sge++; + } + +out: + return num_sge; +} + +/** + * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data + * @phba: The Hba for which this call is being executed. + * @sc: pointer to scsi command we're working on + * @sgl: pointer to buffer list for protection groups + * @datacnt: number of segments of data that have been dma mapped + * @protcnt: number of segment of protection data that have been dma mapped + * + * This function sets up SGL buffer list for protection groups of + * type LPFC_PG_TYPE_DIF + * + * This is usually used when DIFs are in their own buffers, + * separate from the data. The HBA can then by instructed + * to place the DIFs in the outgoing stream. 
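lpfc_bg_setup_sgl() above always produces the same shape of list: one DISEED entry carrying the truncated-LBA reference tag and the check/opcode bits, then one data SGE per mapped segment with a running offset, and the last flag set only on the final entry. The sketch below captures that construction with simplified types; struct sge here is an illustrative stand-in, not the packed little-endian sli4_sge layout the hardware parses.

#include <stdint.h>

/* Simplified SGE: the real entries are packed bit-field words. */
enum sge_type { SGE_DISEED = 1, SGE_DATA = 2 };
struct seg { uint64_t addr; uint32_t len; };
struct sge {
        uint64_t addr;
        uint32_t len;
        uint32_t offset;        /* running offset into the transfer */
        uint32_t reftag;        /* DISEED only: truncated LBA       */
        enum sge_type type;
        int last;
};

/* Seed entry first, then one data SGE per segment with the last flag on
 * the final one.  Returns the number of SGEs written, as the driver's
 * routine does, so a successful list is always at least 2 entries. */
int build_bg_sgl(struct sge *sgl, uint32_t reftag, const struct seg *seg, int nseg)
{
        uint32_t offset = 0;
        int n = 0, i;

        sgl[n++] = (struct sge){ .reftag = reftag, .type = SGE_DISEED };

        for (i = 0; i < nseg; i++) {
                sgl[n++] = (struct sge){
                        .addr   = seg[i].addr,
                        .len    = seg[i].len,
                        .offset = offset,
                        .type   = SGE_DATA,
                        .last   = (i + 1 == nseg),
                };
                offset += seg[i].len;
        }
        return n;
}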
For read operations, + * The HBA could extract the DIFs and place it in DIF buffers. + * + * The buffer list for this type consists of one or more of the + * protection groups described below: + * +-------------------------+ + * start of first prot group --> | DISEED | + * +-------------------------+ + * | DIF (Prot SGE) | + * +-------------------------+ + * | Data SGE | + * +-------------------------+ + * |more Data SGE's ... (opt)| + * +-------------------------+ + * start of new prot group --> | DISEED | + * +-------------------------+ + * | ... | + * +-------------------------+ + * + * Note: It is assumed that both data and protection s/g buffers have been + * mapped for DMA + * + * Returns the number of SGEs added to the SGL. + **/ +static int +lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, + struct sli4_sge *sgl, int datacnt, int protcnt) +{ + struct scatterlist *sgde = NULL; /* s/g data entry */ + struct scatterlist *sgpe = NULL; /* s/g prot entry */ + struct sli4_sge_diseed *diseed = NULL; + dma_addr_t dataphysaddr, protphysaddr; + unsigned short curr_data = 0, curr_prot = 0; + unsigned int split_offset; + unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; + unsigned int protgrp_blks, protgrp_bytes; + unsigned int remainder, subtotal; + int status; + unsigned char pgdone = 0, alldone = 0; + unsigned blksize; + uint32_t reftag; + uint8_t txop, rxop; + uint32_t dma_len; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + uint32_t rc; +#endif + uint32_t checking = 1; + uint32_t dma_offset = 0; + int num_sge = 0; + + sgpe = scsi_prot_sglist(sc); + sgde = scsi_sglist(sc); + + if (!sgpe || !sgde) { + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + "9082 Invalid s/g entry: data=0x%p prot=0x%p\n", + sgpe, sgde); + return 0; + } + + status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); + if (status) + goto out; + + /* extract some info from the scsi command */ + blksize = lpfc_cmd_blksize(sc); + reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */ + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); + if (rc) { + if (rc & BG_ERR_SWAP) + lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); + if (rc & BG_ERR_CHECK) + checking = 0; + } +#endif + + split_offset = 0; + do { + /* Check to see if we ran out of space */ + if (num_sge >= (phba->cfg_total_seg_cnt - 2)) + return num_sge + 3; + + /* setup DISEED with what we have */ + diseed = (struct sli4_sge_diseed *) sgl; + memset(diseed, 0, sizeof(struct sli4_sge_diseed)); + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED); + + /* Endianness conversion if necessary */ + diseed->ref_tag = cpu_to_le32(reftag); + diseed->ref_tag_tran = diseed->ref_tag; + + if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) { + bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); + + } else { + bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); + /* + * When in this mode, the hardware will replace + * the guard tag from the host with a + * newly generated good CRC for the wire. + * Switch to raw mode here to avoid this + * behavior. What the host sends gets put on the wire. 
+ */ + if (txop == BG_OP_IN_CRC_OUT_CRC) { + txop = BG_OP_RAW_MODE; + rxop = BG_OP_RAW_MODE; + } + } + + + if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF)) + bf_set(lpfc_sli4_sge_dif_re, diseed, checking); + else + bf_set(lpfc_sli4_sge_dif_re, diseed, 0); + + /* setup DISEED with the rest of the info */ + bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); + bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); + + bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); + bf_set(lpfc_sli4_sge_dif_me, diseed, 0); + + /* Endianness conversion if necessary for DISEED */ + diseed->word2 = cpu_to_le32(diseed->word2); + diseed->word3 = cpu_to_le32(diseed->word3); + + /* advance sgl and increment bde count */ + num_sge++; + sgl++; + + /* setup the first BDE that points to protection buffer */ + protphysaddr = sg_dma_address(sgpe) + protgroup_offset; + protgroup_len = sg_dma_len(sgpe) - protgroup_offset; + + /* must be integer multiple of the DIF block length */ + BUG_ON(protgroup_len % 8); + + /* Now setup DIF SGE */ + sgl->word2 = 0; + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF); + sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr)); + sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr)); + sgl->word2 = cpu_to_le32(sgl->word2); + + protgrp_blks = protgroup_len / 8; + protgrp_bytes = protgrp_blks * blksize; + + /* check if DIF SGE is crossing the 4K boundary; if so split */ + if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) { + protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff); + protgroup_offset += protgroup_remainder; + protgrp_blks = protgroup_remainder / 8; + protgrp_bytes = protgrp_blks * blksize; + } else { + protgroup_offset = 0; + curr_prot++; + } + + num_sge++; + + /* setup SGE's for data blocks associated with DIF data */ + pgdone = 0; + subtotal = 0; /* total bytes processed for current prot grp */ + while (!pgdone) { + /* Check to see if we ran out of space */ + if (num_sge >= phba->cfg_total_seg_cnt) + return num_sge + 1; + + if (!sgde) { + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9086 BLKGRD:%s Invalid data segment\n", + __func__); + return 0; + } + sgl++; + dataphysaddr = sg_dma_address(sgde) + split_offset; + + remainder = sg_dma_len(sgde) - split_offset; + + if ((subtotal + remainder) <= protgrp_bytes) { + /* we can use this whole buffer */ + dma_len = remainder; + split_offset = 0; + + if ((subtotal + remainder) == protgrp_bytes) + pgdone = 1; + } else { + /* must split this buffer with next prot grp */ + dma_len = protgrp_bytes - subtotal; + split_offset += dma_len; + } + + subtotal += dma_len; + + sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr)); + sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr)); + bf_set(lpfc_sli4_sge_last, sgl, 0); + bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); + + sgl->sge_len = cpu_to_le32(dma_len); + dma_offset += dma_len; + + num_sge++; + curr_data++; + + if (split_offset) + break; + + /* Move to the next s/g segment if possible */ + sgde = sg_next(sgde); + } + + if (protgroup_offset) { + /* update the reference tag */ + reftag += protgrp_blks; + sgl++; + continue; + } + + /* are we done ? 
*/ + if (curr_prot == protcnt) { + bf_set(lpfc_sli4_sge_last, sgl, 1); + alldone = 1; + } else if (curr_prot < protcnt) { + /* advance to next prot buffer */ + sgpe = sg_next(sgpe); + sgl++; + + /* update the reference tag */ + reftag += protgrp_blks; + } else { + /* if we're here, we have a bug */ + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9085 BLKGRD: bug in %s\n", __func__); + } + + } while (!alldone); + +out: + + return num_sge; +} + +/** + * lpfc_prot_group_type - Get prtotection group type of SCSI command + * @phba: The Hba for which this call is being executed. + * @sc: pointer to scsi command we're working on + * * Given a SCSI command that supports DIF, determine composition of protection * groups involved in setting up buffer lists * - * Returns: - * for DIF (for both read and write) - * */ + * Returns: Protection group type (with or without DIF) + * + **/ static int lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc) { @@ -1877,21 +2763,67 @@ lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc) ret = LPFC_PG_TYPE_DIF_BUF; break; default: - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "9021 Unsupported protection op:%d\n", op); + if (phba) + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + "9021 Unsupported protection op:%d\n", + op); break; } - return ret; } -/* +/** + * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard + * @phba: The Hba for which this call is being executed. + * @lpfc_cmd: The scsi buffer which is going to be adjusted. + * + * Adjust the data length to account for how much data + * is actually on the wire. + * + * returns the adjusted data length + **/ +static int +lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba, + struct lpfc_scsi_buf *lpfc_cmd) +{ + struct scsi_cmnd *sc = lpfc_cmd->pCmd; + int fcpdl; + + fcpdl = scsi_bufflen(sc); + + /* Check if there is protection data on the wire */ + if (sc->sc_data_direction == DMA_FROM_DEVICE) { + /* Read check for protection data */ + if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT) + return fcpdl; + + } else { + /* Write check for protection data */ + if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP) + return fcpdl; + } + + /* + * If we are in DIF Type 1 mode every data block has a 8 byte + * DIF (trailer) attached to it. Must ajust FCP data length + * to account for the protection data. + */ + fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8; + + return fcpdl; +} + +/** + * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec + * @phba: The Hba for which this call is being executed. + * @lpfc_cmd: The scsi buffer which is going to be prep'ed. + * * This is the protection/DIF aware version of * lpfc_scsi_prep_dma_buf(). 
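lpfc_bg_scsi_adjust_dl() above grows the FCP data length only when DIF tuples actually travel on the wire: READ_INSERT and WRITE_STRIP leave the length untouched because the HBA inserts or strips the tuples itself, while every other protection op adds one 8-byte tuple per logical block. A small model of the arithmetic, with illustrative numbers:

#include <stdio.h>

/* Wire-length adjustment: add one 8-byte DIF tuple per block unless the
 * HBA owns the tuples (READ_INSERT on reads, WRITE_STRIP on writes), in
 * which case no protection bytes cross the wire. */
unsigned adjust_dl(unsigned bufflen, unsigned blksize, int hba_owns_dif)
{
        if (hba_owns_dif)
                return bufflen;
        return bufflen + (bufflen / blksize) * 8;
}

int main(void)
{
        /* 64 KiB transfer of 512-byte blocks: 128 tuples, 1 KiB of DIF. */
        printf("%u\n", adjust_dl(65536, 512, 0));   /* 66560 */
        printf("%u\n", adjust_dl(65536, 512, 1));   /* 65536 */
        return 0;
}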
It may be a good idea to combine the * two functions eventually, but for now, it's here - */ + **/ static int -lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, +lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) { struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; @@ -1901,8 +2833,7 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, uint32_t num_bde = 0; int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; int prot_group_type = 0; - int diflen, fcpdl; - unsigned blksize; + int fcpdl; /* * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd @@ -1923,28 +2854,28 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, return 1; lpfc_cmd->seg_cnt = datasegcnt; - if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9067 BLKGRD: %s: Too many sg segments" - " from dma_map_sg. Config %d, seg_cnt" - " %d\n", - __func__, phba->cfg_sg_seg_cnt, - lpfc_cmd->seg_cnt); - scsi_dma_unmap(scsi_cmnd); - return 1; - } + + /* First check if data segment count from SCSI Layer is good */ + if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) + goto err; prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); switch (prot_group_type) { case LPFC_PG_TYPE_NO_DIF: + + /* Here we need to add a PDE5 and PDE6 to the count */ + if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) + goto err; + num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, datasegcnt); /* we should have 2 or more entries in buffer list */ if (num_bde < 2) goto err; break; - case LPFC_PG_TYPE_DIF_BUF:{ + + case LPFC_PG_TYPE_DIF_BUF: /* * This type indicates that protection buffers are * passed to the driver, so that needs to be prepared @@ -1959,31 +2890,28 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, } lpfc_cmd->prot_seg_cnt = protsegcnt; - if (lpfc_cmd->prot_seg_cnt - > phba->cfg_prot_sg_seg_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9068 BLKGRD: %s: Too many prot sg " - "segments from dma_map_sg. Config %d," - "prot_seg_cnt %d\n", __func__, - phba->cfg_prot_sg_seg_cnt, - lpfc_cmd->prot_seg_cnt); - dma_unmap_sg(&phba->pcidev->dev, - scsi_prot_sglist(scsi_cmnd), - scsi_prot_sg_count(scsi_cmnd), - datadir); - scsi_dma_unmap(scsi_cmnd); - return 1; - } + + /* + * There is a minimun of 4 BPLs used for every + * protection data segment. + */ + if ((lpfc_cmd->prot_seg_cnt * 4) > + (phba->cfg_total_seg_cnt - 2)) + goto err; num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, datasegcnt, protsegcnt); /* we should have 3 or more entries in buffer list */ - if (num_bde < 3) + if ((num_bde < 3) || + (num_bde > phba->cfg_total_seg_cnt)) goto err; break; - } + case LPFC_PG_TYPE_INVALID: default: + scsi_dma_unmap(scsi_cmnd); + lpfc_cmd->seg_cnt = 0; + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, "9022 Unexpected protection group %i\n", prot_group_type); @@ -2002,18 +2930,7 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, iocb_cmd->ulpBdeCount = 1; iocb_cmd->ulpLe = 1; - fcpdl = scsi_bufflen(scsi_cmnd); - - if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) { - /* - * We are in DIF Type 1 mode - * Every data block has a 8 byte DIF (trailer) - * attached to it. 
Must ajust FCP data length - */ - blksize = lpfc_cmd_blksize(scsi_cmnd); - diflen = (fcpdl / blksize) * 8; - fcpdl += diflen; - } + fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); /* @@ -2024,14 +2941,234 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, return 0; err: + if (lpfc_cmd->seg_cnt) + scsi_dma_unmap(scsi_cmnd); + if (lpfc_cmd->prot_seg_cnt) + dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), + scsi_cmnd->sc_data_direction); + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "9023 Could not setup all needed BDE's" - "prot_group_type=%d, num_bde=%d\n", + "9023 Cannot setup S/G List for HBA" + "IO segs %d/%d BPL %d SCSI %d: %d %d\n", + lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, + phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, prot_group_type, num_bde); + + lpfc_cmd->seg_cnt = 0; + lpfc_cmd->prot_seg_cnt = 0; return 1; } /* + * This function calcuates the T10 DIF guard tag + * on the specified data using a CRC algorithmn + * using crc_t10dif. + */ +uint16_t +lpfc_bg_crc(uint8_t *data, int count) +{ + uint16_t crc = 0; + uint16_t x; + + crc = crc_t10dif(data, count); + x = cpu_to_be16(crc); + return x; +} + +/* + * This function calcuates the T10 DIF guard tag + * on the specified data using a CSUM algorithmn + * using ip_compute_csum. + */ +uint16_t +lpfc_bg_csum(uint8_t *data, int count) +{ + uint16_t ret; + + ret = ip_compute_csum(data, count); + return ret; +} + +/* + * This function examines the protection data to try to determine + * what type of T10-DIF error occurred. + */ +void +lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) +{ + struct scatterlist *sgpe; /* s/g prot entry */ + struct scatterlist *sgde; /* s/g data entry */ + struct scsi_cmnd *cmd = lpfc_cmd->pCmd; + struct scsi_dif_tuple *src = NULL; + uint8_t *data_src = NULL; + uint16_t guard_tag, guard_type; + uint16_t start_app_tag, app_tag; + uint32_t start_ref_tag, ref_tag; + int prot, protsegcnt; + int err_type, len, data_len; + int chk_ref, chk_app, chk_guard; + uint16_t sum; + unsigned blksize; + + err_type = BGS_GUARD_ERR_MASK; + sum = 0; + guard_tag = 0; + + /* First check to see if there is protection data to examine */ + prot = scsi_get_prot_op(cmd); + if ((prot == SCSI_PROT_READ_STRIP) || + (prot == SCSI_PROT_WRITE_INSERT) || + (prot == SCSI_PROT_NORMAL)) + goto out; + + /* Currently the driver just supports ref_tag and guard_tag checking */ + chk_ref = 1; + chk_app = 0; + chk_guard = 0; + + /* Setup a ptr to the protection data provided by the SCSI host */ + sgpe = scsi_prot_sglist(cmd); + protsegcnt = lpfc_cmd->prot_seg_cnt; + + if (sgpe && protsegcnt) { + + /* + * We will only try to verify guard tag if the segment + * data length is a multiple of the blksize. 
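lpfc_bg_crc() and lpfc_bg_csum() above are thin wrappers around the kernel's crc_t10dif() and ip_compute_csum(), recomputing the guard tag over one block so it can be compared with what the initiator supplied (the CRC variant also byte-swaps the result to match the big-endian guard field in the tuple). For reference, a self-contained bit-at-a-time version of the T10-DIF CRC, assuming the standard 0x8BB7 polynomial with a zero initial value; this is an illustration, not the kernel's table-driven implementation.

#include <stdint.h>
#include <stddef.h>

/* CRC-16/T10-DIF: poly 0x8BB7, init 0, MSB first, no reflection or XOR-out. */
uint16_t t10dif_crc(const uint8_t *data, size_t len)
{
        uint16_t crc = 0;
        size_t i;
        int bit;

        for (i = 0; i < len; i++) {
                crc ^= (uint16_t)data[i] << 8;
                for (bit = 0; bit < 8; bit++)
                        crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8BB7)
                                             : (uint16_t)(crc << 1);
        }
        return crc;             /* host order; the driver stores it big-endian */
}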
+ */ + sgde = scsi_sglist(cmd); + blksize = lpfc_cmd_blksize(cmd); + data_src = (uint8_t *)sg_virt(sgde); + data_len = sgde->length; + if ((data_len & (blksize - 1)) == 0) + chk_guard = 1; + guard_type = scsi_host_get_guard(cmd->device->host); + + src = (struct scsi_dif_tuple *)sg_virt(sgpe); + start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */ + start_app_tag = src->app_tag; + len = sgpe->length; + while (src && protsegcnt) { + while (len) { + + /* + * First check to see if a protection data + * check is valid + */ + if ((src->ref_tag == 0xffffffff) || + (src->app_tag == 0xffff)) { + start_ref_tag++; + goto skipit; + } + + /* First Guard Tag checking */ + if (chk_guard) { + guard_tag = src->guard_tag; + if (lpfc_cmd_guard_csum(cmd)) + sum = lpfc_bg_csum(data_src, + blksize); + else + sum = lpfc_bg_crc(data_src, + blksize); + if ((guard_tag != sum)) { + err_type = BGS_GUARD_ERR_MASK; + goto out; + } + } + + /* Reference Tag checking */ + ref_tag = be32_to_cpu(src->ref_tag); + if (chk_ref && (ref_tag != start_ref_tag)) { + err_type = BGS_REFTAG_ERR_MASK; + goto out; + } + start_ref_tag++; + + /* App Tag checking */ + app_tag = src->app_tag; + if (chk_app && (app_tag != start_app_tag)) { + err_type = BGS_APPTAG_ERR_MASK; + goto out; + } +skipit: + len -= sizeof(struct scsi_dif_tuple); + if (len < 0) + len = 0; + src++; + + data_src += blksize; + data_len -= blksize; + + /* + * Are we at the end of the Data segment? + * The data segment is only used for Guard + * tag checking. + */ + if (chk_guard && (data_len == 0)) { + chk_guard = 0; + sgde = sg_next(sgde); + if (!sgde) + goto out; + + data_src = (uint8_t *)sg_virt(sgde); + data_len = sgde->length; + if ((data_len & (blksize - 1)) == 0) + chk_guard = 1; + } + } + + /* Goto the next Protection data segment */ + sgpe = sg_next(sgpe); + if (sgpe) { + src = (struct scsi_dif_tuple *)sg_virt(sgpe); + len = sgpe->length; + } else { + src = NULL; + } + protsegcnt--; + } + } +out: + if (err_type == BGS_GUARD_ERR_MASK) { + scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, + 0x10, 0x1); + cmd->result = DRIVER_SENSE << 24 + | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); + phba->bg_guard_err_cnt++; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n", + (unsigned long)scsi_get_lba(cmd), + sum, guard_tag); + + } else if (err_type == BGS_REFTAG_ERR_MASK) { + scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, + 0x10, 0x3); + cmd->result = DRIVER_SENSE << 24 + | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); + + phba->bg_reftag_err_cnt++; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n", + (unsigned long)scsi_get_lba(cmd), + ref_tag, start_ref_tag); + + } else if (err_type == BGS_APPTAG_ERR_MASK) { + scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, + 0x10, 0x2); + cmd->result = DRIVER_SENSE << 24 + | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); + + phba->bg_apptag_err_cnt++; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9041 BLKGRD: LBA %lx app_tag error %x != %x\n", + (unsigned long)scsi_get_lba(cmd), + app_tag, start_app_tag); + } +} + + +/* * This function checks for BlockGuard errors detected by * the HBA. 
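The walk above checks the protection scatterlist tuple by tuple in step with the data scatterlist: a tuple whose reference tag is all ones or whose application tag is 0xffff is treated as unprotected and skipped, otherwise the guard is recomputed over the block and the big-endian reference tag is compared against an expected value that starts at the truncated LBA and increments per block. The model below assumes one flat data buffer and one flat tuple array (no scatterlist chasing) and takes the guard routine as a parameter, for example the CRC sketch above.

#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>          /* ntohs/ntohl for the big-endian tuple fields */

/* 8-byte T10 protection tuple as it sits in the protection buffer. */
struct dif_tuple {
        uint16_t guard;         /* big-endian guard tag       */
        uint16_t app;           /* big-endian application tag */
        uint32_t ref;           /* big-endian reference tag   */
};

enum bg_err { BG_OK, BG_GUARD_ERR, BG_REF_ERR };

/*
 * Verify blocks the way lpfc_calc_bg_err() does: skip "escape" tuples
 * (all-ones ref tag or 0xffff app tag), otherwise recompute the guard over
 * the block and check an incrementing reference tag seeded with the LBA.
 */
enum bg_err verify_dif(const uint8_t *data, const struct dif_tuple *t,
                       unsigned nblocks, unsigned blksize, uint32_t lba,
                       uint16_t (*guard_fn)(const uint8_t *, size_t))
{
        uint32_t expect_ref = lba;
        unsigned i;

        for (i = 0; i < nblocks; i++, t++, data += blksize, expect_ref++) {
                if (t->ref == 0xffffffff || t->app == 0xffff)
                        continue;                       /* block not protected */
                if (ntohs(t->guard) != guard_fn(data, blksize))
                        return BG_GUARD_ERR;
                if (ntohl(t->ref) != expect_ref)
                        return BG_REF_ERR;
        }
        return BG_OK;
}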
In case of errors, the ASC/ASCQ fields in the * sense buffer will be set accordingly, paired with @@ -2054,12 +3191,6 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, uint32_t bgstat = bgf->bgstat; uint64_t failing_sector = 0; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd" - " 0x%x lba 0x%llx blk cnt 0x%x " - "bgstat=0x%x bghm=0x%x\n", - cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), - blk_rq_sectors(cmd->request), bgstat, bghm); - spin_lock(&_dump_buf_lock); if (!_dump_buf_done) { lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving" @@ -2082,18 +3213,24 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, if (lpfc_bgs_get_invalid_prof(bgstat)) { cmd->result = ScsiResult(DID_ERROR, 0); - lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid" - " BlockGuard profile. bgstat:0x%x\n", - bgstat); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9072 BLKGRD: Invalid BG Profile in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); ret = (-1); goto out; } if (lpfc_bgs_get_uninit_dif_block(bgstat)) { cmd->result = ScsiResult(DID_ERROR, 0); - lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: " - "Invalid BlockGuard DIF Block. bgstat:0x%x\n", - bgstat); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9073 BLKGRD: Invalid BG PDIF Block in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); ret = (-1); goto out; } @@ -2106,8 +3243,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, cmd->result = DRIVER_SENSE << 24 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); phba->bg_guard_err_cnt++; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9055 BLKGRD: guard_tag error\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9055 BLKGRD: Guard Tag error in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); } if (lpfc_bgs_get_reftag_err(bgstat)) { @@ -2119,8 +3260,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); phba->bg_reftag_err_cnt++; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9056 BLKGRD: ref_tag error\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9056 BLKGRD: Ref Tag error in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); } if (lpfc_bgs_get_apptag_err(bgstat)) { @@ -2132,8 +3277,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); phba->bg_apptag_err_cnt++; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9061 BLKGRD: app_tag error\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9061 BLKGRD: App Tag error in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); } if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { @@ -2147,7 +3296,21 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, cmd->sense_buffer[8] = 0; /* Information descriptor type */ 
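Whether the failure is reported by the HBA (handled here) or derived in software by lpfc_calc_bg_err(), the command completes with the same fixed T10 sense data: ILLEGAL REQUEST, ASC 0x10, and an ASCQ of 1, 2 or 3 selecting a guard, application or reference tag check failure. The table below only makes that pairing explicit; the driver builds the sense data through scsi_build_sense_buffer() rather than a lookup.

#include <stdint.h>

#define SK_ILLEGAL_REQUEST 0x05         /* SCSI sense key */

struct bg_sense { uint8_t key, asc, ascq; const char *desc; };

/* ASC 0x10 covers the T10 protection-information check failures; the ASCQ
 * picks the tag, exactly as the scsi_build_sense_buffer() calls above do. */
const struct bg_sense bg_sense_map[] = {
        { SK_ILLEGAL_REQUEST, 0x10, 0x01, "logical block guard check failed" },
        { SK_ILLEGAL_REQUEST, 0x10, 0x02, "logical block application tag check failed" },
        { SK_ILLEGAL_REQUEST, 0x10, 0x03, "logical block reference tag check failed" },
};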
cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */ cmd->sense_buffer[10] = 0x80; /* Validity bit */ - bghm /= cmd->device->sector_size; + + /* bghm is a "on the wire" FC frame based count */ + switch (scsi_get_prot_op(cmd)) { + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_WRITE_STRIP: + bghm /= cmd->device->sector_size; + break; + case SCSI_PROT_READ_STRIP: + case SCSI_PROT_WRITE_INSERT: + case SCSI_PROT_READ_PASS: + case SCSI_PROT_WRITE_PASS: + bghm /= (cmd->device->sector_size + + sizeof(struct scsi_dif_tuple)); + break; + } failing_sector = scsi_get_lba(cmd); failing_sector += bghm; @@ -2158,11 +3321,16 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, if (!ret) { /* No error was reported - problem in FW? */ - cmd->result = ScsiResult(DID_ERROR, 0); - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9057 BLKGRD: no errors reported!\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9057 BLKGRD: Unknown error in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); + + /* Calcuate what type of error it was */ + lpfc_calc_bg_err(phba, lpfc_cmd); } - out: return ret; } @@ -2226,6 +3394,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) "dma_map_sg. Config %d, seg_cnt %d\n", __func__, phba->cfg_sg_seg_cnt, lpfc_cmd->seg_cnt); + lpfc_cmd->seg_cnt = 0; scsi_dma_unmap(scsi_cmnd); return 1; } @@ -2288,10 +3457,175 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) * we need to set word 4 of IOCB here */ iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); + + /* + * If the OAS driver feature is enabled and the lun is enabled for + * OAS, set the oas iocb related flags. + */ + if ((phba->cfg_fof) && ((struct lpfc_device_data *) + scsi_cmnd->device->hostdata)->oas_enabled) + lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_OAS; return 0; } /** + * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec + * @phba: The Hba for which this call is being executed. + * @lpfc_cmd: The scsi buffer which is going to be mapped. + * + * This is the protection/DIF aware version of + * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the + * two functions eventually, but for now, it's here + **/ +static int +lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, + struct lpfc_scsi_buf *lpfc_cmd) +{ + struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; + struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; + struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl); + IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; + uint32_t num_sge = 0; + int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; + int prot_group_type = 0; + int fcpdl; + + /* + * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd + * fcp_rsp regions to the first data sge entry + */ + if (scsi_sg_count(scsi_cmnd)) { + /* + * The driver stores the segment count returned from pci_map_sg + * because this a count of dma-mappings used to map the use_sg + * pages. They are not guaranteed to be the same for those + * architectures that implement an IOMMU. 
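The new switch above exists because bghm is a byte offset into the Fibre Channel frames, so converting it into a block count depends on whether the 8-byte tuples are interleaved with the data on the wire: for READ_INSERT and WRITE_STRIP only sector data is transferred, for the other protection ops each block occupies sector_size plus 8 bytes. The failing sector is then the command's starting LBA plus that block offset. A quick model with illustrative numbers:

#include <stdio.h>

/* Convert the adapter's "high water mark" byte offset into a block offset.
 * dif_on_wire mirrors the switch above: interleaved tuples give 520-byte
 * strides for 512-byte sectors, otherwise plain sector_size strides. */
unsigned bghm_to_block(unsigned bghm, unsigned sector_size, int dif_on_wire)
{
        unsigned stride = sector_size + (dif_on_wire ? 8 : 0);

        return bghm / stride;
}

int main(void)
{
        /* Error reported 4160 bytes into the transfer, 512-byte sectors. */
        printf("%u\n", bghm_to_block(4160, 512, 1));    /* block 8 (8 * 520) */
        printf("%u\n", bghm_to_block(4096, 512, 0));    /* block 8 (8 * 512) */
        return 0;
}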
+ */ + datasegcnt = dma_map_sg(&phba->pcidev->dev, + scsi_sglist(scsi_cmnd), + scsi_sg_count(scsi_cmnd), datadir); + if (unlikely(!datasegcnt)) + return 1; + + sgl += 1; + /* clear the last flag in the fcp_rsp map entry */ + sgl->word2 = le32_to_cpu(sgl->word2); + bf_set(lpfc_sli4_sge_last, sgl, 0); + sgl->word2 = cpu_to_le32(sgl->word2); + + sgl += 1; + lpfc_cmd->seg_cnt = datasegcnt; + + /* First check if data segment count from SCSI Layer is good */ + if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) + goto err; + + prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); + + switch (prot_group_type) { + case LPFC_PG_TYPE_NO_DIF: + /* Here we need to add a DISEED to the count */ + if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt) + goto err; + + num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, + datasegcnt); + + /* we should have 2 or more entries in buffer list */ + if (num_sge < 2) + goto err; + break; + + case LPFC_PG_TYPE_DIF_BUF: + /* + * This type indicates that protection buffers are + * passed to the driver, so that needs to be prepared + * for DMA + */ + protsegcnt = dma_map_sg(&phba->pcidev->dev, + scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), datadir); + if (unlikely(!protsegcnt)) { + scsi_dma_unmap(scsi_cmnd); + return 1; + } + + lpfc_cmd->prot_seg_cnt = protsegcnt; + /* + * There is a minimun of 3 SGEs used for every + * protection data segment. + */ + if ((lpfc_cmd->prot_seg_cnt * 3) > + (phba->cfg_total_seg_cnt - 2)) + goto err; + + num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl, + datasegcnt, protsegcnt); + + /* we should have 3 or more entries in buffer list */ + if ((num_sge < 3) || + (num_sge > phba->cfg_total_seg_cnt)) + goto err; + break; + + case LPFC_PG_TYPE_INVALID: + default: + scsi_dma_unmap(scsi_cmnd); + lpfc_cmd->seg_cnt = 0; + + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + "9083 Unexpected protection group %i\n", + prot_group_type); + return 1; + } + } + + switch (scsi_get_prot_op(scsi_cmnd)) { + case SCSI_PROT_WRITE_STRIP: + case SCSI_PROT_READ_STRIP: + lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP; + break; + case SCSI_PROT_WRITE_INSERT: + case SCSI_PROT_READ_INSERT: + lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT; + break; + case SCSI_PROT_WRITE_PASS: + case SCSI_PROT_READ_PASS: + lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS; + break; + } + + fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); + fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); + + /* + * Due to difference in data length between DIF/non-DIF paths, + * we need to set word 4 of IOCB here + */ + iocb_cmd->un.fcpi.fcpi_parm = fcpdl; + + return 0; +err: + if (lpfc_cmd->seg_cnt) + scsi_dma_unmap(scsi_cmnd); + if (lpfc_cmd->prot_seg_cnt) + dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), + scsi_cmnd->sc_data_direction); + + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + "9084 Cannot setup S/G List for HBA" + "IO segs %d/%d SGL %d SCSI %d: %d %d\n", + lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, + phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, + prot_group_type, num_sge); + + lpfc_cmd->seg_cnt = 0; + lpfc_cmd->prot_seg_cnt = 0; + return 1; +} + +/** * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer * @phba: The Hba for which this call is being executed. * @lpfc_cmd: The scsi buffer which is going to be mapped. 
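Both BlockGuard prep routines budget against the same limit, phba->cfg_total_seg_cnt: the SLI-3 path reserves two descriptors (PDE5 plus PDE6) and assumes a worst case of four BPL entries per protection segment, while the SLI-4 path reserves one DISEED and three SGEs per protection segment. The sketch below models only that up-front admission test (the per-iteration overflow checks inside the setup routines still apply); total_seg_cnt stands in for the driver's configured value.

#include <stdbool.h>

/* Worst-case descriptor budget mirroring the tests in the two BG prep
 * routines above.  sli4 selects the DISEED/SGE costs, otherwise the
 * PDE/BPL costs apply. */
bool bg_cmd_fits(int data_segs, int prot_segs, int total_seg_cnt, bool sli4)
{
        int setup_cost    = sli4 ? 1 : 2;   /* DISEED vs PDE5 + PDE6          */
        int per_prot_cost = sli4 ? 3 : 4;   /* worst case per protection seg  */

        if (!prot_segs)                     /* LPFC_PG_TYPE_NO_DIF            */
                return data_segs + setup_cost <= total_seg_cnt;

        return prot_segs * per_prot_cost <= total_seg_cnt - 2;
}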
@@ -2310,6 +3644,25 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) } /** + * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer + * using BlockGuard. + * @phba: The Hba for which this call is being executed. + * @lpfc_cmd: The scsi buffer which is going to be mapped. + * + * This routine wraps the actual DMA mapping function pointer from the + * lpfc_hba struct. + * + * Return codes: + * 1 - Error + * 0 - Success + **/ +static inline int +lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) +{ + return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); +} + +/** * lpfc_send_scsi_error_event - Posts an event when there is SCSI error * @phba: Pointer to hba context object. * @vport: Pointer to vport object. @@ -2501,9 +3854,15 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, } lp = (uint32_t *)cmnd->sense_buffer; - if (!scsi_status && (resp_info & RESID_UNDER) && - vport->cfg_log_verbose & LOG_FCP_UNDER) - logit = LOG_FCP_UNDER; + /* special handling for under run conditions */ + if (!scsi_status && (resp_info & RESID_UNDER)) { + /* don't log under runs if fcp set... */ + if (vport->cfg_log_verbose & LOG_FCP) + logit = LOG_FCP_ERROR; + /* unless operator says so */ + if (vport->cfg_log_verbose & LOG_FCP_UNDER) + logit = LOG_FCP_UNDER; + } lpfc_printf_vlog(vport, KERN_WARNING, logit, "9024 FCP command x%x failed: x%x SNS x%x x%x " @@ -2620,7 +3979,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, struct lpfc_nodelist *pnode = rdata->pnode; struct scsi_cmnd *cmd; int result; - struct scsi_device *tmp_sdev; int depth; unsigned long flags; struct lpfc_fast_path_event *fast_path_evt; @@ -2634,11 +3992,42 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, cmd = lpfc_cmd->pCmd; shost = cmd->device->host; - lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4]; + lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK); lpfc_cmd->status = pIocbOut->iocb.ulpStatus; /* pick up SLI4 exhange busy status from HBA */ lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (lpfc_cmd->prot_data_type) { + struct scsi_dif_tuple *src = NULL; + + src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment; + /* + * Used to restore any changes to protection + * data for error injection. 
+ */ + switch (lpfc_cmd->prot_data_type) { + case LPFC_INJERR_REFTAG: + src->ref_tag = + lpfc_cmd->prot_data; + break; + case LPFC_INJERR_APPTAG: + src->app_tag = + (uint16_t)lpfc_cmd->prot_data; + break; + case LPFC_INJERR_GUARD: + src->guard_tag = + (uint16_t)lpfc_cmd->prot_data; + break; + default: + break; + } + + lpfc_cmd->prot_data = 0; + lpfc_cmd->prot_data_type = 0; + lpfc_cmd->prot_data_segment = NULL; + } +#endif if (pnode && NLP_CHK_NODE_ACT(pnode)) atomic_dec(&pnode->cmd_pending); @@ -2648,20 +4037,26 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, lpfc_cmd->status = IOSTAT_DRIVER_REJECT; else if (lpfc_cmd->status >= IOSTAT_CNT) lpfc_cmd->status = IOSTAT_DEFAULT; - if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR - && !lpfc_cmd->fcp_rsp->rspStatus3 - && (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) - && !(phba->cfg_log_verbose & LOG_FCP_UNDER)) + if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR && + !lpfc_cmd->fcp_rsp->rspStatus3 && + (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) && + !(vport->cfg_log_verbose & LOG_FCP_UNDER)) logit = 0; else logit = LOG_FCP | LOG_FCP_UNDER; lpfc_printf_vlog(vport, KERN_WARNING, logit, "9030 FCP cmd x%x failed <%d/%d> " - "status: x%x result: x%x Data: x%x x%x\n", + "status: x%x result: x%x " + "sid: x%x did: x%x oxid: x%x " + "Data: x%x x%x\n", cmd->cmnd[0], cmd->device ? cmd->device->id : 0xffff, cmd->device ? cmd->device->lun : 0xffff, lpfc_cmd->status, lpfc_cmd->result, + vport->fc_myDID, + (pnode) ? pnode->nlp_DID : 0, + phba->sli_rev == LPFC_SLI_REV4 ? + lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff, pIocbOut->iocb.ulpContext, lpfc_cmd->cur_iocbq.iocb.ulpIoTag); @@ -2742,8 +4137,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, * ABTS we cannot generate and RRQ. */ lpfc_set_rrq_active(phba, pnode, - lpfc_cmd->cur_iocbq.sli4_xritag, - 0, 0); + lpfc_cmd->cur_iocbq.sli4_lxritag, + 0, 0); } /* else: fall through */ default: @@ -2812,12 +4207,15 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, cmd->scsi_done(cmd); if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { + spin_lock_irqsave(&phba->hbalock, flags); + lpfc_cmd->pCmd = NULL; + spin_unlock_irqrestore(&phba->hbalock, flags); + /* * If there is a thread waiting for command completion * wake up the thread. */ spin_lock_irqsave(shost->host_lock, flags); - lpfc_cmd->pCmd = NULL; if (lpfc_cmd->waitq) wake_up(lpfc_cmd->waitq); spin_unlock_irqrestore(shost->host_lock, flags); @@ -2825,38 +4223,15 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, return; } - if (!result) - lpfc_rampup_queue_depth(vport, queue_depth); - - /* - * Check for queue full. If the lun is reporting queue full, then - * back off the lun queue depth to prevent target overloads. - */ - if (result == SAM_STAT_TASK_SET_FULL && pnode && - NLP_CHK_NODE_ACT(pnode)) { - shost_for_each_device(tmp_sdev, shost) { - if (tmp_sdev->id != scsi_id) - continue; - depth = scsi_track_queue_full(tmp_sdev, - tmp_sdev->queue_depth-1); - if (depth <= 0) - continue; - lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, - "0711 detected queue full - lun queue " - "depth adjusted to %d.\n", depth); - lpfc_send_sdev_queuedepth_change_event(phba, vport, - pnode, - tmp_sdev->lun, - depth+1, depth); - } - } + spin_lock_irqsave(&phba->hbalock, flags); + lpfc_cmd->pCmd = NULL; + spin_unlock_irqrestore(&phba->hbalock, flags); /* * If there is a thread waiting for command completion * wake up the thread. 
*/ spin_lock_irqsave(shost->host_lock, flags); - lpfc_cmd->pCmd = NULL; if (lpfc_cmd->waitq) wake_up(lpfc_cmd->waitq); spin_unlock_irqrestore(shost->host_lock, flags); @@ -2902,6 +4277,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq); int datadir = scsi_cmnd->sc_data_direction; char tag[2]; + uint8_t *ptr; + bool sli4; + uint32_t fcpdl; if (!pnode || !NLP_CHK_NODE_ACT(pnode)) return; @@ -2913,8 +4291,13 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, int_to_scsilun(lpfc_cmd->pCmd->device->lun, &lpfc_cmd->fcp_cmnd->fcp_lun); - memset(&fcp_cmnd->fcpCdb[0], 0, LPFC_FCP_CDB_LEN); - memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, scsi_cmnd->cmd_len); + ptr = &fcp_cmnd->fcpCdb[0]; + memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); + if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) { + ptr += scsi_cmnd->cmd_len; + memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len)); + } + if (scsi_populate_tag_msg(scsi_cmnd, tag)) { switch (tag[0]) { case HEAD_OF_QUEUE_TAG: @@ -2928,7 +4311,10 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, break; } } else - fcp_cmnd->fcpCntl1 = 0; + fcp_cmnd->fcpCntl1 = SIMPLE_Q; + + sli4 = (phba->sli_rev == LPFC_SLI_REV4); + piocbq->iocb.un.fcpi.fcpi_XRdy = 0; /* * There are three possibilities here - use scatter-gather segment, use @@ -2939,11 +4325,16 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, if (scsi_sg_count(scsi_cmnd)) { if (datadir == DMA_TO_DEVICE) { iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; - if (phba->sli_rev < LPFC_SLI_REV4) { - iocb_cmd->un.fcpi.fcpi_parm = 0; - iocb_cmd->ulpPU = 0; - } else - iocb_cmd->ulpPU = PARM_READ_CHECK; + iocb_cmd->ulpPU = PARM_READ_CHECK; + if (vport->cfg_first_burst_size && + (pnode->nlp_flag & NLP_FIRSTBURST)) { + fcpdl = scsi_bufflen(scsi_cmnd); + if (fcpdl < vport->cfg_first_burst_size) + piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl; + else + piocbq->iocb.un.fcpi.fcpi_XRdy = + vport->cfg_first_burst_size; + } fcp_cmnd->fcpCntl3 = WRITE_DATA; phba->fc4OutputRequests++; } else { @@ -2967,7 +4358,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, * of the scsi_cmnd request_buffer */ piocbq->iocb.ulpContext = pnode->nlp_rpi; - if (phba->sli_rev == LPFC_SLI_REV4) + if (sli4) piocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[pnode->nlp_rpi]; if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) @@ -3031,10 +4422,10 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, piocb->ulpContext = vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; } - if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { - piocb->ulpFCP2Rcvy = 1; - } + piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 
1 : 0; piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f); + piocb->ulpPU = 0; + piocb->un.fcpi.fcpi_parm = 0; /* ulpTimeout is only one byte */ if (lpfc_cmd->timeout > 0xff) { @@ -3072,12 +4463,14 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) case LPFC_PCI_DEV_LP: phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3; phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3; + phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3; phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3; break; case LPFC_PCI_DEV_OC: phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4; phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4; + phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4; phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4; phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4; break; @@ -3128,7 +4521,7 @@ lpfc_info(struct Scsi_Host *host) { struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata; struct lpfc_hba *phba = vport->phba; - int len; + int len, link_speed = 0; static char lpfcinfobuf[384]; memset(lpfcinfobuf,0,384); @@ -3149,12 +4542,18 @@ lpfc_info(struct Scsi_Host *host) phba->Port); } len = strlen(lpfcinfobuf); - if (phba->sli4_hba.link_state.logical_speed) { - snprintf(lpfcinfobuf + len, - 384-len, - " Logical Link Speed: %d Mbps", - phba->sli4_hba.link_state.logical_speed * 10); + if (phba->sli_rev <= LPFC_SLI_REV3) { + link_speed = lpfc_sli_port_speed_get(phba); + } else { + if (phba->sli4_hba.link_state.logical_speed) + link_speed = + phba->sli4_hba.link_state.logical_speed; + else + link_speed = phba->sli4_hba.link_state.speed; } + if (link_speed != 0) + snprintf(lpfcinfobuf + len, 384-len, + " Logical Link Speed: %d Mbps", link_speed); } return lpfcinfobuf; } @@ -3171,7 +4570,7 @@ static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba) unsigned long poll_tmo_expires = (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo)); - if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt) + if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq)) mod_timer(&phba->fcp_poll_timer, poll_tmo_expires); } @@ -3222,36 +4621,26 @@ void lpfc_poll_timeout(unsigned long ptr) * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily. **/ static int -lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) +lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) { - struct Scsi_Host *shost = cmnd->device->host; struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - struct lpfc_rport_data *rdata = cmnd->device->hostdata; + struct lpfc_rport_data *rdata; struct lpfc_nodelist *ndlp; struct lpfc_scsi_buf *lpfc_cmd; struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); int err; + rdata = lpfc_rport_data_from_scsi_device(cmnd->device); err = fc_remote_port_chkready(rport); if (err) { cmnd->result = err; goto out_fail_command; } - /* - * Do not let the mid-layer retry I/O too fast. If an I/O is retried - * without waiting a bit then indicate that the device is busy. 
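Looking back at the write path in lpfc_scsi_prep_cmnd(), fcpi_XRdy is now seeded whenever the vport has cfg_first_burst_size configured and the target advertised NLP_FIRSTBURST, using the smaller of the transfer length and the first-burst allowance. The helper below is a compact restatement of that clamp; the parameter names are illustrative.

#include <stdint.h>

/* First-burst seeding as in the IWRITE setup above: allow up to the
 * configured allowance unsolicited, never more than the command moves;
 * 0 means the feature is off or the target did not negotiate it. */
uint32_t first_burst_xrdy(uint32_t bufflen, uint32_t first_burst_size,
                          int target_first_burst)
{
        if (!first_burst_size || !target_first_burst)
                return 0;
        return bufflen < first_burst_size ? bufflen : first_burst_size;
}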
- */ - if (cmnd->retries && - time_before(jiffies, (cmnd->jiffies_at_alloc + - msecs_to_jiffies(LPFC_RETRY_PAUSE * - cmnd->retries)))) - return SCSI_MLQUEUE_DEVICE_BUSY; ndlp = rdata->pnode; if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) && - (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) || - (phba->sli_rev == LPFC_SLI_REV4))) { + (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) { lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9058 BLKGRD: ERROR: rcvd protected cmd:%02x" @@ -3266,10 +4655,8 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) * Catch race where our node has transitioned, but the * transport is still transitioning. */ - if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { - cmnd->result = ScsiResult(DID_IMM_RETRY, 0); - goto out_fail_command; - } + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) + goto out_tgt_busy; if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) goto out_tgt_busy; @@ -3292,68 +4679,30 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) lpfc_cmd->timeout = 0; lpfc_cmd->start_time = jiffies; cmnd->host_scribble = (unsigned char *)lpfc_cmd; - cmnd->scsi_done = done; if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { if (vport->phba->cfg_enable_bg) { - lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, - "9033 BLKGRD: rcvd protected cmd:%02x op:%02x " - "str=%s\n", - cmnd->cmnd[0], scsi_get_prot_op(cmnd), - dif_op_str[scsi_get_prot_op(cmnd)]); - lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, - "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x " - "%02x %02x %02x %02x %02x\n", - cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2], - cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5], - cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8], - cmnd->cmnd[9]); - if (cmnd->cmnd[0] == READ_10) - lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, - "9035 BLKGRD: READ @ sector %llu, " - "count %u\n", - (unsigned long long)scsi_get_lba(cmnd), - blk_rq_sectors(cmnd->request)); - else if (cmnd->cmnd[0] == WRITE_10) - lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, - "9036 BLKGRD: WRITE @ sector %llu, " - "count %u cmd=%p\n", - (unsigned long long)scsi_get_lba(cmnd), - blk_rq_sectors(cmnd->request), - cmnd); + lpfc_printf_vlog(vport, + KERN_INFO, LOG_SCSI_CMD, + "9033 BLKGRD: rcvd %s cmd:x%x " + "sector x%llx cnt %u pt %x\n", + dif_op_str[scsi_get_prot_op(cmnd)], + cmnd->cmnd[0], + (unsigned long long)scsi_get_lba(cmnd), + blk_rq_sectors(cmnd->request), + (cmnd->cmnd[1]>>5)); } - err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); } else { if (vport->phba->cfg_enable_bg) { - lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, - "9038 BLKGRD: rcvd unprotected cmd:" - "%02x op:%02x str=%s\n", - cmnd->cmnd[0], scsi_get_prot_op(cmnd), - dif_op_str[scsi_get_prot_op(cmnd)]); - lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, - "9039 BLKGRD: CDB: %02x %02x %02x " - "%02x %02x %02x %02x %02x %02x %02x\n", - cmnd->cmnd[0], cmnd->cmnd[1], - cmnd->cmnd[2], cmnd->cmnd[3], - cmnd->cmnd[4], cmnd->cmnd[5], - cmnd->cmnd[6], cmnd->cmnd[7], - cmnd->cmnd[8], cmnd->cmnd[9]); - if (cmnd->cmnd[0] == READ_10) - lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, - "9040 dbg: READ @ sector %llu, " - "count %u\n", - (unsigned long long)scsi_get_lba(cmnd), - blk_rq_sectors(cmnd->request)); - else if (cmnd->cmnd[0] == WRITE_10) - lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, - "9041 dbg: WRITE @ sector %llu, " - "count %u cmd=%p\n", + lpfc_printf_vlog(vport, + KERN_INFO, LOG_SCSI_CMD, + "9038 BLKGRD: rcvd PROT_NORMAL cmd: " + "x%x sector x%llx cnt %u pt %x\n", + cmnd->cmnd[0], (unsigned long long)scsi_get_lba(cmnd), 
- blk_rq_sectors(cmnd->request), cmnd); - else - lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, - "9042 dbg: parser not implemented\n"); + blk_rq_sectors(cmnd->request), + (cmnd->cmnd[1]>>5)); } err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); } @@ -3368,14 +4717,30 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); if (err) { atomic_dec(&ndlp->cmd_pending); + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "3376 FCP could not issue IOCB err %x" + "FCP cmd x%x <%d/%d> " + "sid: x%x did: x%x oxid: x%x " + "Data: x%x x%x x%x x%x\n", + err, cmnd->cmnd[0], + cmnd->device ? cmnd->device->id : 0xffff, + cmnd->device ? cmnd->device->lun : 0xffff, + vport->fc_myDID, ndlp->nlp_DID, + phba->sli_rev == LPFC_SLI_REV4 ? + lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff, + lpfc_cmd->cur_iocbq.iocb.ulpContext, + lpfc_cmd->cur_iocbq.iocb.ulpIoTag, + lpfc_cmd->cur_iocbq.iocb.ulpTimeout, + (uint32_t) + (cmnd->request->timeout / 1000)); + + goto out_host_busy_free_buf; } if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { - spin_unlock(shost->host_lock); lpfc_sli_handle_fast_ring_event(phba, &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); - spin_lock(shost->host_lock); if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_poll_rearm_timer(phba); } @@ -3392,11 +4757,10 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) return SCSI_MLQUEUE_TARGET_BUSY; out_fail_command: - done(cmnd); + cmnd->scsi_done(cmnd); return 0; } -static DEF_SCSI_QCMD(lpfc_queuecommand) /** * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point @@ -3418,40 +4782,77 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) struct lpfc_iocbq *abtsiocb; struct lpfc_scsi_buf *lpfc_cmd; IOCB_t *cmd, *icmd; - int ret = SUCCESS; + int ret = SUCCESS, status = 0; + struct lpfc_sli_ring *pring_s4; + int ring_number, ret_val; + unsigned long flags, iflags; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); - ret = fc_block_scsi_eh(cmnd); - if (ret) - return ret; + status = fc_block_scsi_eh(cmnd); + if (status != 0 && status != SUCCESS) + return status; + + spin_lock_irqsave(&phba->hbalock, flags); + /* driver queued commands are in process of being flushed */ + if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { + spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, + "3168 SCSI Layer abort requested I/O has been " + "flushed by LLD.\n"); + return FAILED; + } + lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; - if (!lpfc_cmd) { + if (!lpfc_cmd || !lpfc_cmd->pCmd) { + spin_unlock_irqrestore(&phba->hbalock, flags); lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, "2873 SCSI Layer I/O Abort Request IO CMPL Status " "x%x ID %d LUN %d\n", - ret, cmnd->device->id, cmnd->device->lun); + SUCCESS, cmnd->device->id, cmnd->device->lun); return SUCCESS; } + iocb = &lpfc_cmd->cur_iocbq; + /* the command is in process of being cancelled */ + if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { + spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, + "3169 SCSI Layer abort requested I/O has been " + "cancelled by LLD.\n"); + return FAILED; + } /* * If pCmd field of the corresponding lpfc_scsi_buf structure * points to a different SCSI command, then the driver has * already completed this command, but the midlayer did not - * see the completion before the eh fired. Just return - * SUCCESS. + * see the completion before the eh fired. Just return SUCCESS. 
*/ - iocb = &lpfc_cmd->cur_iocbq; - if (lpfc_cmd->pCmd != cmnd) - goto out; + if (lpfc_cmd->pCmd != cmnd) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, + "3170 SCSI Layer abort requested I/O has been " + "completed by LLD.\n"); + goto out_unlock; + } BUG_ON(iocb->context1 != lpfc_cmd); - abtsiocb = lpfc_sli_get_iocbq(phba); + /* abort issued in recovery is still in progress */ + if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, + "3389 SCSI Layer I/O Abort Request is pending\n"); + spin_unlock_irqrestore(&phba->hbalock, flags); + goto wait_for_cmpl; + } + + abtsiocb = __lpfc_sli_get_iocbq(phba); if (abtsiocb == NULL) { ret = FAILED; - goto out; + goto out_unlock; } + /* Indicate the IO is being aborted by the driver. */ + iocb->iocb_flag |= LPFC_DRIVER_ABORTED; + /* * The scsi command can not be in txq and it is in flight because the * pCmd is still pointig at the SCSI command we have to abort. There @@ -3481,8 +4882,23 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; abtsiocb->vport = vport; - if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) == - IOCB_ERROR) { + if (phba->sli_rev == LPFC_SLI_REV4) { + ring_number = MAX_SLI3_CONFIGURED_RINGS + iocb->fcp_wqidx; + pring_s4 = &phba->sli.ring[ring_number]; + /* Note: both hbalock and ring_lock must be set here */ + spin_lock_irqsave(&pring_s4->ring_lock, iflags); + ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, + abtsiocb, 0); + spin_unlock_irqrestore(&pring_s4->ring_lock, iflags); + } else { + ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, + abtsiocb, 0); + } + /* no longer need the lock after this point */ + spin_unlock_irqrestore(&phba->hbalock, flags); + + + if (ret_val == IOCB_ERROR) { lpfc_sli_release_iocbq(phba, abtsiocb); ret = FAILED; goto out; @@ -3492,26 +4908,31 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) lpfc_sli_handle_fast_ring_event(phba, &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); +wait_for_cmpl: lpfc_cmd->waitq = &waitq; /* Wait for abort to complete */ wait_event_timeout(waitq, (lpfc_cmd->pCmd != cmnd), - (2*vport->cfg_devloss_tmo*HZ)); + msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000)); - spin_lock_irq(shost->host_lock); + spin_lock_irqsave(shost->host_lock, flags); lpfc_cmd->waitq = NULL; - spin_unlock_irq(shost->host_lock); + spin_unlock_irqrestore(shost->host_lock, flags); if (lpfc_cmd->pCmd == cmnd) { ret = FAILED; lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, "0748 abort handler timed out waiting " - "for abort to complete: ret %#x, ID %d, " - "LUN %d\n", - ret, cmnd->device->id, cmnd->device->lun); + "for abortng I/O (xri:x%x) to complete: " + "ret %#x, ID %d, LUN %d\n", + iocb->sli4_xritag, ret, + cmnd->device->id, cmnd->device->lun); } + goto out; - out: +out_unlock: + spin_unlock_irqrestore(&phba->hbalock, flags); +out: lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, "0749 SCSI Layer I/O Abort Request Status x%x ID %d " "LUN %d\n", ret, cmnd->device->id, @@ -3542,6 +4963,73 @@ lpfc_taskmgmt_name(uint8_t task_mgmt_cmd) } } + +/** + * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed + * @vport: The virtual port for which this call is being executed. + * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 
+ * + * This routine checks the FCP RSP INFO to see if the tsk mgmt command succeded + * + * Return code : + * 0x2003 - Error + * 0x2002 - Success + **/ +static int +lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd) +{ + struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; + uint32_t rsp_info; + uint32_t rsp_len; + uint8_t rsp_info_code; + int ret = FAILED; + + + if (fcprsp == NULL) + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "0703 fcp_rsp is missing\n"); + else { + rsp_info = fcprsp->rspStatus2; + rsp_len = be32_to_cpu(fcprsp->rspRspLen); + rsp_info_code = fcprsp->rspInfo3; + + + lpfc_printf_vlog(vport, KERN_INFO, + LOG_FCP, + "0706 fcp_rsp valid 0x%x," + " rsp len=%d code 0x%x\n", + rsp_info, + rsp_len, rsp_info_code); + + if ((fcprsp->rspStatus2&RSP_LEN_VALID) && (rsp_len == 8)) { + switch (rsp_info_code) { + case RSP_NO_FAILURE: + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "0715 Task Mgmt No Failure\n"); + ret = SUCCESS; + break; + case RSP_TM_NOT_SUPPORTED: /* TM rejected */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "0716 Task Mgmt Target " + "reject\n"); + break; + case RSP_TM_NOT_COMPLETED: /* TM failed */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "0717 Task Mgmt Target " + "failed TM\n"); + break; + case RSP_TM_INVALID_LU: /* TM to invalid LU! */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "0718 Task Mgmt to invalid " + "LUN\n"); + break; + } + } + } + return ret; +} + + /** * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler * @vport: The virtual port for which this call is being executed. @@ -3576,7 +5064,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode); if (lpfc_cmd == NULL) return FAILED; - lpfc_cmd->timeout = 60; + lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo; lpfc_cmd->rdata = rdata; status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, @@ -3592,6 +5080,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, lpfc_release_scsi_buf(phba, lpfc_cmd); return FAILED; } + iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "0702 Issue %s to TGT %d LUN %d " @@ -3602,13 +5091,8 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, iocbq, iocbqrsp, lpfc_cmd->timeout); - if (status != IOCB_SUCCESS) { - if (status == IOCB_TIMEDOUT) { - iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; - ret = TIMEOUT_ERROR; - } else - ret = FAILED; - lpfc_cmd->status = IOSTAT_DRIVER_REJECT; + if ((status != IOCB_SUCCESS) || + (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, "0727 TMF %s to TGT %d LUN %d failed (%d, %d) " "iocb_flag x%x\n", @@ -3616,9 +5100,21 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, tgt_id, lun_id, iocbqrsp->iocb.ulpStatus, iocbqrsp->iocb.un.ulpWord[4], iocbq->iocb_flag); - } else if (status == IOCB_BUSY) - ret = FAILED; - else + /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */ + if (status == IOCB_SUCCESS) { + if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) + /* Something in the FCP_RSP was invalid. 
+ * Check conditions */ + ret = lpfc_check_fcp_rsp(vport, lpfc_cmd); + else + ret = FAILED; + } else if (status == IOCB_TIMEDOUT) { + ret = TIMEOUT_ERROR; + } else { + ret = FAILED; + } + lpfc_cmd->status = IOSTAT_DRIVER_REJECT; + } else ret = SUCCESS; lpfc_sli_release_iocbq(phba, iocbqrsp); @@ -3644,10 +5140,11 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, static int lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd) { - struct lpfc_rport_data *rdata = cmnd->device->hostdata; + struct lpfc_rport_data *rdata; struct lpfc_nodelist *pnode; unsigned long later; + rdata = lpfc_rport_data_from_scsi_device(cmnd->device); if (!rdata) { lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "0797 Tgt Map rport failure: rdata x%p\n", rdata); @@ -3665,7 +5162,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd) if (pnode->nlp_state == NLP_STE_MAPPED_NODE) return SUCCESS; schedule_timeout_uninterruptible(msecs_to_jiffies(500)); - rdata = cmnd->device->hostdata; + rdata = lpfc_rport_data_from_scsi_device(cmnd->device); if (!rdata) return FAILED; pnode = rdata->pnode; @@ -3702,8 +5199,9 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id, cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); if (cnt) - lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], - tgt_id, lun_id, context); + lpfc_sli_abort_taskmgmt(vport, + &phba->sli.ring[phba->sli.fcp_ring], + tgt_id, lun_id, context); later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; while (time_after(later, jiffies) && cnt) { schedule_timeout_uninterruptible(msecs_to_jiffies(20)); @@ -3737,13 +5235,14 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) { struct Scsi_Host *shost = cmnd->device->host; struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; - struct lpfc_rport_data *rdata = cmnd->device->hostdata; + struct lpfc_rport_data *rdata; struct lpfc_nodelist *pnode; unsigned tgt_id = cmnd->device->id; unsigned int lun_id = cmnd->device->lun; struct lpfc_scsi_event_header scsi_event; int status; + rdata = lpfc_rport_data_from_scsi_device(cmnd->device); if (!rdata) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, "0798 Device Reset rport failure: rdata x%p\n", rdata); @@ -3751,7 +5250,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) } pnode = rdata->pnode; status = fc_block_scsi_eh(cmnd); - if (status) + if (status != 0 && status != SUCCESS) return status; status = lpfc_chk_tgt_mapped(vport, cmnd); @@ -3783,8 +5282,10 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) * So, continue on. * We will report success if all the i/o aborts successfully. 
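lpfc_chk_tgt_mapped() and the reset handlers above now obtain the rport data through lpfc_rport_data_from_scsi_device() instead of casting sdev->hostdata directly. The helper itself is not part of this hunk; given that the OAS changes later in the patch make slave_alloc store a struct lpfc_device_data in hostdata when cfg_fof is set, it presumably resolves the extra level of indirection along these lines (a sketch under that assumption, not the driver's actual code):

/* Sketch: with cfg_fof enabled, sdev->hostdata holds a
 * struct lpfc_device_data whose rport_data field points at the real
 * lpfc_rport_data; otherwise hostdata is the lpfc_rport_data itself
 * (rport->dd_data), exactly as before this patch.
 */
static inline struct lpfc_rport_data *
lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
{
        struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;

        if (vport->phba->cfg_fof)
                return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;

        return (struct lpfc_rport_data *)sdev->hostdata;
}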
*/ - status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, + if (status == SUCCESS) + status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, LPFC_CTX_LUN); + return status; } @@ -3804,13 +5305,14 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) { struct Scsi_Host *shost = cmnd->device->host; struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; - struct lpfc_rport_data *rdata = cmnd->device->hostdata; + struct lpfc_rport_data *rdata; struct lpfc_nodelist *pnode; unsigned tgt_id = cmnd->device->id; unsigned int lun_id = cmnd->device->lun; struct lpfc_scsi_event_header scsi_event; int status; + rdata = lpfc_rport_data_from_scsi_device(cmnd->device); if (!rdata) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, "0799 Target Reset rport failure: rdata x%p\n", rdata); @@ -3818,7 +5320,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) } pnode = rdata->pnode; status = fc_block_scsi_eh(cmnd); - if (status) + if (status != 0 && status != SUCCESS) return status; status = lpfc_chk_tgt_mapped(vport, cmnd); @@ -3850,8 +5352,9 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) * So, continue on. * We will report success if all the i/o aborts successfully. */ - status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, - LPFC_CTX_TGT); + if (status == SUCCESS) + status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, + LPFC_CTX_TGT); return status; } @@ -3886,7 +5389,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); status = fc_block_scsi_eh(cmnd); - if (status) + if (status != 0 && status != SUCCESS) return status; /* @@ -3901,6 +5404,9 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { if (!NLP_CHK_NODE_ACT(ndlp)) continue; + if (vport->phba->cfg_fcp2_no_tgt_reset && + (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE)) + continue; if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && ndlp->nlp_sid == i && ndlp->rport) { @@ -3939,6 +5445,51 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) } /** + * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt + * @cmnd: Pointer to scsi_cmnd data structure. + * + * This routine does host reset to the adaptor port. It brings the HBA + * offline, performs a board restart, and then brings the board back online. + * The lpfc_offline calls lpfc_sli_hba_down which will abort and local + * reject all outstanding SCSI commands to the host and error returned + * back to SCSI mid-level. As this will be SCSI mid-level's last resort + * of error handling, it will only return error if resetting of the adapter + * is not successful; in all other cases, will return success. 
+ * + * Return code : + * 0x2003 - Error + * 0x2002 - Success + **/ +static int +lpfc_host_reset_handler(struct scsi_cmnd *cmnd) +{ + struct Scsi_Host *shost = cmnd->device->host; + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + int rc, ret = SUCCESS; + + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "3172 SCSI layer issued Host Reset Data:\n"); + + lpfc_offline_prep(phba, LPFC_MBX_WAIT); + lpfc_offline(phba); + rc = lpfc_sli_brdrestart(phba); + if (rc) + ret = FAILED; + rc = lpfc_online(phba); + if (rc) + ret = FAILED; + lpfc_unblock_mgmt_io(phba); + + if (ret == FAILED) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "3323 Failed host reset, bring it offline\n"); + lpfc_sli4_offline_eratt(phba); + } + return ret; +} + +/** * lpfc_slave_alloc - scsi_host_template slave_alloc entry point * @sdev: Pointer to scsi_device. * @@ -3961,11 +5512,45 @@ lpfc_slave_alloc(struct scsi_device *sdev) uint32_t num_to_alloc = 0; int num_allocated = 0; uint32_t sdev_cnt; + struct lpfc_device_data *device_data; + unsigned long flags; + struct lpfc_name target_wwpn; if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; - sdev->hostdata = rport->dd_data; + if (phba->cfg_fof) { + + /* + * Check to see if the device data structure for the lun + * exists. If not, create one. + */ + + u64_to_wwn(rport->port_name, target_wwpn.u.wwn); + spin_lock_irqsave(&phba->devicelock, flags); + device_data = __lpfc_get_device_data(phba, + &phba->luns, + &vport->fc_portname, + &target_wwpn, + sdev->lun); + if (!device_data) { + spin_unlock_irqrestore(&phba->devicelock, flags); + device_data = lpfc_create_device_data(phba, + &vport->fc_portname, + &target_wwpn, + sdev->lun, true); + if (!device_data) + return -ENOMEM; + spin_lock_irqsave(&phba->devicelock, flags); + list_add_tail(&device_data->listentry, &phba->luns); + } + device_data->rport_data = rport->dd_data; + device_data->available = true; + spin_unlock_irqrestore(&phba->devicelock, flags); + sdev->hostdata = device_data; + } else { + sdev->hostdata = rport->dd_data; + } sdev_cnt = atomic_inc_return(&phba->sdev_cnt); /* @@ -4001,11 +5586,11 @@ lpfc_slave_alloc(struct scsi_device *sdev) } num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc); if (num_to_alloc != num_allocated) { - lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, - "0708 Allocation request of %d " - "command buffers did not succeed. " - "Allocated %d buffers.\n", - num_to_alloc, num_allocated); + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "0708 Allocation request of %d " + "command buffers did not succeed. " + "Allocated %d buffers.\n", + num_to_alloc, num_allocated); } if (num_allocated > 0) phba->total_scsi_bufs += num_allocated; @@ -4055,11 +5640,344 @@ lpfc_slave_destroy(struct scsi_device *sdev) { struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; struct lpfc_hba *phba = vport->phba; + unsigned long flags; + struct lpfc_device_data *device_data = sdev->hostdata; + atomic_dec(&phba->sdev_cnt); + if ((phba->cfg_fof) && (device_data)) { + spin_lock_irqsave(&phba->devicelock, flags); + device_data->available = false; + if (!device_data->oas_enabled) + lpfc_delete_device_data(phba, device_data); + spin_unlock_irqrestore(&phba->devicelock, flags); + } sdev->hostdata = NULL; return; } +/** + * lpfc_create_device_data - creates and initializes device data structure for OAS + * @pha: Pointer to host bus adapter structure. 
+ * @vport_wwpn: Pointer to vport's wwpn information + * @target_wwpn: Pointer to target's wwpn information + * @lun: Lun on target + * @atomic_create: Flag to indicate if memory should be allocated using the + * GFP_ATOMIC flag or not. + * + * This routine creates a device data structure which will contain identifying + * information for the device (host wwpn, target wwpn, lun), state of OAS, + * whether or not the corresponding lun is available by the system, + * and pointer to the rport data. + * + * Return codes: + * NULL - Error + * Pointer to lpfc_device_data - Success + **/ +struct lpfc_device_data* +lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, + struct lpfc_name *target_wwpn, uint64_t lun, + bool atomic_create) +{ + + struct lpfc_device_data *lun_info; + int memory_flags; + + if (unlikely(!phba) || !vport_wwpn || !target_wwpn || + !(phba->cfg_fof)) + return NULL; + + /* Attempt to create the device data to contain lun info */ + + if (atomic_create) + memory_flags = GFP_ATOMIC; + else + memory_flags = GFP_KERNEL; + lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags); + if (!lun_info) + return NULL; + INIT_LIST_HEAD(&lun_info->listentry); + lun_info->rport_data = NULL; + memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn, + sizeof(struct lpfc_name)); + memcpy(&lun_info->device_id.target_wwpn, target_wwpn, + sizeof(struct lpfc_name)); + lun_info->device_id.lun = lun; + lun_info->oas_enabled = false; + lun_info->available = false; + return lun_info; +} + +/** + * lpfc_delete_device_data - frees a device data structure for OAS + * @pha: Pointer to host bus adapter structure. + * @lun_info: Pointer to device data structure to free. + * + * This routine frees the previously allocated device data structure passed. + * + **/ +void +lpfc_delete_device_data(struct lpfc_hba *phba, + struct lpfc_device_data *lun_info) +{ + + if (unlikely(!phba) || !lun_info || + !(phba->cfg_fof)) + return; + + if (!list_empty(&lun_info->listentry)) + list_del(&lun_info->listentry); + mempool_free(lun_info, phba->device_data_mem_pool); + return; +} + +/** + * __lpfc_get_device_data - returns the device data for the specified lun + * @pha: Pointer to host bus adapter structure. + * @list: Point to list to search. + * @vport_wwpn: Pointer to vport's wwpn information + * @target_wwpn: Pointer to target's wwpn information + * @lun: Lun on target + * + * This routine searches the list passed for the specified lun's device data. + * This function does not hold locks, it is the responsibility of the caller + * to ensure the proper lock is held before calling the function. + * + * Return codes: + * NULL - Error + * Pointer to lpfc_device_data - Success + **/ +struct lpfc_device_data* +__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list, + struct lpfc_name *vport_wwpn, + struct lpfc_name *target_wwpn, uint64_t lun) +{ + + struct lpfc_device_data *lun_info; + + if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn || + !phba->cfg_fof) + return NULL; + + /* Check to see if the lun is already enabled for OAS. */ + + list_for_each_entry(lun_info, list, listentry) { + if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn, + sizeof(struct lpfc_name)) == 0) && + (memcmp(&lun_info->device_id.target_wwpn, target_wwpn, + sizeof(struct lpfc_name)) == 0) && + (lun_info->device_id.lun == lun)) + return lun_info; + } + + return NULL; +} + +/** + * lpfc_find_next_oas_lun - searches for the next oas lun + * @pha: Pointer to host bus adapter structure. 
+ * @vport_wwpn: Pointer to vport's wwpn information + * @target_wwpn: Pointer to target's wwpn information + * @starting_lun: Pointer to the lun to start searching for + * @found_vport_wwpn: Pointer to the found lun's vport wwpn information + * @found_target_wwpn: Pointer to the found lun's target wwpn information + * @found_lun: Pointer to the found lun. + * @found_lun_status: Pointer to status of the found lun. + * + * This routine searches the luns list for the specified lun + * or the first lun for the vport/target. If the vport wwpn contains + * a zero value then a specific vport is not specified. In this case + * any vport which contains the lun will be considered a match. If the + * target wwpn contains a zero value then a specific target is not specified. + * In this case any target which contains the lun will be considered a + * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status + * are returned. The function will also return the next lun if available. + * If the next lun is not found, starting_lun parameter will be set to + * NO_MORE_OAS_LUN. + * + * Return codes: + * non-0 - Error + * 0 - Success + **/ +bool +lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, + struct lpfc_name *target_wwpn, uint64_t *starting_lun, + struct lpfc_name *found_vport_wwpn, + struct lpfc_name *found_target_wwpn, + uint64_t *found_lun, + uint32_t *found_lun_status) +{ + + unsigned long flags; + struct lpfc_device_data *lun_info; + struct lpfc_device_id *device_id; + uint64_t lun; + bool found = false; + + if (unlikely(!phba) || !vport_wwpn || !target_wwpn || + !starting_lun || !found_vport_wwpn || + !found_target_wwpn || !found_lun || !found_lun_status || + (*starting_lun == NO_MORE_OAS_LUN) || + !phba->cfg_fof) + return false; + + lun = *starting_lun; + *found_lun = NO_MORE_OAS_LUN; + *starting_lun = NO_MORE_OAS_LUN; + + /* Search for lun or the lun closet in value */ + + spin_lock_irqsave(&phba->devicelock, flags); + list_for_each_entry(lun_info, &phba->luns, listentry) { + if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) || + (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn, + sizeof(struct lpfc_name)) == 0)) && + ((wwn_to_u64(target_wwpn->u.wwn) == 0) || + (memcmp(&lun_info->device_id.target_wwpn, target_wwpn, + sizeof(struct lpfc_name)) == 0)) && + (lun_info->oas_enabled)) { + device_id = &lun_info->device_id; + if ((!found) && + ((lun == FIND_FIRST_OAS_LUN) || + (device_id->lun == lun))) { + *found_lun = device_id->lun; + memcpy(found_vport_wwpn, + &device_id->vport_wwpn, + sizeof(struct lpfc_name)); + memcpy(found_target_wwpn, + &device_id->target_wwpn, + sizeof(struct lpfc_name)); + if (lun_info->available) + *found_lun_status = + OAS_LUN_STATUS_EXISTS; + else + *found_lun_status = 0; + if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT) + memset(vport_wwpn, 0x0, + sizeof(struct lpfc_name)); + if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET) + memset(target_wwpn, 0x0, + sizeof(struct lpfc_name)); + found = true; + } else if (found) { + *starting_lun = device_id->lun; + memcpy(vport_wwpn, &device_id->vport_wwpn, + sizeof(struct lpfc_name)); + memcpy(target_wwpn, &device_id->target_wwpn, + sizeof(struct lpfc_name)); + break; + } + } + } + spin_unlock_irqrestore(&phba->devicelock, flags); + return found; +} + +/** + * lpfc_enable_oas_lun - enables a lun for OAS operations + * @pha: Pointer to host bus adapter structure. 
+ * @vport_wwpn: Pointer to vport's wwpn information + * @target_wwpn: Pointer to target's wwpn information + * @lun: Lun + * + * This routine enables a lun for oas operations. The routines does so by + * doing the following : + * + * 1) Checks to see if the device data for the lun has been created. + * 2) If found, sets the OAS enabled flag if not set and returns. + * 3) Otherwise, creates a device data structure. + * 4) If successfully created, indicates the device data is for an OAS lun, + * indicates the lun is not available and add to the list of luns. + * + * Return codes: + * false - Error + * true - Success + **/ +bool +lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, + struct lpfc_name *target_wwpn, uint64_t lun) +{ + + struct lpfc_device_data *lun_info; + unsigned long flags; + + if (unlikely(!phba) || !vport_wwpn || !target_wwpn || + !phba->cfg_fof) + return false; + + spin_lock_irqsave(&phba->devicelock, flags); + + /* Check to see if the device data for the lun has been created */ + lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn, + target_wwpn, lun); + if (lun_info) { + if (!lun_info->oas_enabled) + lun_info->oas_enabled = true; + spin_unlock_irqrestore(&phba->devicelock, flags); + return true; + } + + /* Create an lun info structure and add to list of luns */ + lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun, + false); + if (lun_info) { + lun_info->oas_enabled = true; + lun_info->available = false; + list_add_tail(&lun_info->listentry, &phba->luns); + spin_unlock_irqrestore(&phba->devicelock, flags); + return true; + } + spin_unlock_irqrestore(&phba->devicelock, flags); + return false; +} + +/** + * lpfc_disable_oas_lun - disables a lun for OAS operations + * @pha: Pointer to host bus adapter structure. + * @vport_wwpn: Pointer to vport's wwpn information + * @target_wwpn: Pointer to target's wwpn information + * @lun: Lun + * + * This routine disables a lun for oas operations. The routines does so by + * doing the following : + * + * 1) Checks to see if the device data for the lun is created. + * 2) If present, clears the flag indicating this lun is for OAS. + * 3) If the lun is not available by the system, the device data is + * freed. + * + * Return codes: + * false - Error + * true - Success + **/ +bool +lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, + struct lpfc_name *target_wwpn, uint64_t lun) +{ + + struct lpfc_device_data *lun_info; + unsigned long flags; + + if (unlikely(!phba) || !vport_wwpn || !target_wwpn || + !phba->cfg_fof) + return false; + + spin_lock_irqsave(&phba->devicelock, flags); + + /* Check to see if the lun is available. 
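The three OAS entry points introduced here (enable, find-next, disable) manage the per-port LUN list as a whole: enable adds or creates an entry, the find routine iterates enabled entries one at a time while priming *starting_lun for the next call, and disable drops entries the SCSI layer no longer holds. A hedged usage sketch; the walker function and its name are hypothetical, not part of the driver:

/* Illustration only: walk every OAS-enabled LUN known to the port.
 * All-zero WWPNs act as wildcards for "any vport"/"any target".
 */
static void lpfc_oas_walk_example(struct lpfc_hba *phba)
{
        struct lpfc_name vport_wwpn = { };      /* 0 == match any vport  */
        struct lpfc_name target_wwpn = { };     /* 0 == match any target */
        struct lpfc_name found_vport, found_target;
        uint64_t lun = FIND_FIRST_OAS_LUN, found_lun;
        uint32_t status;

        while (lpfc_find_next_oas_lun(phba, &vport_wwpn, &target_wwpn, &lun,
                                      &found_vport, &found_target,
                                      &found_lun, &status)) {
                pr_info("OAS lun %llu %savailable\n",
                        (unsigned long long)found_lun,
                        (status & OAS_LUN_STATUS_EXISTS) ? "" : "not ");
                if (lun == NO_MORE_OAS_LUN)     /* nothing further to fetch */
                        break;
        }
}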
*/ + lun_info = __lpfc_get_device_data(phba, + &phba->luns, vport_wwpn, + target_wwpn, lun); + if (lun_info) { + lun_info->oas_enabled = false; + if (!lun_info->available) + lpfc_delete_device_data(phba, lun_info); + spin_unlock_irqrestore(&phba->devicelock, flags); + return true; + } + + spin_unlock_irqrestore(&phba->devicelock, flags); + return false; +} struct scsi_host_template lpfc_template = { .module = THIS_MODULE, @@ -4070,6 +5988,7 @@ struct scsi_host_template lpfc_template = { .eh_device_reset_handler = lpfc_device_reset_handler, .eh_target_reset_handler = lpfc_target_reset_handler, .eh_bus_reset_handler = lpfc_bus_reset_handler, + .eh_host_reset_handler = lpfc_host_reset_handler, .slave_alloc = lpfc_slave_alloc, .slave_configure = lpfc_slave_configure, .slave_destroy = lpfc_slave_destroy, @@ -4082,6 +6001,7 @@ struct scsi_host_template lpfc_template = { .max_sectors = 0xFFFF, .vendor_id = LPFC_NL_VENDOR_ID, .change_queue_depth = lpfc_change_queue_depth, + .change_queue_type = lpfc_change_queue_type, }; struct scsi_host_template lpfc_vport_template = { @@ -4104,4 +6024,5 @@ struct scsi_host_template lpfc_vport_template = { .shost_attrs = lpfc_vport_attrs, .max_sectors = 0xFFFF, .change_queue_depth = lpfc_change_queue_depth, + .change_queue_type = lpfc_change_queue_type, }; diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index 9075a08cf78..0389ac1e7b8 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2006 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -41,6 +41,20 @@ struct lpfc_rport_data { struct lpfc_nodelist *pnode; /* Pointer to the node structure. 
*/ }; +struct lpfc_device_id { + struct lpfc_name vport_wwpn; + struct lpfc_name target_wwpn; + uint64_t lun; +}; + +struct lpfc_device_data { + struct list_head listentry; + struct lpfc_rport_data *rport_data; + struct lpfc_device_id device_id; + bool oas_enabled; + bool available; +}; + struct fcp_rsp { uint32_t rspRsvd1; /* FC Word 0, byte 0:3 */ uint32_t rspRsvd2; /* FC Word 1, byte 0:3 */ @@ -73,6 +87,7 @@ struct fcp_rsp { #define RSP_RO_MISMATCH_ERR 0x03 #define RSP_TM_NOT_SUPPORTED 0x04 /* Task mgmt function not supported */ #define RSP_TM_NOT_COMPLETED 0x05 /* Task mgmt function not performed */ +#define RSP_TM_INVALID_LU 0x09 /* Task mgmt function to invalid LU */ uint32_t rspInfoRsvd; /* FCP_RSP_INFO bytes 4-7 (reserved) */ @@ -150,9 +165,22 @@ struct lpfc_scsi_buf { struct lpfc_iocbq cur_iocbq; wait_queue_head_t *waitq; unsigned long start_time; + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + /* Used to restore any changes to protection data for error injection */ + void *prot_data_segment; + uint32_t prot_data; + uint32_t prot_data_type; +#define LPFC_INJERR_REFTAG 1 +#define LPFC_INJERR_APPTAG 2 +#define LPFC_INJERR_GUARD 3 +#endif }; #define LPFC_SCSI_DMA_EXT_SIZE 264 #define LPFC_BPL_SIZE 1024 -#define LPFC_RETRY_PAUSE 300 #define MDAC_DIRECT_CMD 0x22 + +#define FIND_FIRST_OAS_LUN 0 +#define NO_MORE_OAS_LUN -1 +#define NOT_OAS_ENABLED_LUN NO_MORE_OAS_LUN diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 23a27592388..32ada050557 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2011 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -67,6 +67,12 @@ static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, struct hbq_dmabuf *); static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_cqe *); +static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *, + int); +static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *, + uint32_t); +static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba); +static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba); static IOCB_t * lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) @@ -92,6 +98,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) union lpfc_wqe *temp_wqe; struct lpfc_register doorbell; uint32_t host_index; + uint32_t idx; /* sanity check on queue memory */ if (unlikely(!q)) @@ -99,8 +106,12 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) temp_wqe = q->qe[q->host_index].wqe; /* If the host has not yet processed the next entry then we are done */ - if (((q->host_index + 1) % q->entry_count) == q->hba_index) + idx = ((q->host_index + 1) % q->entry_count); + if (idx == q->hba_index) { + q->WQ_overflow++; return -ENOMEM; + } + q->WQ_posted++; /* set consumption flag every once in a while */ if (!((q->host_index + 1) % q->entry_repost)) bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); @@ -110,15 +121,22 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) /* Update the host index before invoking device */ host_index = q->host_index; - q->host_index = ((q->host_index + 1) % q->entry_count); + + q->host_index = idx; /* Ring Doorbell */ doorbell.word0 = 0; - bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1); - bf_set(lpfc_wq_doorbell_index, &doorbell, host_index); - bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id); - writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr); - readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */ + if (q->db_format == LPFC_DB_LIST_FORMAT) { + bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1); + bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index); + bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id); + } else if (q->db_format == LPFC_DB_RING_FORMAT) { + bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1); + bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id); + } else { + return -EINVAL; + } + writel(doorbell.word0, q->db_regaddr); return 0; } @@ -192,7 +210,6 @@ lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe) bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1); bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id); writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr); - readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */ return 0; } @@ -232,6 +249,7 @@ static struct lpfc_eqe * lpfc_sli4_eq_get(struct lpfc_queue *q) { struct lpfc_eqe *eqe; + uint32_t idx; /* sanity check on queue memory */ if (unlikely(!q)) @@ -242,14 +260,44 @@ lpfc_sli4_eq_get(struct lpfc_queue *q) if (!bf_get_le32(lpfc_eqe_valid, eqe)) return NULL; /* If the host has not yet processed the next entry then we are done */ - if (((q->hba_index + 1) % q->entry_count) == q->host_index) + idx = ((q->hba_index + 1) % q->entry_count); + if (idx == q->host_index) return NULL; - q->hba_index = ((q->hba_index + 1) % q->entry_count); + q->hba_index = idx; + + /* + * insert barrier for instruction interlock : data from the hardware + * must have the valid bit checked before it can be copied and acted + * upon. 
Given what was seen in lpfc_sli4_cq_get() of speculative + * instructions allowing action on content before valid bit checked, + * add barrier here as well. May not be needed as "content" is a + * single 32-bit entity here (vs multi word structure for cq's). + */ + mb(); return eqe; } /** + * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ + * @q: The Event Queue to disable interrupts + * + **/ +static inline void +lpfc_sli4_eq_clr_intr(struct lpfc_queue *q) +{ + struct lpfc_register doorbell; + + doorbell.word0 = 0; + bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); + bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); + bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell, + (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); + bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); + writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr); +} + +/** * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ * @q: The Event Queue that the host has completed processing for. * @arm: Indicates whether the host wants to arms this CQ. @@ -293,7 +341,9 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm) } bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released); bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); - bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id); + bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell, + (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); + bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr); /* PCI read to flush PCI pipeline on re-arming for INTx mode */ if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) @@ -314,6 +364,7 @@ static struct lpfc_cqe * lpfc_sli4_cq_get(struct lpfc_queue *q) { struct lpfc_cqe *cqe; + uint32_t idx; /* sanity check on queue memory */ if (unlikely(!q)) @@ -323,11 +374,23 @@ lpfc_sli4_cq_get(struct lpfc_queue *q) if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe)) return NULL; /* If the host has not yet processed the next entry then we are done */ - if (((q->hba_index + 1) % q->entry_count) == q->host_index) + idx = ((q->hba_index + 1) % q->entry_count); + if (idx == q->host_index) return NULL; cqe = q->qe[q->hba_index].cqe; - q->hba_index = ((q->hba_index + 1) % q->entry_count); + q->hba_index = idx; + + /* + * insert barrier for instruction interlock : data from the hardware + * must have the valid bit checked before it can be copied and acted + * upon. Speculative instructions were allowing a bcopy at the start + * of lpfc_sli4_fp_handle_wcqe(), which is called immediately + * after our return, to copy data before the valid bit check above + * was done. As such, some of the copied data was stale. The barrier + * ensures the check is before any data is copied. 
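The same ordering problem is being fixed in both lpfc_sli4_eq_get() and lpfc_sli4_cq_get(): nothing stops the CPU from speculatively loading the rest of the entry before the valid-bit load completes, so a stale copy could be acted on. Reduced to its essentials, the consumer-side pattern the two hunks enforce with mb() looks like this (illustration only, not driver code):

/* Generic "valid bit first, payload second" consumer. */
struct hw_entry {
        u32 word0;              /* bit 0: written last by the hardware */
        u32 payload[15];
};

static bool consume_entry(struct hw_entry *e, u32 *out)
{
        if (!(e->word0 & 0x1))
                return false;   /* not valid yet, poll again later */

        mb();   /* keep the payload loads below after the valid-bit load */

        memcpy(out, e->payload, sizeof(e->payload));
        return true;
}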
+ */ + mb(); return cqe; } @@ -372,7 +435,9 @@ lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm) bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released); bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION); - bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id); + bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell, + (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT)); + bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id); writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr); return released; } @@ -396,11 +461,12 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, struct lpfc_rqe *temp_hrqe; struct lpfc_rqe *temp_drqe; struct lpfc_register doorbell; - int put_index = hq->host_index; + int put_index; /* sanity check on queue memory */ if (unlikely(!hq) || unlikely(!dq)) return -ENOMEM; + put_index = hq->host_index; temp_hrqe = hq->qe[hq->host_index].rqe; temp_drqe = dq->qe[dq->host_index].rqe; @@ -421,10 +487,20 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, /* Ring The Header Receive Queue Doorbell */ if (!(hq->host_index % hq->entry_repost)) { doorbell.word0 = 0; - bf_set(lpfc_rq_doorbell_num_posted, &doorbell, - hq->entry_repost); - bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id); - writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr); + if (hq->db_format == LPFC_DB_RING_FORMAT) { + bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell, + hq->entry_repost); + bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id); + } else if (hq->db_format == LPFC_DB_LIST_FORMAT) { + bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell, + hq->entry_repost); + bf_set(lpfc_rq_db_list_fm_index, &doorbell, + hq->host_index); + bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id); + } else { + return -EINVAL; + } + writel(doorbell.word0, hq->db_regaddr); } return put_index; } @@ -466,8 +542,8 @@ lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq) static inline IOCB_t * lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) { - return (IOCB_t *) (((char *) pring->cmdringaddr) + - pring->cmdidx * phba->iocb_cmd_size); + return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) + + pring->sli.sli3.cmdidx * phba->iocb_cmd_size); } /** @@ -483,8 +559,8 @@ lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) static inline IOCB_t * lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) { - return (IOCB_t *) (((char *) pring->rspringaddr) + - pring->rspidx * phba->iocb_rsp_size); + return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) + + pring->sli.sli3.rspidx * phba->iocb_rsp_size); } /** @@ -496,7 +572,7 @@ lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) * allocation is successful, it returns pointer to the newly * allocated iocb object else it returns NULL. **/ -static struct lpfc_iocbq * +struct lpfc_iocbq * __lpfc_sli_get_iocbq(struct lpfc_hba *phba) { struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list; @@ -554,81 +630,6 @@ __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) } /** - * __lpfc_set_rrq_active - set RRQ active bit in the ndlp's xri_bitmap. - * @phba: Pointer to HBA context object. - * @ndlp: nodelist pointer for this target. - * @xritag: xri used in this exchange. - * @rxid: Remote Exchange ID. - * @send_rrq: Flag used to determine if we should send rrq els cmd. - * - * This function is called with hbalock held. - * The active bit is set in the ndlp's active rrq xri_bitmap. 
Allocates an - * rrq struct and adds it to the active_rrq_list. - * - * returns 0 for rrq slot for this xri - * < 0 Were not able to get rrq mem or invalid parameter. - **/ -static int -__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, - uint16_t xritag, uint16_t rxid, uint16_t send_rrq) -{ - struct lpfc_node_rrq *rrq; - int empty; - uint32_t did = 0; - - - if (!ndlp) - return -EINVAL; - - if (!phba->cfg_enable_rrq) - return -EINVAL; - - if (phba->pport->load_flag & FC_UNLOADING) { - phba->hba_flag &= ~HBA_RRQ_ACTIVE; - goto out; - } - did = ndlp->nlp_DID; - - /* - * set the active bit even if there is no mem available. - */ - if (NLP_CHK_FREE_REQ(ndlp)) - goto out; - - if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING)) - goto out; - - if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap)) - goto out; - - rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); - if (rrq) { - rrq->send_rrq = send_rrq; - rrq->xritag = xritag; - rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1); - rrq->ndlp = ndlp; - rrq->nlp_DID = ndlp->nlp_DID; - rrq->vport = ndlp->vport; - rrq->rxid = rxid; - empty = list_empty(&phba->active_rrq_list); - rrq->send_rrq = send_rrq; - list_add_tail(&rrq->list, &phba->active_rrq_list); - if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) { - phba->hba_flag |= HBA_RRQ_ACTIVE; - if (empty) - lpfc_worker_wake_up(phba); - } - return 0; - } -out: - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "2921 Can't set rrq active xri:0x%x rxid:0x%x" - " DID:0x%x Send:%d\n", - xritag, rxid, did, send_rrq); - return -EINVAL; -} - -/** * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap. * @phba: Pointer to HBA context object. * @xritag: xri used in this exchange. @@ -655,7 +656,7 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba, if (!ndlp) goto out; - if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) { + if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) { rrq->send_rrq = 0; rrq->xritag = 0; rrq->rrq_stop_time = 0; @@ -689,7 +690,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba) spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag &= ~HBA_RRQ_ACTIVE; - next_time = jiffies + HZ * (phba->fc_ratov + 1); + next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { if (time_after(jiffies, rrq->rrq_stop_time)) @@ -698,7 +699,8 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba) next_time = rrq->rrq_stop_time; } spin_unlock_irqrestore(&phba->hbalock, iflags); - if (!list_empty(&phba->active_rrq_list)) + if ((!list_empty(&phba->active_rrq_list)) && + (!(phba->pport->load_flag & FC_UNLOADING))) mod_timer(&phba->rrq_tmr, next_time); list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) { list_del(&rrq->list); @@ -804,7 +806,7 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba) return; spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag &= ~HBA_RRQ_ACTIVE; - next_time = jiffies + HZ * (phba->fc_ratov * 2); + next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)); list_splice_init(&phba->active_rrq_list, &rrq_list); spin_unlock_irqrestore(&phba->hbalock, iflags); @@ -812,7 +814,9 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba) list_del(&rrq->list); lpfc_clr_rrq_active(phba, rrq->xritag, rrq); } - if (!list_empty(&phba->active_rrq_list)) + if ((!list_empty(&phba->active_rrq_list)) && + (!(phba->pport->load_flag & FC_UNLOADING))) + mod_timer(&phba->rrq_tmr, next_time); } @@ -833,7 +837,9 @@ lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist 
*ndlp, { if (!ndlp) return 0; - if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap)) + if (!ndlp->active_rrqs_xri_bitmap) + return 0; + if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap)) return 1; else return 0; @@ -856,15 +862,74 @@ lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, **/ int lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, - uint16_t xritag, uint16_t rxid, uint16_t send_rrq) + uint16_t xritag, uint16_t rxid, uint16_t send_rrq) { - int ret; unsigned long iflags; + struct lpfc_node_rrq *rrq; + int empty; + + if (!ndlp) + return -EINVAL; + + if (!phba->cfg_enable_rrq) + return -EINVAL; + + spin_lock_irqsave(&phba->hbalock, iflags); + if (phba->pport->load_flag & FC_UNLOADING) { + phba->hba_flag &= ~HBA_RRQ_ACTIVE; + goto out; + } + + /* + * set the active bit even if there is no mem available. + */ + if (NLP_CHK_FREE_REQ(ndlp)) + goto out; + + if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING)) + goto out; + + if (!ndlp->active_rrqs_xri_bitmap) + goto out; + + if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap)) + goto out; + spin_unlock_irqrestore(&phba->hbalock, iflags); + rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); + if (!rrq) { + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x" + " DID:0x%x Send:%d\n", + xritag, rxid, ndlp->nlp_DID, send_rrq); + return -EINVAL; + } + if (phba->cfg_enable_rrq == 1) + rrq->send_rrq = send_rrq; + else + rrq->send_rrq = 0; + rrq->xritag = xritag; + rrq->rrq_stop_time = jiffies + + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); + rrq->ndlp = ndlp; + rrq->nlp_DID = ndlp->nlp_DID; + rrq->vport = ndlp->vport; + rrq->rxid = rxid; spin_lock_irqsave(&phba->hbalock, iflags); - ret = __lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq); + empty = list_empty(&phba->active_rrq_list); + list_add_tail(&rrq->list, &phba->active_rrq_list); + phba->hba_flag |= HBA_RRQ_ACTIVE; + if (empty) + lpfc_worker_wake_up(phba); spin_unlock_irqrestore(&phba->hbalock, iflags); - return ret; + return 0; +out: + spin_unlock_irqrestore(&phba->hbalock, iflags); + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2921 Can't set rrq active xri:0x%x rxid:0x%x" + " DID:0x%x Send:%d\n", + xritag, rxid, ndlp->nlp_DID, send_rrq); + return -EINVAL; } /** @@ -893,6 +958,8 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) ndlp = piocbq->context_un.ndlp; + else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) + ndlp = piocbq->context_un.ndlp; else ndlp = piocbq->context1; @@ -901,7 +968,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) while (!found) { if (!sglq) return NULL; - if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) { + if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag)) { /* This xri has an rrq outstanding for this DID. * put it back in the list and get another xri. 
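lpfc_set_rrq_active() above absorbs what used to be __lpfc_set_rrq_active() and reorders it, presumably so the GFP_KERNEL mempool allocation no longer happens with hbalock held (sleeping allocations are not allowed under a spinlock): the xri bit is claimed under the lock, the lock is dropped for the allocation, and it is re-taken only to link the new node onto the active list. A generic, self-contained restatement of that shape (names are illustrative):

static int add_tracked_node(spinlock_t *lock, unsigned long *bitmap,
                            unsigned int bit, struct list_head *active,
                            mempool_t *pool)
{
        struct list_head *node;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        if (test_and_set_bit(bit, bitmap)) {
                spin_unlock_irqrestore(lock, flags);
                return -EBUSY;                  /* already tracked */
        }
        spin_unlock_irqrestore(lock, flags);

        node = mempool_alloc(pool, GFP_KERNEL); /* may sleep, no lock held */
        if (!node)
                return -ENOMEM;         /* bit stays claimed, as above */

        spin_lock_irqsave(lock, flags);
        list_add_tail(node, active);
        spin_unlock_irqrestore(lock, flags);
        return 0;
}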
*/ @@ -975,6 +1042,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) else sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag); + if (sglq) { if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && (sglq->state != SGL_XRI_ABORTED)) { @@ -991,7 +1059,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) &phba->sli4_hba.lpfc_sgl_list); /* Check if TXQ queue needs to be serviced */ - if (pring->txq_cnt) + if (!list_empty(&pring->txq)) lpfc_worker_wake_up(phba); } } @@ -1022,6 +1090,7 @@ __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) { size_t start_clean = offsetof(struct lpfc_iocbq, iocb); + /* * Clean all volatile data fields, preserve iotag and node struct. */ @@ -1088,7 +1157,6 @@ lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist, while (!list_empty(iocblist)) { list_remove_head(iocblist, piocb, struct lpfc_iocbq, list); - if (!piocb->iocb_cmpl) lpfc_sli_release_iocbq(phba, piocb); else { @@ -1275,19 +1343,18 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb) { list_add_tail(&piocb->list, &pring->txcmplq); - piocb->iocb_flag |= LPFC_IO_ON_Q; - pring->txcmplq_cnt++; - if (pring->txcmplq_cnt > pring->txcmplq_max) - pring->txcmplq_max = pring->txcmplq_cnt; + piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ; if ((unlikely(pring->ringno == LPFC_ELS_RING)) && (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && - (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { + (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) && + (!(piocb->vport->load_flag & FC_UNLOADING))) { if (!piocb->vport) BUG(); else mod_timer(&piocb->vport->els_tmofunc, - jiffies + HZ * (phba->fc_ratov << 1)); + jiffies + + msecs_to_jiffies(1000 * (phba->fc_ratov << 1))); } @@ -1310,8 +1377,6 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) struct lpfc_iocbq *cmd_iocb; list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); - if (cmd_iocb != NULL) - pring->txq_cnt--; return cmd_iocb; } @@ -1333,21 +1398,23 @@ static IOCB_t * lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) { struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; - uint32_t max_cmd_idx = pring->numCiocb; - if ((pring->next_cmdidx == pring->cmdidx) && - (++pring->next_cmdidx >= max_cmd_idx)) - pring->next_cmdidx = 0; + uint32_t max_cmd_idx = pring->sli.sli3.numCiocb; + if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) && + (++pring->sli.sli3.next_cmdidx >= max_cmd_idx)) + pring->sli.sli3.next_cmdidx = 0; - if (unlikely(pring->local_getidx == pring->next_cmdidx)) { + if (unlikely(pring->sli.sli3.local_getidx == + pring->sli.sli3.next_cmdidx)) { - pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); + pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); - if (unlikely(pring->local_getidx >= max_cmd_idx)) { + if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0315 Ring %d issue: portCmdGet %d " "is bigger than cmd ring %d\n", pring->ringno, - pring->local_getidx, max_cmd_idx); + pring->sli.sli3.local_getidx, + max_cmd_idx); phba->link_state = LPFC_HBA_ERROR; /* @@ -1362,7 +1429,7 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) return NULL; } - if (pring->local_getidx == pring->next_cmdidx) + if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx) return NULL; } @@ -1497,8 +1564,8 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, * Let the 
HBA know what IOCB slot will be the next one the * driver will put a command into. */ - pring->cmdidx = pring->next_cmdidx; - writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); + pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx; + writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); } /** @@ -1578,8 +1645,9 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) * (c) link attention events can be processed (fcp ring only) * (d) IOCB processing is not blocked by the outstanding mbox command. */ - if (pring->txq_cnt && - lpfc_is_link_up(phba) && + + if (lpfc_is_link_up(phba) && + (!list_empty(&pring->txq)) && (pring->ringno != phba->sli.fcp_ring || phba->sli.sli_flag & LPFC_PROCESS_LA)) { @@ -2069,6 +2137,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand) case MBX_READ_EVENT_LOG: case MBX_SECURITY_MGMT: case MBX_AUTH_PORT: + case MBX_ACCESS_VDATA: ret = mbxCommand; break; default: @@ -2286,7 +2355,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) /* Mailbox cmd <cmd> Cmpl <cmpl> */ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " - "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", + "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " + "x%x x%x x%x\n", pmb->vport ? pmb->vport->vpi : 0, pmbox->mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, pmb), @@ -2300,7 +2370,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) pmbox->un.varWords[4], pmbox->un.varWords[5], pmbox->un.varWords[6], - pmbox->un.varWords[7]); + pmbox->un.varWords[7], + pmbox->un.varWords[8], + pmbox->un.varWords[9], + pmbox->un.varWords[10]); if (pmb->mbox_cmpl) pmb->mbox_cmpl(phba,pmb); @@ -2574,9 +2647,8 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, if (iotag != 0 && iotag <= phba->sli.last_iotag) { cmd_iocb = phba->sli.iocbq_lookup[iotag]; list_del_init(&cmd_iocb->list); - if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) { - pring->txcmplq_cnt--; - cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q; + if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { + cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; } return cmd_iocb; } @@ -2609,14 +2681,13 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, if (iotag != 0 && iotag <= phba->sli.last_iotag) { cmd_iocb = phba->sli.iocbq_lookup[iotag]; - list_del_init(&cmd_iocb->list); - if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) { - cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q; - pring->txcmplq_cnt--; + if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { + /* remove from txcmpl queue list */ + list_del_init(&cmd_iocb->list); + cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; + return cmd_iocb; } - return cmd_iocb; } - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0372 iotag x%x is out off range: max iotag (x%x)\n", iotag, phba->sli.last_iotag); @@ -2799,7 +2870,7 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) "0312 Ring %d handler: portRspPut %d " "is bigger than rsp ring %d\n", pring->ringno, le32_to_cpu(pgp->rspPutInx), - pring->numRiocb); + pring->sli.sli3.numRiocb); phba->link_state = LPFC_HBA_ERROR; @@ -2828,10 +2899,26 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) void lpfc_poll_eratt(unsigned long ptr) { struct lpfc_hba *phba; - uint32_t eratt = 0; + uint32_t eratt = 0, rem; + uint64_t sli_intr, cnt; phba = (struct lpfc_hba *)ptr; + /* Here we will also keep track of interrupts per sec of the hba */ + sli_intr = phba->sli.slistat.sli_intr; + + if (phba->sli.slistat.sli_prev_intr > sli_intr) + cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) + + 
sli_intr); + else + cnt = (sli_intr - phba->sli.slistat.sli_prev_intr); + + /* 64-bit integer division not supporte on 32-bit x86 - use do_div */ + rem = do_div(cnt, LPFC_ERATT_POLL_INTERVAL); + phba->sli.slistat.sli_ips = cnt; + + phba->sli.slistat.sli_prev_intr = sli_intr; + /* Check chip HA register for error event */ eratt = lpfc_sli_check_eratt(phba); @@ -2840,8 +2927,9 @@ void lpfc_poll_eratt(unsigned long ptr) lpfc_worker_wake_up(phba); else /* Restart the timer for next eratt poll */ - mod_timer(&phba->eratt_poll, jiffies + - HZ * LPFC_ERATT_POLL_INTERVAL); + mod_timer(&phba->eratt_poll, + jiffies + + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); return; } @@ -2886,7 +2974,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, * The next available response entry should never exceed the maximum * entries. If it does, treat it as an adapter hardware error. */ - portRspMax = pring->numRiocb; + portRspMax = pring->sli.sli3.numRiocb; portRspPut = le32_to_cpu(pgp->rspPutInx); if (unlikely(portRspPut >= portRspMax)) { lpfc_sli_rsp_pointers_error(phba, pring); @@ -2900,7 +2988,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, phba->fcp_ring_in_use = 1; rmb(); - while (pring->rspidx != portRspPut) { + while (pring->sli.sli3.rspidx != portRspPut) { /* * Fetch an entry off the ring and copy it into a local data * structure. The copy involves a byte-swap since the @@ -2909,8 +2997,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, entry = lpfc_resp_iocb(phba, pring); phba->last_completion_time = jiffies; - if (++pring->rspidx >= portRspMax) - pring->rspidx = 0; + if (++pring->sli.sli3.rspidx >= portRspMax) + pring->sli.sli3.rspidx = 0; lpfc_sli_pcimem_bcopy((uint32_t *) entry, (uint32_t *) &rspiocbq.iocb, @@ -2928,7 +3016,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, * queuedepths of the SCSI device. */ if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && - (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { + ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == + IOERR_NO_RESOURCES)) { spin_unlock_irqrestore(&phba->hbalock, iflag); phba->lpfc_rampdown_queue_depth(phba); spin_lock_irqsave(&phba->hbalock, iflag); @@ -3011,9 +3100,10 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, * been updated, sync the pgp->rspPutInx and fetch the new port * response put pointer. */ - writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); + writel(pring->sli.sli3.rspidx, + &phba->host_gp[pring->ringno].rspGetInx); - if (pring->rspidx == portRspPut) + if (pring->sli.sli3.rspidx == portRspPut) portRspPut = le32_to_cpu(pgp->rspPutInx); } @@ -3028,7 +3118,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, pring->stats.iocb_cmd_empty++; /* Force update of the local copy of cmdGetInx */ - pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); + pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); lpfc_sli_resume_iocb(phba, pring); if ((pring->lpfc_sli_cmd_available)) @@ -3099,7 +3189,8 @@ lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, * queuedepths of the SCSI device. 
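The interrupts-per-interval bookkeeping added to lpfc_poll_eratt() divides a 64-bit counter delta, and as the in-line comment notes, a plain 64-bit '/' would need libgcc helpers that 32-bit kernels do not link against; do_div() divides the dividend in place and returns the remainder. A self-contained restatement of that computation (function name invented):

#include <asm/div64.h>

/* Interrupts per second over 'interval' seconds, handling a wrapped
 * 64-bit counter the same way the hunk above does.
 */
static u64 lpfc_intr_rate(u64 cur, u64 prev, u32 interval)
{
        u64 delta;

        if (prev > cur)                         /* counter wrapped */
                delta = ((u64)(-1) - prev) + cur;
        else
                delta = cur - prev;

        do_div(delta, interval);                /* delta becomes the quotient */
        return delta;
}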
*/ if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && - (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { + ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == + IOERR_NO_RESOURCES)) { spin_unlock_irqrestore(&phba->hbalock, iflag); phba->lpfc_rampdown_queue_depth(phba); spin_lock_irqsave(&phba->hbalock, iflag); @@ -3202,7 +3293,7 @@ lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, if (free_saveq) { list_for_each_entry_safe(rspiocbp, next_iocb, &saveq->list, list) { - list_del(&rspiocbp->list); + list_del_init(&rspiocbp->list); __lpfc_sli_release_iocbq(phba, rspiocbp); } __lpfc_sli_release_iocbq(phba, saveq); @@ -3260,7 +3351,7 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, * The next available response entry should never exceed the maximum * entries. If it does, treat it as an adapter hardware error. */ - portRspMax = pring->numRiocb; + portRspMax = pring->sli.sli3.numRiocb; portRspPut = le32_to_cpu(pgp->rspPutInx); if (portRspPut >= portRspMax) { /* @@ -3282,7 +3373,7 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, } rmb(); - while (pring->rspidx != portRspPut) { + while (pring->sli.sli3.rspidx != portRspPut) { /* * Build a completion list and call the appropriate handler. * The process is to get the next available response iocb, get @@ -3310,8 +3401,8 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, phba->iocb_rsp_size); irsp = &rspiocbp->iocb; - if (++pring->rspidx >= portRspMax) - pring->rspidx = 0; + if (++pring->sli.sli3.rspidx >= portRspMax) + pring->sli.sli3.rspidx = 0; if (pring->ringno == LPFC_ELS_RING) { lpfc_debugfs_slow_ring_trc(phba, @@ -3321,7 +3412,8 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, *(((uint32_t *) irsp) + 7)); } - writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); + writel(pring->sli.sli3.rspidx, + &phba->host_gp[pring->ringno].rspGetInx); spin_unlock_irqrestore(&phba->hbalock, iflag); /* Handle the response IOCB */ @@ -3333,10 +3425,10 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, * the pgp->rspPutInx in the MAILBOX_tand fetch the new port * response put pointer. */ - if (pring->rspidx == portRspPut) { + if (pring->sli.sli3.rspidx == portRspPut) { portRspPut = le32_to_cpu(pgp->rspPutInx); } - } /* while (pring->rspidx != portRspPut) */ + } /* while (pring->sli.sli3.rspidx != portRspPut) */ if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { /* At least one response entry has been freed */ @@ -3351,7 +3443,7 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, pring->stats.iocb_cmd_empty++; /* Force update of the local copy of cmdGetInx */ - pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); + pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); lpfc_sli_resume_iocb(phba, pring); if ((pring->lpfc_sli_cmd_available)) @@ -3440,15 +3532,27 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) /* Error everything on txq and txcmplq * First do the txq. 
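Both ring handlers change the queue-ramp-down trigger from an exact compare of ulpWord[4] against IOERR_NO_RESOURCES to a compare of only its masked error-code bits; the remaining bits of that word can carry extra parameter data on some completions, so the exact compare could fail even when the local-reject reason really is "no resources". A one-line restatement as a hypothetical predicate:

/* Hypothetical predicate mirroring the tightened check above. */
static bool lpfc_rsp_is_out_of_resources(const IOCB_t *irsp)
{
        return irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
               (irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == IOERR_NO_RESOURCES;
}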
*/ - spin_lock_irq(&phba->hbalock); - list_splice_init(&pring->txq, &completions); - pring->txq_cnt = 0; + if (phba->sli_rev >= LPFC_SLI_REV4) { + spin_lock_irq(&pring->ring_lock); + list_splice_init(&pring->txq, &completions); + pring->txq_cnt = 0; + spin_unlock_irq(&pring->ring_lock); - /* Next issue ABTS for everything on the txcmplq */ - list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) - lpfc_sli_issue_abort_iotag(phba, pring, iocb); + spin_lock_irq(&phba->hbalock); + /* Next issue ABTS for everything on the txcmplq */ + list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) + lpfc_sli_issue_abort_iotag(phba, pring, iocb); + spin_unlock_irq(&phba->hbalock); + } else { + spin_lock_irq(&phba->hbalock); + list_splice_init(&pring->txq, &completions); + pring->txq_cnt = 0; - spin_unlock_irq(&phba->hbalock); + /* Next issue ABTS for everything on the txcmplq */ + list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) + lpfc_sli_issue_abort_iotag(phba, pring, iocb); + spin_unlock_irq(&phba->hbalock); + } /* Cancel all the IOCBs from the completions list */ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, @@ -3456,6 +3560,36 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) } /** + * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * + * This function aborts all iocbs in FCP rings and frees all the iocb + * objects in txq. This function issues an abort iocb for all the iocb commands + * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before + * the return of this function. The caller is not required to hold any locks. + **/ +void +lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring; + uint32_t i; + + /* Look on all the FCP Rings for the iotag */ + if (phba->sli_rev >= LPFC_SLI_REV4) { + for (i = 0; i < phba->cfg_fcp_io_channel; i++) { + pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS]; + lpfc_sli_abort_iocb_ring(phba, pring); + } + } else { + pring = &psli->ring[psli->fcp_ring]; + lpfc_sli_abort_iocb_ring(phba, pring); + } +} + + +/** * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring * @phba: Pointer to HBA context object. 
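lpfc_sli_abort_iocb_ring() and the new lpfc_sli_abort_fcp_rings() both rely on the SLI-4 convention that the per-channel FCP rings are appended after the fixed SLI-3 rings in the psli->ring[] array, while SLI-3 keeps its single FCP ring. A small sketch of that addressing; the helper is illustrative and not in the driver:

static struct lpfc_sli_ring *
lpfc_fcp_channel_to_ring(struct lpfc_hba *phba, uint32_t channel)
{
        struct lpfc_sli *psli = &phba->sli;

        if (phba->sli_rev >= LPFC_SLI_REV4)
                return &psli->ring[MAX_SLI3_CONFIGURED_RINGS + channel];

        return &psli->ring[psli->fcp_ring];     /* single SLI-3 FCP ring */
}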
* @@ -3472,27 +3606,55 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) LIST_HEAD(txcmplq); struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; - - /* Currently, only one fcp ring */ - pring = &psli->ring[psli->fcp_ring]; + uint32_t i; spin_lock_irq(&phba->hbalock); - /* Retrieve everything on txq */ - list_splice_init(&pring->txq, &txq); - pring->txq_cnt = 0; - - /* Retrieve everything on the txcmplq */ - list_splice_init(&pring->txcmplq, &txcmplq); - pring->txcmplq_cnt = 0; + /* Indicate the I/O queues are flushed */ + phba->hba_flag |= HBA_FCP_IOQ_FLUSH; spin_unlock_irq(&phba->hbalock); - /* Flush the txq */ - lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, - IOERR_SLI_DOWN); + /* Look on all the FCP Rings for the iotag */ + if (phba->sli_rev >= LPFC_SLI_REV4) { + for (i = 0; i < phba->cfg_fcp_io_channel; i++) { + pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS]; + + spin_lock_irq(&pring->ring_lock); + /* Retrieve everything on txq */ + list_splice_init(&pring->txq, &txq); + /* Retrieve everything on the txcmplq */ + list_splice_init(&pring->txcmplq, &txcmplq); + pring->txq_cnt = 0; + pring->txcmplq_cnt = 0; + spin_unlock_irq(&pring->ring_lock); + + /* Flush the txq */ + lpfc_sli_cancel_iocbs(phba, &txq, + IOSTAT_LOCAL_REJECT, + IOERR_SLI_DOWN); + /* Flush the txcmpq */ + lpfc_sli_cancel_iocbs(phba, &txcmplq, + IOSTAT_LOCAL_REJECT, + IOERR_SLI_DOWN); + } + } else { + pring = &psli->ring[psli->fcp_ring]; - /* Flush the txcmpq */ - lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, - IOERR_SLI_DOWN); + spin_lock_irq(&phba->hbalock); + /* Retrieve everything on txq */ + list_splice_init(&pring->txq, &txq); + /* Retrieve everything on the txcmplq */ + list_splice_init(&pring->txcmplq, &txcmplq); + pring->txq_cnt = 0; + pring->txcmplq_cnt = 0; + spin_unlock_irq(&phba->hbalock); + + /* Flush the txq */ + lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, + IOERR_SLI_DOWN); + /* Flush the txcmpq */ + lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, + IOERR_SLI_DOWN); + } } /** @@ -3869,10 +4031,10 @@ lpfc_sli_brdreset(struct lpfc_hba *phba) for (i = 0; i < psli->num_rings; i++) { pring = &psli->ring[i]; pring->flag = 0; - pring->rspidx = 0; - pring->next_cmdidx = 0; - pring->local_getidx = 0; - pring->cmdidx = 0; + pring->sli.sli3.rspidx = 0; + pring->sli.sli3.next_cmdidx = 0; + pring->sli.sli3.local_getidx = 0; + pring->sli.sli3.cmdidx = 0; pring->missbufcnt = 0; } @@ -3895,11 +4057,13 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; uint16_t cfg_value; + int rc = 0; /* Reset HBA */ lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "0295 Reset HBA Data: x%x x%x\n", - phba->pport->port_state, psli->sli_flag); + "0295 Reset HBA Data: x%x x%x x%x\n", + phba->pport->port_state, psli->sli_flag, + phba->hba_flag); /* perform board reset */ phba->fc_eventTag = 0; @@ -3912,6 +4076,12 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba) phba->fcf.fcf_flag = 0; spin_unlock_irq(&phba->hbalock); + /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */ + if (phba->hba_flag & HBA_FW_DUMP_OP) { + phba->hba_flag &= ~HBA_FW_DUMP_OP; + return rc; + } + /* Now physically reset the device */ lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0389 Performing PCI function reset!\n"); @@ -3921,14 +4091,14 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba) pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); - /* Perform FCoE PCI function reset */ + /* Perform FCoE PCI function reset before freeing queue 
memory */ + rc = lpfc_pci_function_reset(phba); lpfc_sli4_queue_destroy(phba); - lpfc_pci_function_reset(phba); /* Restore PCI cmd register */ pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); - return 0; + return rc; } /** @@ -4020,6 +4190,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; uint32_t hba_aer_enabled; + int rc; /* Restart HBA */ lpfc_printf_log(phba, KERN_INFO, LOG_SLI, @@ -4029,7 +4200,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) /* Take PCIe device Advanced Error Reporting (AER) state */ hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; - lpfc_sli4_brdreset(phba); + rc = lpfc_sli4_brdreset(phba); spin_lock_irq(&phba->hbalock); phba->pport->stopped = 0; @@ -4046,7 +4217,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) lpfc_hba_down_post(phba); - return 0; + return rc; } /** @@ -4504,7 +4675,8 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) } else { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2708 This device does not support " - "Advanced Error Reporting (AER)\n"); + "Advanced Error Reporting (AER): %d\n", + rc); phba->cfg_aer_support = 0; } } @@ -4747,7 +4919,7 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, * is attached to. * * Return codes - * 0 - sucessful + * 0 - successful * otherwise - failed to retrieve physical port name **/ static int @@ -4895,24 +5067,30 @@ out_free_mboxq: static void lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) { - uint8_t fcp_eqidx; + int fcp_eqidx; lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); fcp_eqidx = 0; if (phba->sli4_hba.fcp_cq) { - do + do { lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], LPFC_QUEUE_REARM); - while (++fcp_eqidx < phba->cfg_fcp_eq_count); + } while (++fcp_eqidx < phba->cfg_fcp_io_channel); } - lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM); - if (phba->sli4_hba.fp_eq) { - for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; + + if (phba->cfg_fof) + lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM); + + if (phba->sli4_hba.hba_eq) { + for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) - lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx], + lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx], LPFC_QUEUE_REARM); } + + if (phba->cfg_fof) + lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM); } /** @@ -4985,7 +5163,12 @@ lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type, &rsrc_info->u.rsp); *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size, &rsrc_info->u.rsp); - err_exit: + + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "3162 Retrieved extents type-%d from port: count:%d, " + "size:%d\n", type, *extnt_count, *extnt_size); + +err_exit: mempool_free(mbox, phba->mbox_mem_pool); return rc; } @@ -5069,7 +5252,7 @@ lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type) * 0: if successful **/ static int -lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt, +lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt, uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox) { int rc = 0; @@ -5078,7 +5261,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt, uint32_t alloc_len, mbox_tmo; /* Calculate the total requested length of the dma memory */ - req_len = *extnt_cnt * sizeof(uint16_t); + req_len = extnt_cnt * sizeof(uint16_t); /* * Calculate the size of an embedded mailbox. 
The uint32_t @@ -5093,7 +5276,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt, */ *emb = LPFC_SLI4_MBX_EMBED; if (req_len > emb_len) { - req_len = *extnt_cnt * sizeof(uint16_t) + + req_len = extnt_cnt * sizeof(uint16_t) + sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); *emb = LPFC_SLI4_MBX_NEMBED; @@ -5109,7 +5292,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt, "size (x%x)\n", alloc_len, req_len); return -ENOMEM; } - rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, *extnt_cnt, type, *emb); + rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb); if (unlikely(rc)) return -EIO; @@ -5167,17 +5350,15 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) return -ENOMEM; } - lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT, - "2903 Available Resource Extents " - "for resource type 0x%x: Count: 0x%x, " - "Size 0x%x\n", type, rsrc_cnt, - rsrc_size); + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI, + "2903 Post resource extents type-0x%x: " + "count:%d, size %d\n", type, rsrc_cnt, rsrc_size); mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; - rc = lpfc_sli4_cfg_post_extnts(phba, &rsrc_cnt, type, &emb, mbox); + rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox); if (unlikely(rc)) { rc = -EIO; goto err_exit; @@ -5268,6 +5449,7 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) rc = -ENOMEM; goto err_exit; } + phba->sli4_hba.max_cfg_param.xri_used = 0; phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt * sizeof(uint16_t), GFP_KERNEL); @@ -5434,11 +5616,11 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) list_del_init(&rsrc_blk->list); kfree(rsrc_blk); } + phba->sli4_hba.max_cfg_param.vpi_used = 0; break; case LPFC_RSC_TYPE_FCOE_XRI: kfree(phba->sli4_hba.xri_bmask); kfree(phba->sli4_hba.xri_ids); - bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, &phba->sli4_hba.lpfc_xri_blk_list, list) { list_del_init(&rsrc_blk->list); @@ -5576,6 +5758,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) } /* RPIs. */ count = phba->sli4_hba.max_cfg_param.max_rpi; + if (count <= 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "3279 Invalid provisioning of " + "rpi:%d\n", count); + rc = -EINVAL; + goto err_exit; + } base = phba->sli4_hba.max_cfg_param.rpi_base; longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; phba->sli4_hba.rpi_bmask = kzalloc(longs * @@ -5598,6 +5787,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) /* VPIs. */ count = phba->sli4_hba.max_cfg_param.max_vpi; + if (count <= 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "3280 Invalid provisioning of " + "vpi:%d\n", count); + rc = -EINVAL; + goto free_rpi_ids; + } base = phba->sli4_hba.max_cfg_param.vpi_base; longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; phba->vpi_bmask = kzalloc(longs * @@ -5620,6 +5816,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) /* XRIs. 
*/ count = phba->sli4_hba.max_cfg_param.max_xri; + if (count <= 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "3281 Invalid provisioning of " + "xri:%d\n", count); + rc = -EINVAL; + goto free_vpi_ids; + } base = phba->sli4_hba.max_cfg_param.xri_base; longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; phba->sli4_hba.xri_bmask = kzalloc(longs * @@ -5629,6 +5832,7 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) rc = -ENOMEM; goto free_vpi_ids; } + phba->sli4_hba.max_cfg_param.xri_used = 0; phba->sli4_hba.xri_ids = kzalloc(count * sizeof(uint16_t), GFP_KERNEL); @@ -5642,6 +5846,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) /* VFIs. */ count = phba->sli4_hba.max_cfg_param.max_vfi; + if (count <= 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "3282 Invalid provisioning of " + "vfi:%d\n", count); + rc = -EINVAL; + goto free_xri_ids; + } base = phba->sli4_hba.max_cfg_param.vfi_base; longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; phba->sli4_hba.vfi_bmask = kzalloc(longs * @@ -5706,11 +5917,11 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba) lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); } else { kfree(phba->vpi_bmask); + phba->sli4_hba.max_cfg_param.vpi_used = 0; kfree(phba->vpi_ids); bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); kfree(phba->sli4_hba.xri_bmask); kfree(phba->sli4_hba.xri_ids); - bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); kfree(phba->sli4_hba.vfi_bmask); kfree(phba->sli4_hba.vfi_ids); bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); @@ -5869,6 +6080,148 @@ lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type, } /** + * lpfc_sli4_repost_els_sgl_list - Repsot the els buffers sgl pages as block + * @phba: pointer to lpfc hba data structure. + * + * This routine walks the list of els buffers that have been allocated and + * repost them to the port by using SGL block post. This is needed after a + * pci_function_reset/warm_start or start. It attempts to construct blocks + * of els buffer sgls which contains contiguous xris and uses the non-embedded + * SGL block post mailbox commands to post them to the port. For single els + * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post + * mailbox command for posting. + * + * Returns: 0 = success, non-zero failure. 
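[Editor's note] The kernel-doc above (its body follows) describes the block-building rule used when reposting the ELS SGLs: SGLs are accumulated while their XRIs stay consecutive, and a block is closed either at a hole in the XRI sequence or when the non-embedded mailbox limit is reached; a lone SGL falls back to the embedded single post. A simplified stand-alone illustration of that grouping is below; the limit value is assumed for the example, not taken from the driver.

#include <stdio.h>

#define EXAMPLE_NEMBED_MBOX_SGL_CNT 256   /* assumed per-mailbox SGL limit */

/* xri[] is the sorted list of XRI tags owned by the ELS SGLs. */
static void build_post_blocks(const int *xri, int count)
{
        int start = 0, i;

        for (i = 1; i <= count; i++) {
                int hole = (i == count) || (xri[i] != xri[i - 1] + 1);
                int full = (i - start) == EXAMPLE_NEMBED_MBOX_SGL_CNT;

                if (hole || full) {
                        /* a 1-entry block corresponds to the embedded
                         * single-SGL post in the driver */
                        printf("post block xri %d..%d (%d entries)\n",
                               xri[start], xri[i - 1], i - start);
                        start = i;
                }
        }
}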
+ **/ +static int +lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) +{ + struct lpfc_sglq *sglq_entry = NULL; + struct lpfc_sglq *sglq_entry_next = NULL; + struct lpfc_sglq *sglq_entry_first = NULL; + int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0; + int last_xritag = NO_XRI; + LIST_HEAD(prep_sgl_list); + LIST_HEAD(blck_sgl_list); + LIST_HEAD(allc_sgl_list); + LIST_HEAD(post_sgl_list); + LIST_HEAD(free_sgl_list); + + spin_lock_irq(&phba->hbalock); + list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list); + spin_unlock_irq(&phba->hbalock); + + total_cnt = phba->sli4_hba.els_xri_cnt; + list_for_each_entry_safe(sglq_entry, sglq_entry_next, + &allc_sgl_list, list) { + list_del_init(&sglq_entry->list); + block_cnt++; + if ((last_xritag != NO_XRI) && + (sglq_entry->sli4_xritag != last_xritag + 1)) { + /* a hole in xri block, form a sgl posting block */ + list_splice_init(&prep_sgl_list, &blck_sgl_list); + post_cnt = block_cnt - 1; + /* prepare list for next posting block */ + list_add_tail(&sglq_entry->list, &prep_sgl_list); + block_cnt = 1; + } else { + /* prepare list for next posting block */ + list_add_tail(&sglq_entry->list, &prep_sgl_list); + /* enough sgls for non-embed sgl mbox command */ + if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { + list_splice_init(&prep_sgl_list, + &blck_sgl_list); + post_cnt = block_cnt; + block_cnt = 0; + } + } + num_posted++; + + /* keep track of last sgl's xritag */ + last_xritag = sglq_entry->sli4_xritag; + + /* end of repost sgl list condition for els buffers */ + if (num_posted == phba->sli4_hba.els_xri_cnt) { + if (post_cnt == 0) { + list_splice_init(&prep_sgl_list, + &blck_sgl_list); + post_cnt = block_cnt; + } else if (block_cnt == 1) { + status = lpfc_sli4_post_sgl(phba, + sglq_entry->phys, 0, + sglq_entry->sli4_xritag); + if (!status) { + /* successful, put sgl to posted list */ + list_add_tail(&sglq_entry->list, + &post_sgl_list); + } else { + /* Failure, put sgl to free list */ + lpfc_printf_log(phba, KERN_WARNING, + LOG_SLI, + "3159 Failed to post els " + "sgl, xritag:x%x\n", + sglq_entry->sli4_xritag); + list_add_tail(&sglq_entry->list, + &free_sgl_list); + total_cnt--; + } + } + } + + /* continue until a nembed page worth of sgls */ + if (post_cnt == 0) + continue; + + /* post the els buffer list sgls as a block */ + status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list, + post_cnt); + + if (!status) { + /* success, put sgl list to posted sgl list */ + list_splice_init(&blck_sgl_list, &post_sgl_list); + } else { + /* Failure, put sgl list to free sgl list */ + sglq_entry_first = list_first_entry(&blck_sgl_list, + struct lpfc_sglq, + list); + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "3160 Failed to post els sgl-list, " + "xritag:x%x-x%x\n", + sglq_entry_first->sli4_xritag, + (sglq_entry_first->sli4_xritag + + post_cnt - 1)); + list_splice_init(&blck_sgl_list, &free_sgl_list); + total_cnt -= post_cnt; + } + + /* don't reset xirtag due to hole in xri block */ + if (block_cnt == 0) + last_xritag = NO_XRI; + + /* reset els sgl post count for next round of posting */ + post_cnt = 0; + } + /* update the number of XRIs posted for ELS */ + phba->sli4_hba.els_xri_cnt = total_cnt; + + /* free the els sgls failed to post */ + lpfc_free_sgl_list(phba, &free_sgl_list); + + /* push els sgls posted to the availble list */ + if (!list_empty(&post_sgl_list)) { + spin_lock_irq(&phba->hbalock); + list_splice_init(&post_sgl_list, + &phba->sli4_hba.lpfc_sgl_list); + spin_unlock_irq(&phba->hbalock); + } else { + 
lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "3161 Failure to post els sgl to port.\n"); + return -EIO; + } + return 0; +} + +/** * lpfc_sli4_hba_setup - SLI4 device intialization PCI function * @phba: Pointer to HBA context object. * @@ -5926,6 +6279,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) kfree(vpd); goto out_free_mbox; } + mqe = &mboxq->u.mqe; phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) @@ -5939,6 +6293,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) else phba->hba_flag &= ~HBA_FIP_SUPPORT; + phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH; + if (phba->sli_rev != LPFC_SLI_REV4) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "0376 READ_REV Error. SLI Level %d " @@ -6010,6 +6366,16 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); + /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ + rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3); + if (phba->pport->cfg_lun_queue_depth > rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "3362 LUN queue depth changed from %d to %d\n", + phba->pport->cfg_lun_queue_depth, rc); + phba->pport->cfg_lun_queue_depth = rc; + } + + /* * Discover the port's supported feature set and match it against the * hosts requests. @@ -6079,8 +6445,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) "rc = x%x\n", rc); goto out_free_mbox; } - /* update physical xri mappings in the scsi buffers */ - lpfc_scsi_buf_update(phba); /* Read the port's service parameters. */ rc = lpfc_read_sparam(phba, mboxq, vport->vpi); @@ -6121,28 +6485,26 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); - /* Register SGL pool to the device using non-embedded mailbox command */ - if (!phba->sli4_hba.extents_in_use) { - rc = lpfc_sli4_post_els_sgl_list(phba); - if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "0582 Error %d during els sgl post " - "operation\n", rc); - rc = -ENODEV; - goto out_free_mbox; - } - } else { - rc = lpfc_sli4_post_els_sgl_list_ext(phba); - if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "2560 Error %d during els sgl post " - "operation\n", rc); - rc = -ENODEV; - goto out_free_mbox; - } + /* update host els and scsi xri-sgl sizes and mappings */ + rc = lpfc_sli4_xri_sgl_update(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "1400 Failed to update xri-sgl size and " + "mapping: %d\n", rc); + goto out_free_mbox; } - /* Register SCSI SGL pool to the device */ + /* register the els sgl pool to the port */ + rc = lpfc_sli4_repost_els_sgl_list(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0582 Error %d during els sgl post " + "operation\n", rc); + rc = -ENODEV; + goto out_free_mbox; + } + + /* register the allocated scsi sgl pool to the port */ rc = lpfc_sli4_repost_scsi_sgl_list(phba); if (unlikely(rc)) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, @@ -6163,6 +6525,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) rc = -ENODEV; goto out_free_mbox; } + lpfc_sli4_node_prep(phba); /* Create all the SLI4 queues */ rc = lpfc_sli4_queue_create(phba); @@ -6200,16 +6563,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* Start the ELS watchdog timer */ mod_timer(&vport->els_tmofunc, - jiffies + HZ * (phba->fc_ratov * 2)); + jiffies + 
msecs_to_jiffies(1000 * (phba->fc_ratov * 2))); /* Start heart beat timer */ mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); phba->hb_outstanding = 0; phba->last_completion_time = jiffies; /* Start error attention (ERATT) polling timer */ - mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); + mod_timer(&phba->eratt_poll, + jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); /* Enable PCIe device Advanced Error Reporting (AER) if configured */ if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { @@ -6318,6 +6682,108 @@ lpfc_mbox_timeout(unsigned long ptr) return; } +/** + * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions + * are pending + * @phba: Pointer to HBA context object. + * + * This function checks if any mailbox completions are present on the mailbox + * completion queue. + **/ +bool +lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba) +{ + + uint32_t idx; + struct lpfc_queue *mcq; + struct lpfc_mcqe *mcqe; + bool pending_completions = false; + + if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) + return false; + + /* Check for completions on mailbox completion queue */ + + mcq = phba->sli4_hba.mbx_cq; + idx = mcq->hba_index; + while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) { + mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe; + if (bf_get_le32(lpfc_trailer_completed, mcqe) && + (!bf_get_le32(lpfc_trailer_async, mcqe))) { + pending_completions = true; + break; + } + idx = (idx + 1) % mcq->entry_count; + if (mcq->hba_index == idx) + break; + } + return pending_completions; + +} + +/** + * lpfc_sli4_process_missed_mbox_completions - process mbox completions + * that were missed. + * @phba: Pointer to HBA context object. + * + * For sli4, it is possible to miss an interrupt. As such mbox completions + * maybe missed causing erroneous mailbox timeouts to occur. This function + * checks to see if mbox completions are on the mailbox completion queue + * and will process all the completions associated with the eq for the + * mailbox completion queue. 
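[Editor's note] lpfc_sli4_mbox_completions_pending() above boils down to a bounded scan of the mailbox completion queue starting at the host index. A stand-alone model of that scan follows, with a simplified entry layout standing in for the real MCQE bit fields.

#include <stdbool.h>
#include <stdint.h>

struct model_mcqe {
        uint8_t valid;      /* CQE ownership/valid bit   */
        uint8_t completed;  /* trailer "completed" bit   */
        uint8_t async;      /* trailer "async event" bit */
};

/* hba_index is assumed to be < entry_count, as in the driver. */
static bool mbox_completion_pending(const struct model_mcqe *cq,
                                    uint32_t entry_count, uint32_t hba_index)
{
        uint32_t idx = hba_index;

        while (cq[idx].valid) {
                if (cq[idx].completed && !cq[idx].async)
                        return true;            /* a mailbox completion waits */
                idx = (idx + 1) % entry_count;
                if (idx == hba_index)           /* wrapped all the way around */
                        break;
        }
        return false;
}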
+ **/ +bool +lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) +{ + + uint32_t eqidx; + struct lpfc_queue *fpeq = NULL; + struct lpfc_eqe *eqe; + bool mbox_pending; + + if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) + return false; + + /* Find the eq associated with the mcq */ + + if (phba->sli4_hba.hba_eq) + for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++) + if (phba->sli4_hba.hba_eq[eqidx]->queue_id == + phba->sli4_hba.mbx_cq->assoc_qid) { + fpeq = phba->sli4_hba.hba_eq[eqidx]; + break; + } + if (!fpeq) + return false; + + /* Turn off interrupts from this EQ */ + + lpfc_sli4_eq_clr_intr(fpeq); + + /* Check to see if a mbox completion is pending */ + + mbox_pending = lpfc_sli4_mbox_completions_pending(phba); + + /* + * If a mbox completion is pending, process all the events on EQ + * associated with the mbox completion queue (this could include + * mailbox commands, async events, els commands, receive queue data + * and fcp commands) + */ + + if (mbox_pending) + while ((eqe = lpfc_sli4_eq_get(fpeq))) { + lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx); + fpeq->EQ_processed++; + } + + /* Always clear and re-arm the EQ */ + + lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); + + return mbox_pending; + +} /** * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout @@ -6333,7 +6799,10 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; MAILBOX_t *mb = &pmbox->u.mb; struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring; + + /* If the mailbox completed, process the completion and return */ + if (lpfc_sli4_process_missed_mbox_completions(phba)) + return; /* Check the pmbox pointer first. There is a race condition * between the mbox timeout handler getting executed in the @@ -6371,8 +6840,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); - pring = &psli->ring[psli->fcp_ring]; - lpfc_sli_abort_iocb_ring(phba, pring); + lpfc_sli_abort_fcp_rings(phba); lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "0345 Resetting board due to mailbox timeout\n"); @@ -6411,7 +6879,7 @@ static int lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) { - MAILBOX_t *mb; + MAILBOX_t *mbx; struct lpfc_sli *psli = &phba->sli; uint32_t status, evtctr; uint32_t ha_copy, hc_copy; @@ -6465,7 +6933,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, psli = &phba->sli; - mb = &pmbox->u.mb; + mbx = &pmbox->u.mb; status = MBX_SUCCESS; if (phba->link_state == LPFC_HBA_ERROR) { @@ -6480,7 +6948,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, goto out_not_finished; } - if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { + if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { if (lpfc_readl(phba->HCregaddr, &hc_copy) || !(hc_copy & HC_MBINT_ENA)) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); @@ -6534,7 +7002,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, "(%d):0308 Mbox cmd issue - BUSY Data: " "x%x x%x x%x x%x\n", pmbox->vport ? 
pmbox->vport->vpi : 0xffffff, - mb->mbxCommand, phba->pport->port_state, + mbx->mbxCommand, phba->pport->port_state, psli->sli_flag, flag); psli->slistat.mbox_busy++; @@ -6544,15 +7012,15 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, lpfc_debugfs_disc_trc(pmbox->vport, LPFC_DISC_TRC_MBOX_VPORT, "MBOX Bsy vport: cmd:x%x mb:x%x x%x", - (uint32_t)mb->mbxCommand, - mb->un.varWords[0], mb->un.varWords[1]); + (uint32_t)mbx->mbxCommand, + mbx->un.varWords[0], mbx->un.varWords[1]); } else { lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_MBOX, "MBOX Bsy: cmd:x%x mb:x%x x%x", - (uint32_t)mb->mbxCommand, - mb->un.varWords[0], mb->un.varWords[1]); + (uint32_t)mbx->mbxCommand, + mbx->un.varWords[0], mbx->un.varWords[1]); } return MBX_BUSY; @@ -6563,7 +7031,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, /* If we are not polling, we MUST be in SLI2 mode */ if (flag != MBX_POLL) { if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && - (mb->mbxCommand != MBX_KILL_BOARD)) { + (mbx->mbxCommand != MBX_KILL_BOARD)) { psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command <mbxCommand> cannot issue */ @@ -6576,8 +7044,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, goto out_not_finished; } /* timeout active mbox command */ - mod_timer(&psli->mbox_tmo, (jiffies + - (HZ * lpfc_mbox_tmo_val(phba, pmbox)))); + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * + 1000); + mod_timer(&psli->mbox_tmo, jiffies + timeout); } /* Mailbox cmd <cmd> issue */ @@ -6585,23 +7054,23 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " "x%x\n", pmbox->vport ? pmbox->vport->vpi : 0, - mb->mbxCommand, phba->pport->port_state, + mbx->mbxCommand, phba->pport->port_state, psli->sli_flag, flag); - if (mb->mbxCommand != MBX_HEARTBEAT) { + if (mbx->mbxCommand != MBX_HEARTBEAT) { if (pmbox->vport) { lpfc_debugfs_disc_trc(pmbox->vport, LPFC_DISC_TRC_MBOX_VPORT, "MBOX Send vport: cmd:x%x mb:x%x x%x", - (uint32_t)mb->mbxCommand, - mb->un.varWords[0], mb->un.varWords[1]); + (uint32_t)mbx->mbxCommand, + mbx->un.varWords[0], mbx->un.varWords[1]); } else { lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_MBOX, "MBOX Send: cmd:x%x mb:x%x x%x", - (uint32_t)mb->mbxCommand, - mb->un.varWords[0], mb->un.varWords[1]); + (uint32_t)mbx->mbxCommand, + mbx->un.varWords[0], mbx->un.varWords[1]); } } @@ -6609,12 +7078,12 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, evtctr = psli->slistat.mbox_event; /* next set own bit for the adapter and copy over command word */ - mb->mbxOwner = OWN_CHIP; + mbx->mbxOwner = OWN_CHIP; if (psli->sli_flag & LPFC_SLI_ACTIVE) { /* Populate mbox extension offset word. */ if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { - *(((uint32_t *)mb) + pmbox->mbox_offset_word) + *(((uint32_t *)mbx) + pmbox->mbox_offset_word) = (uint8_t *)phba->mbox_ext - (uint8_t *)phba->mbox; } @@ -6626,11 +7095,11 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, pmbox->in_ext_byte_len); } /* Copy command data to host SLIM area */ - lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); + lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); } else { /* Populate mbox extension offset word. 
*/ if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) - *(((uint32_t *)mb) + pmbox->mbox_offset_word) + *(((uint32_t *)mbx) + pmbox->mbox_offset_word) = MAILBOX_HBA_EXT_OFFSET; /* Copy the mailbox extension data */ @@ -6640,24 +7109,24 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, pmbox->context2, pmbox->in_ext_byte_len); } - if (mb->mbxCommand == MBX_CONFIG_PORT) { + if (mbx->mbxCommand == MBX_CONFIG_PORT) { /* copy command data into host mbox for cmpl */ - lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); + lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); } /* First copy mbox command data to HBA SLIM, skip past first word */ to_slim = phba->MBslimaddr + sizeof (uint32_t); - lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0], + lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0], MAILBOX_CMD_SIZE - sizeof (uint32_t)); /* Next copy over first word, with mbxOwner set */ - ldata = *((uint32_t *)mb); + ldata = *((uint32_t *)mbx); to_slim = phba->MBslimaddr; writel(ldata, to_slim); readl(to_slim); /* flush */ - if (mb->mbxCommand == MBX_CONFIG_PORT) { + if (mbx->mbxCommand == MBX_CONFIG_PORT) { /* switch over to host mailbox */ psli->sli_flag |= LPFC_SLI_ACTIVE; } @@ -6732,7 +7201,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, /* First copy command data */ word0 = *((uint32_t *)phba->mbox); word0 = le32_to_cpu(word0); - if (mb->mbxCommand == MBX_CONFIG_PORT) { + if (mbx->mbxCommand == MBX_CONFIG_PORT) { MAILBOX_t *slimmb; uint32_t slimword0; /* Check real SLIM for any errors */ @@ -6759,7 +7228,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, if (psli->sli_flag & LPFC_SLI_ACTIVE) { /* copy results back to user */ - lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE); + lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE); /* Copy the mailbox extension data */ if (pmbox->out_ext_byte_len && pmbox->context2) { lpfc_sli_pcimem_bcopy(phba->mbox_ext, @@ -6768,7 +7237,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, } } else { /* First copy command data */ - lpfc_memcpy_from_slim(mb, phba->MBslimaddr, + lpfc_memcpy_from_slim(mbx, phba->MBslimaddr, MAILBOX_CMD_SIZE); /* Copy the mailbox extension data */ if (pmbox->out_ext_byte_len && pmbox->context2) { @@ -6783,7 +7252,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, readl(phba->HAregaddr); /* flush */ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; - status = mb->mbxStatus; + status = mbx->mbxStatus; } spin_unlock_irqrestore(&phba->hbalock, drvr_flag); @@ -6828,6 +7297,10 @@ lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) 1000) + jiffies; spin_unlock_irq(&phba->hbalock); + /* Make sure the mailbox is really active */ + if (timeout) + lpfc_sli4_process_missed_mbox_completions(phba); + /* Wait for the outstnading mailbox command to complete */ while (phba->sli.mbox_active) { /* Check active mailbox complete status every 2ms */ @@ -6884,6 +7357,40 @@ lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba) } /** + * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready + * @phba: Pointer to HBA context object. + * @mboxq: Pointer to mailbox object. + * + * The function waits for the bootstrap mailbox register ready bit from + * port for twice the regular mailbox command timeout value. + * + * 0 - no timeout on waiting for bootstrap mailbox register ready. + * MBXERR_ERROR - wait for bootstrap mailbox register timed out. 
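[Editor's note] The helper documented above (its body follows) replaces two open-coded copies of the same poll loop in lpfc_sli4_post_sync_mbox. The shape of that loop, modelled in ordinary user-space C: the 2 ms nap mirrors the driver, while the timeout plumbing here is the sketch's own (the driver derives it from the mailbox timeout value).

#include <stdbool.h>
#include <time.h>

static bool wait_ready(bool (*ready)(void *), void *arg, unsigned int timeout_ms)
{
        struct timespec start, now, nap = { 0, 2 * 1000 * 1000 }; /* 2 ms */

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
                if (ready(arg))
                        return true;
                clock_gettime(CLOCK_MONOTONIC, &now);
                if ((now.tv_sec - start.tv_sec) * 1000 +
                    (now.tv_nsec - start.tv_nsec) / 1000000 >= (long)timeout_ms)
                        return false;           /* cf. MBXERR_ERROR */
                nanosleep(&nap, NULL);
        }
}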
+ **/ +static int +lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + uint32_t db_ready; + unsigned long timeout; + struct lpfc_register bmbx_reg; + + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) + * 1000) + jiffies; + + do { + bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); + db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); + if (!db_ready) + msleep(2); + + if (time_after(jiffies, timeout)) + return MBXERR_ERROR; + } while (!db_ready); + + return 0; +} + +/** * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox * @phba: Pointer to HBA context object. * @mboxq: Pointer to mailbox object. @@ -6904,15 +7411,12 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { int rc = MBX_SUCCESS; unsigned long iflag; - uint32_t db_ready; uint32_t mcqe_status; uint32_t mbx_cmnd; - unsigned long timeout; struct lpfc_sli *psli = &phba->sli; struct lpfc_mqe *mb = &mboxq->u.mqe; struct lpfc_bmbx_create *mbox_rgn; struct dma_address *dma_address; - struct lpfc_register bmbx_reg; /* * Only one mailbox can be active to the bootstrap mailbox region @@ -6936,6 +7440,11 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) phba->sli.mbox_active = mboxq; spin_unlock_irqrestore(&phba->hbalock, iflag); + /* wait for bootstrap mbox register for readyness */ + rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); + if (rc) + goto exit; + /* * Initialize the bootstrap memory region to avoid stale data areas * in the mailbox post. Then copy the caller's mailbox contents to @@ -6950,35 +7459,18 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) dma_address = &phba->sli4_hba.bmbx.dma_address; writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); - timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) - * 1000) + jiffies; - do { - bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); - db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); - if (!db_ready) - msleep(2); - - if (time_after(jiffies, timeout)) { - rc = MBXERR_ERROR; - goto exit; - } - } while (!db_ready); + /* wait for bootstrap mbox register for hi-address write done */ + rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); + if (rc) + goto exit; /* Post the low mailbox dma address to the port. */ writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); - timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) - * 1000) + jiffies; - do { - bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); - db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); - if (!db_ready) - msleep(2); - if (time_after(jiffies, timeout)) { - rc = MBXERR_ERROR; - goto exit; - } - } while (!db_ready); + /* wait for bootstrap mbox register for low address write done */ + rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); + if (rc) + goto exit; /* * Read the CQ to ensure the mailbox has completed. @@ -7075,14 +7567,19 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, if (rc != MBX_SUCCESS) lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, "(%d):2541 Mailbox command x%x " - "(x%x/x%x) cannot issue Data: " - "x%x x%x\n", + "(x%x/x%x) failure: " + "mqe_sta: x%x mcqe_sta: x%x/x%x " + "Data: x%x x%x\n,", mboxq->vport ? 
mboxq->vport->vpi : 0, mboxq->u.mb.mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, mboxq), lpfc_sli_config_mbox_opcode_get(phba, mboxq), + bf_get(lpfc_mqe_status, &mboxq->u.mqe), + bf_get(lpfc_mcqe_status, &mboxq->mcqe), + bf_get(lpfc_mcqe_ext_status, + &mboxq->mcqe), psli->sli_flag, flag); return rc; } else if (flag == MBX_POLL) { @@ -7101,18 +7598,22 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, /* Successfully blocked, now issue sync mbox cmd */ rc = lpfc_sli4_post_sync_mbox(phba, mboxq); if (rc != MBX_SUCCESS) - lpfc_printf_log(phba, KERN_ERR, + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, - "(%d):2597 Mailbox command " - "x%x (x%x/x%x) cannot issue " - "Data: x%x x%x\n", - mboxq->vport ? - mboxq->vport->vpi : 0, + "(%d):2597 Sync Mailbox command " + "x%x (x%x/x%x) failure: " + "mqe_sta: x%x mcqe_sta: x%x/x%x " + "Data: x%x x%x\n,", + mboxq->vport ? mboxq->vport->vpi : 0, mboxq->u.mb.mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, mboxq), lpfc_sli_config_mbox_opcode_get(phba, mboxq), + bf_get(lpfc_mqe_status, &mboxq->u.mqe), + bf_get(lpfc_mcqe_status, &mboxq->mcqe), + bf_get(lpfc_mcqe_ext_status, + &mboxq->mcqe), psli->sli_flag, flag); /* Unblock the async mailbox posting afterward */ lpfc_sli4_async_mbox_unblock(phba); @@ -7222,7 +7723,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) /* Start timer for the mbox_tmo and log some mailbox post messages */ mod_timer(&psli->mbox_tmo, (jiffies + - (HZ * lpfc_mbox_tmo_val(phba, mboxq)))); + msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq)))); lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " @@ -7267,11 +7768,13 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) out_not_finished: spin_lock_irqsave(&phba->hbalock, iflags); - mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; - __lpfc_mbox_cmpl_put(phba, mboxq); - /* Release the token */ - psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; - phba->sli.mbox_active = NULL; + if (phba->sli.mbox_active) { + mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; + __lpfc_mbox_cmpl_put(phba, mboxq); + /* Release the token */ + psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + phba->sli.mbox_active = NULL; + } spin_unlock_irqrestore(&phba->hbalock, iflags); return MBX_NOT_FINISHED; @@ -7351,7 +7854,6 @@ __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, { /* Insert the caller's iocb in the txq tail for later processing. */ list_add_tail(&piocb->list, &pring->txq); - pring->txq_cnt++; } /** @@ -7555,6 +8057,8 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, sgl = (struct sli4_sge *)sglq->sgl; icmd = &piocbq->iocb; + if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX) + return sglq->sli4_xritag; if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { numBdes = icmd->un.genreq64.bdl.bdeSize / sizeof(struct ulp_bde64); @@ -7634,14 +8138,25 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, * * Return: index into SLI4 fast-path FCP queue index. 
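[Editor's note] The rewritten selector that follows chooses an FCP work-queue index either from a per-CPU map (when scheduling by CPU and more than one I/O channel exists) or by atomic round robin. A condensed stand-alone model; C11 atomics are used only to keep the example self-contained, and nr_channels is assumed non-zero.

#include <stdatomic.h>

struct wq_select {
        int by_cpu;                  /* 1 = schedule by CPU, 0 = round robin */
        int nr_channels;             /* cf. cfg_fcp_io_channel               */
        const int *cpu_to_channel;   /* per-CPU channel map                  */
        int nr_cpus;
        atomic_uint rr;              /* round-robin cursor                   */
};

static int pick_channel(struct wq_select *s, int this_cpu)
{
        if (s->by_cpu && s->nr_channels > 1 && this_cpu < s->nr_cpus)
                return s->cpu_to_channel[this_cpu];

        return (int)(atomic_fetch_add(&s->rr, 1u) % (unsigned)s->nr_channels);
}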
**/ -static uint32_t +static inline uint32_t lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba) { - ++phba->fcp_qidx; - if (phba->fcp_qidx >= phba->cfg_fcp_wq_count) - phba->fcp_qidx = 0; - - return phba->fcp_qidx; + struct lpfc_vector_map_info *cpup; + int chann, cpu; + + if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU + && phba->cfg_fcp_io_channel > 1) { + cpu = smp_processor_id(); + if (cpu < phba->sli4_hba.num_present_cpu) { + cpup = phba->sli4_hba.cpu_map; + cpup += cpu; + return cpup->channel_id; + } + chann = cpu; + } + chann = atomic_add_return(1, &phba->fcp_qidx); + chann = (chann % phba->cfg_fcp_io_channel); + return chann; } /** @@ -7693,6 +8208,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, abort_tag = (uint32_t) iocbq->iotag; xritag = iocbq->sli4_xritag; wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ + wqe->generic.wqe_com.word10 = 0; /* words0-2 bpl convert bde */ if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / @@ -7723,7 +8239,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, switch (iocbq->iocb.ulpCommand) { case CMD_ELS_REQUEST64_CR: - ndlp = (struct lpfc_nodelist *)iocbq->context1; + if (iocbq->iocb_flag & LPFC_IO_LIBDFC) + ndlp = iocbq->context_un.ndlp; + else + ndlp = (struct lpfc_nodelist *)iocbq->context1; if (!iocbq->iocb.ulpLe) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2007 Only Limited Edition cmd Format" @@ -7756,14 +8275,20 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { if (pcmd && (*pcmd == ELS_CMD_FLOGI || *pcmd == ELS_CMD_SCR || + *pcmd == ELS_CMD_FDISC || + *pcmd == ELS_CMD_LOGO || *pcmd == ELS_CMD_PLOGI)) { bf_set(els_req64_sp, &wqe->els_req, 1); bf_set(els_req64_sid, &wqe->els_req, iocbq->vport->fc_myDID); + if ((*pcmd == ELS_CMD_FLOGI) && + !(phba->fc_topology == + LPFC_TOPOLOGY_LOOP)) + bf_set(els_req64_sid, &wqe->els_req, 0); bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, - phba->vpi_ids[phba->pport->vpi]); - } else if (iocbq->context1) { + phba->vpi_ids[iocbq->vport->vpi]); + } else if (pcmd && iocbq->context1) { bf_set(wqe_ct, &wqe->els_req.wqe_com, 0); bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); @@ -7777,6 +8302,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); + wqe->els_req.max_response_payload_len = total_len - xmit_len; break; case CMD_XMIT_SEQUENCE64_CX: bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, @@ -7821,8 +8347,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, command_type = FCP_COMMAND_DATA_OUT; /* word3 iocb=iotag wqe=payload_offset_len */ /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ - wqe->fcp_iwrite.payload_offset_len = - xmit_len + sizeof(struct fcp_rsp); + bf_set(payload_offset_len, &wqe->fcp_iwrite, + xmit_len + sizeof(struct fcp_rsp)); + bf_set(cmd_buff_len, &wqe->fcp_iwrite, + 0); /* word4 iocb=parameter wqe=total_xfer_length memcpy */ /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, @@ -7830,18 +8358,28 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); /* Always open the exchange */ 
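[Editor's note] The FCP_IWRITE word3 store above switched from a direct assignment to bf_set(), apparently because the payload offset length now shares its WQE word with a separate cmd_buff_len field; the authoritative layout lives in the hardware headers and is not reproduced here. A generic sketch of packing two half-word lengths into one 32-bit word, with field positions that are illustrative only:

#include <stdint.h>

#define EX_PAYLOAD_LEN_SHIFT  0            /* illustrative positions, not */
#define EX_PAYLOAD_LEN_MASK   0x0000ffffu  /* the SLI-4 register layout   */
#define EX_CMD_BUFF_LEN_SHIFT 16
#define EX_CMD_BUFF_LEN_MASK  0x0000ffffu

static inline void set_word3(uint32_t *word3, uint32_t payload_len,
                             uint32_t cmd_buff_len)
{
        *word3 = ((payload_len & EX_PAYLOAD_LEN_MASK) << EX_PAYLOAD_LEN_SHIFT) |
                 ((cmd_buff_len & EX_CMD_BUFF_LEN_MASK) << EX_CMD_BUFF_LEN_SHIFT);
}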
bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0); - bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4); bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0); bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); + bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); + if (iocbq->iocb_flag & LPFC_IO_OAS) { + bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1); + if (phba->cfg_XLanePriority) { + bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); + bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, + (phba->cfg_XLanePriority << 1)); + } + } break; case CMD_FCP_IREAD64_CR: /* word3 iocb=iotag wqe=payload_offset_len */ /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ - wqe->fcp_iread.payload_offset_len = - xmit_len + sizeof(struct fcp_rsp); + bf_set(payload_offset_len, &wqe->fcp_iread, + xmit_len + sizeof(struct fcp_rsp)); + bf_set(cmd_buff_len, &wqe->fcp_iread, + 0); /* word4 iocb=parameter wqe=total_xfer_length memcpy */ /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, @@ -7849,16 +8387,29 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); /* Always open the exchange */ bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); - bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4); bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0); bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); + bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); + if (iocbq->iocb_flag & LPFC_IO_OAS) { + bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1); + if (phba->cfg_XLanePriority) { + bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1); + bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, + (phba->cfg_XLanePriority << 1)); + } + } break; case CMD_FCP_ICMND64_CR: + /* word3 iocb=iotag wqe=payload_offset_len */ + /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ + bf_set(payload_offset_len, &wqe->fcp_icmd, + xmit_len + sizeof(struct fcp_rsp)); + bf_set(cmd_buff_len, &wqe->fcp_icmd, + 0); /* word3 iocb=IO_TAG wqe=reserved */ - wqe->fcp_icmd.rsrvd3 = 0; bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); /* Always open the exchange */ bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0); @@ -7868,6 +8419,16 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE); bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); + bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, + iocbq->iocb.ulpFCP2Rcvy); + if (iocbq->iocb_flag & LPFC_IO_OAS) { + bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1); + if (phba->cfg_XLanePriority) { + bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1); + bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, + (phba->cfg_XLanePriority << 1)); + } + } break; case CMD_GEN_REQUEST64_CR: /* For this command calculate the xmit length of the @@ -7902,6 +8463,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); + wqe->gen_req.max_response_payload_len = total_len - xmit_len; command_type = OTHER_COMMAND; break; case CMD_XMIT_ELS_RSP64_CX: @@ -7909,11 +8471,25 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, /* words0-2 BDE memcpy */ /* word3 iocb=iotag32 
wqe=response_payload_len */ wqe->xmit_els_rsp.response_payload_len = xmit_len; - /* word4 iocb=did wge=rsvd. */ - wqe->xmit_els_rsp.rsvd4 = 0; + /* word4 */ + wqe->xmit_els_rsp.word4 = 0; /* word5 iocb=rsvd wge=did */ bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, - iocbq->iocb.un.elsreq64.remoteID); + iocbq->iocb.un.xseq64.xmit_els_remoteID); + + if_type = bf_get(lpfc_sli_intf_if_type, + &phba->sli4_hba.sli_intf); + if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { + if (iocbq->vport->fc_flag & FC_PT2PT) { + bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); + bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, + iocbq->vport->fc_myDID); + if (iocbq->vport->fc_myDID == Fabric_DID) { + bf_set(wqe_els_did, + &wqe->xmit_els_rsp.wqe_dest, 0); + } + } + } bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); @@ -7933,11 +8509,11 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, pcmd = (uint32_t *) (((struct lpfc_dmabuf *) iocbq->context2)->virt); if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { - bf_set(els_req64_sp, &wqe->els_req, 1); - bf_set(els_req64_sid, &wqe->els_req, + bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); + bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, iocbq->vport->fc_myDID); - bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); - bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, + bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1); + bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, phba->vpi_ids[phba->pport->vpi]); } command_type = OTHER_COMMAND; @@ -7982,6 +8558,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, xritag = 0; break; case CMD_XMIT_BLS_RSP64_CX: + ndlp = (struct lpfc_nodelist *)iocbq->context1; /* As BLS ABTS RSP WQE is very different from other WQEs, * we re-construct this WQE here based on information in * iocbq from scratch. 
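[Editor's note] Just above, the XMIT_ELS_RSP64 path gains point-to-point addressing for IF_TYPE_2 ports: the response carries the local DID as its source, and if the local port currently owns the Fabric DID the destination field is cleared to zero. A compact restatement as stand-alone code; the Fabric DID value below is an assumption for the example.

#include <stdint.h>

#define MODEL_FABRIC_DID 0xfffffe   /* assumed well-known fabric address */

struct model_els_rsp {
        uint32_t sid;
        uint32_t did;
        int sp;                     /* "source provided" style bit */
};

static void fill_pt2pt_rsp(struct model_els_rsp *rsp, uint32_t my_did,
                           uint32_t remote_did)
{
        rsp->sp = 1;
        rsp->sid = my_did;
        rsp->did = (my_did == MODEL_FABRIC_DID) ? 0 : remote_did;
}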
@@ -8008,8 +8585,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, } bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); + + /* Use CT=VPI */ + bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest, + ndlp->nlp_DID); + bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp, + iocbq->iocb.ulpContext); + bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1); bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, - iocbq->iocb.ulpContext); + phba->vpi_ids[phba->pport->vpi]); bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1); bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com, LPFC_WQE_LENLOC_NONE); @@ -8039,6 +8623,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, break; } + if (iocbq->iocb_flag & LPFC_IO_DIF_PASS) + bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU); + else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP) + bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP); + else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT) + bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT); + iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP | + LPFC_IO_DIF_INSERT); bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); wqe->generic.wqe_com.abort_tag = abort_tag; @@ -8069,15 +8661,15 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, { struct lpfc_sglq *sglq; union lpfc_wqe wqe; + struct lpfc_queue *wq; struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; if (piocb->sli4_xritag == NO_XRI) { if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || - piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN || - piocb->iocb.ulpCommand == CMD_XMIT_BLS_RSP64_CX) + piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) sglq = NULL; else { - if (pring->txq_cnt) { + if (!list_empty(&pring->txq)) { if (!(flag & SLI_IOCB_RET_IOCB)) { __lpfc_sli_ringtx_put(phba, pring, piocb); @@ -8106,7 +8698,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, * This is a continuation of a commandi,(CX) so this * sglq is on the active list */ - sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag); + sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag); if (!sglq) return IOCB_ERROR; } @@ -8122,21 +8714,17 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, return IOCB_ERROR; if ((piocb->iocb_flag & LPFC_IO_FCP) || - (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { - /* - * For FCP command IOCB, get a new WQ index to distribute - * WQE across the WQsr. On the other hand, for abort IOCB, - * it carries the same WQ index to the original command - * IOCB. 
- */ - if (piocb->iocb_flag & LPFC_IO_FCP) - piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba); - if (unlikely(!phba->sli4_hba.fcp_wq)) - return IOCB_ERROR; - if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], - &wqe)) + (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { + if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) { + wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx]; + } else { + wq = phba->sli4_hba.oas_wq; + } + if (lpfc_sli4_wq_put(wq, &wqe)) return IOCB_ERROR; } else { + if (unlikely(!phba->sli4_hba.els_wq)) + return IOCB_ERROR; if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) return IOCB_ERROR; } @@ -8213,13 +8801,76 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, struct lpfc_iocbq *piocb, uint32_t flag) { + struct lpfc_fcp_eq_hdl *fcp_eq_hdl; + struct lpfc_sli_ring *pring; + struct lpfc_queue *fpeq; + struct lpfc_eqe *eqe; unsigned long iflags; - int rc; + int rc, idx; - spin_lock_irqsave(&phba->hbalock, iflags); - rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); - spin_unlock_irqrestore(&phba->hbalock, iflags); + if (phba->sli_rev == LPFC_SLI_REV4) { + if (piocb->iocb_flag & LPFC_IO_FCP) { + if (!phba->cfg_fof || (!(piocb->iocb_flag & + LPFC_IO_OAS))) { + if (unlikely(!phba->sli4_hba.fcp_wq)) + return IOCB_ERROR; + idx = lpfc_sli4_scmd_to_wqidx_distr(phba); + piocb->fcp_wqidx = idx; + ring_number = MAX_SLI3_CONFIGURED_RINGS + idx; + } else { + if (unlikely(!phba->sli4_hba.oas_wq)) + return IOCB_ERROR; + idx = 0; + piocb->fcp_wqidx = 0; + ring_number = LPFC_FCP_OAS_RING; + } + pring = &phba->sli.ring[ring_number]; + spin_lock_irqsave(&pring->ring_lock, iflags); + rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, + flag); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + + if (lpfc_fcp_look_ahead) { + fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx]; + + if (atomic_dec_and_test(&fcp_eq_hdl-> + fcp_eq_in_use)) { + + /* Get associated EQ with this index */ + fpeq = phba->sli4_hba.hba_eq[idx]; + /* Turn off interrupts from this EQ */ + lpfc_sli4_eq_clr_intr(fpeq); + + /* + * Process all the events on FCP EQ + */ + while ((eqe = lpfc_sli4_eq_get(fpeq))) { + lpfc_sli4_hba_handle_eqe(phba, + eqe, idx); + fpeq->EQ_processed++; + } + + /* Always clear and re-arm the EQ */ + lpfc_sli4_eq_release(fpeq, + LPFC_QUEUE_REARM); + } + atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); + } + } else { + pring = &phba->sli.ring[ring_number]; + spin_lock_irqsave(&pring->ring_lock, iflags); + rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, + flag); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + + } + } else { + /* For now, SLI2/3 will still use hbalock */ + spin_lock_irqsave(&phba->hbalock, iflags); + rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); + spin_unlock_irqrestore(&phba->hbalock, iflags); + } return rc; } @@ -8246,18 +8897,18 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba) /* Take some away from the FCP ring */ pring = &psli->ring[psli->fcp_ring]; - pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; - pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; - pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; - pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; + pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; + pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; + pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; + pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; /* and give them to the extra ring */ pring = &psli->ring[psli->extra_ring]; - pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; - 
pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; - pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; - pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; + pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; + pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; + pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; + pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; /* Setup default profile for this ring */ pring->iotag_max = 4096; @@ -8269,56 +8920,6 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba) return 0; } -/* lpfc_sli_abts_recover_port - Recover a port that failed an ABTS. - * @vport: pointer to virtual port object. - * @ndlp: nodelist pointer for the impacted rport. - * - * The driver calls this routine in response to a XRI ABORT CQE - * event from the port. In this event, the driver is required to - * recover its login to the rport even though its login may be valid - * from the driver's perspective. The failed ABTS notice from the - * port indicates the rport is not responding. - */ -static void -lpfc_sli_abts_recover_port(struct lpfc_vport *vport, - struct lpfc_nodelist *ndlp) -{ - struct Scsi_Host *shost; - struct lpfc_hba *phba; - unsigned long flags = 0; - - shost = lpfc_shost_from_vport(vport); - phba = vport->phba; - if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) { - lpfc_printf_log(phba, KERN_INFO, - LOG_SLI, "3093 No rport recovery needed. " - "rport in state 0x%x\n", - ndlp->nlp_state); - return; - } - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "3094 Start rport recovery on shost id 0x%x " - "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " - "flags 0x%x\n", - shost->host_no, ndlp->nlp_DID, - vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, - ndlp->nlp_flag); - /* - * The rport is not responding. Don't attempt ADISC recovery. - * Remove the FCP-2 flag to force a PLOGI. - */ - spin_lock_irqsave(shost->host_lock, flags); - ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - spin_unlock_irqrestore(shost->host_lock, flags); - lpfc_disc_state_machine(vport, ndlp, NULL, - NLP_EVT_DEVICE_RECOVERY); - lpfc_cancel_retry_delay_tmo(vport, ndlp); - spin_lock_irqsave(shost->host_lock, flags); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irqrestore(shost->host_lock, flags); - lpfc_disc_start(vport); -} - /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port. * @phba: Pointer to HBA context object. * @iocbq: Pointer to iocb object. @@ -8383,20 +8984,32 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, struct sli4_wcqe_xri_aborted *axri) { struct lpfc_vport *vport; + uint32_t ext_status = 0; - if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "3115 Node Context not found, driver " "ignoring abts err event\n"); + return; + } + vport = ndlp->vport; lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "3116 Port generated FCP XRI ABORT event on " - "vpi %d rpi %d xri x%x status 0x%x\n", - ndlp->vport->vpi, ndlp->nlp_rpi, + "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n", + ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], bf_get(lpfc_wcqe_xa_xri, axri), - bf_get(lpfc_wcqe_xa_status, axri)); + bf_get(lpfc_wcqe_xa_status, axri), + axri->parameter); - if (bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) + /* + * Catch the ABTS protocol failure case. Older OCe FW releases returned + * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and + * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT. 
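[Editor's note] The comment just above states the rationale and the code that follows applies it: port recovery is triggered only for a LOCAL_REJECT completion whose masked extended status is SEQUENCE_TIMEOUT or zero, which covers both the older and newer firmware behaviours. The same decision as a stand-alone predicate; the numeric constants are assumptions for the example, not the driver's definitions.

#include <stdbool.h>
#include <stdint.h>

#define EX_IOSTAT_LOCAL_REJECT    0x03        /* assumed value */
#define EX_IOERR_PARAM_MASK       0x00ffffff  /* assumed value */
#define EX_IOERR_SEQUENCE_TIMEOUT 0x0a        /* assumed value */

static bool abts_needs_port_recovery(uint32_t status, uint32_t parameter)
{
        uint32_t ext_status = parameter & EX_IOERR_PARAM_MASK;

        return status == EX_IOSTAT_LOCAL_REJECT &&
               (ext_status == EX_IOERR_SEQUENCE_TIMEOUT || ext_status == 0);
}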
+ */ + ext_status = axri->parameter & IOERR_PARAM_MASK; + if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && + ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) lpfc_sli_abts_recover_port(vport, ndlp); } @@ -8492,7 +9105,9 @@ lpfc_sli_setup(struct lpfc_hba *phba) struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; - psli->num_rings = MAX_CONFIGURED_RINGS; + psli->num_rings = MAX_SLI3_CONFIGURED_RINGS; + if (phba->sli_rev == LPFC_SLI_REV4) + psli->num_rings += phba->cfg_fcp_io_channel; psli->sli_flag = 0; psli->fcp_ring = LPFC_FCP_RING; psli->next_ring = LPFC_FCP_NEXT_RING; @@ -8507,16 +9122,20 @@ lpfc_sli_setup(struct lpfc_hba *phba) switch (i) { case LPFC_FCP_RING: /* ring 0 - FCP */ /* numCiocb and numRiocb are used in config_port */ - pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; - pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; - pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; - pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; - pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; - pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; - pring->sizeCiocb = (phba->sli_rev == 3) ? + pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; + pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; + pring->sli.sli3.numCiocb += + SLI2_IOCB_CMD_R1XTRA_ENTRIES; + pring->sli.sli3.numRiocb += + SLI2_IOCB_RSP_R1XTRA_ENTRIES; + pring->sli.sli3.numCiocb += + SLI2_IOCB_CMD_R3XTRA_ENTRIES; + pring->sli.sli3.numRiocb += + SLI2_IOCB_RSP_R3XTRA_ENTRIES; + pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? SLI3_IOCB_CMD_SIZE : SLI2_IOCB_CMD_SIZE; - pring->sizeRiocb = (phba->sli_rev == 3) ? + pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? SLI3_IOCB_RSP_SIZE : SLI2_IOCB_RSP_SIZE; pring->iotag_ctr = 0; @@ -8527,12 +9146,12 @@ lpfc_sli_setup(struct lpfc_hba *phba) break; case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ /* numCiocb and numRiocb are used in config_port */ - pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; - pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; - pring->sizeCiocb = (phba->sli_rev == 3) ? + pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; + pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; + pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? SLI3_IOCB_CMD_SIZE : SLI2_IOCB_CMD_SIZE; - pring->sizeRiocb = (phba->sli_rev == 3) ? + pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? SLI3_IOCB_RSP_SIZE : SLI2_IOCB_RSP_SIZE; pring->iotag_max = phba->cfg_hba_queue_depth; @@ -8540,12 +9159,12 @@ lpfc_sli_setup(struct lpfc_hba *phba) break; case LPFC_ELS_RING: /* ring 2 - ELS / CT */ /* numCiocb and numRiocb are used in config_port */ - pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; - pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; - pring->sizeCiocb = (phba->sli_rev == 3) ? + pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; + pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; + pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? SLI3_IOCB_CMD_SIZE : SLI2_IOCB_CMD_SIZE; - pring->sizeRiocb = (phba->sli_rev == 3) ? + pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 
SLI3_IOCB_RSP_SIZE : SLI2_IOCB_RSP_SIZE; pring->fast_iotag = 0; @@ -8578,16 +9197,11 @@ lpfc_sli_setup(struct lpfc_hba *phba) pring->prt[3].type = FC_TYPE_CT; pring->prt[3].lpfc_sli_rcv_unsol_event = lpfc_ct_unsol_event; - /* abort unsolicited sequence */ - pring->prt[4].profile = 0; /* Mask 4 */ - pring->prt[4].rctl = FC_RCTL_BA_ABTS; - pring->prt[4].type = FC_TYPE_BLS; - pring->prt[4].lpfc_sli_rcv_unsol_event = - lpfc_sli4_ct_abort_unsol_event; break; } - totiocbsize += (pring->numCiocb * pring->sizeCiocb) + - (pring->numRiocb * pring->sizeRiocb); + totiocbsize += (pring->sli.sli3.numCiocb * + pring->sli.sli3.sizeCiocb) + + (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb); } if (totiocbsize > MAX_SLIM_IOCB_SIZE) { /* Too many cmd / rsp ring entries in SLI2 SLIM */ @@ -8628,14 +9242,16 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba) for (i = 0; i < psli->num_rings; i++) { pring = &psli->ring[i]; pring->ringno = i; - pring->next_cmdidx = 0; - pring->local_getidx = 0; - pring->cmdidx = 0; + pring->sli.sli3.next_cmdidx = 0; + pring->sli.sli3.local_getidx = 0; + pring->sli.sli3.cmdidx = 0; + pring->flag = 0; INIT_LIST_HEAD(&pring->txq); INIT_LIST_HEAD(&pring->txcmplq); INIT_LIST_HEAD(&pring->iocb_continueq); INIT_LIST_HEAD(&pring->iocb_continue_saveq); INIT_LIST_HEAD(&pring->postbufq); + spin_lock_init(&pring->ring_lock); } spin_unlock_irq(&phba->hbalock); return 1; @@ -8736,7 +9352,6 @@ lpfc_sli_host_down(struct lpfc_vport *vport) if (iocb->vport != vport) continue; list_move_tail(&iocb->list, &completions); - pring->txq_cnt--; } /* Next issue ABTS for everything on the txcmplq */ @@ -8784,7 +9399,7 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) int i; /* Shutdown the mailbox command sub-system */ - lpfc_sli_mbox_sys_shutdown(phba); + lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT); lpfc_hba_down_prep(phba); @@ -8805,8 +9420,6 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) * given to the FW yet. */ list_splice_init(&pring->txq, &completions); - pring->txq_cnt = 0; - } spin_unlock_irqrestore(&phba->hbalock, flags); @@ -9134,6 +9747,7 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, IOCB_t *icmd = NULL; IOCB_t *iabt = NULL; int retval; + unsigned long iflags; /* * There are certain command types we don't want to abort. And we @@ -9186,7 +9800,17 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, iabt->un.acxri.abortIoTag, iabt->un.acxri.abortContextTag, abtsiocbp->iotag); - retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0); + + if (phba->sli_rev == LPFC_SLI_REV4) { + /* Note: both hbalock and ring_lock need to be set here */ + spin_lock_irqsave(&pring->ring_lock, iflags); + retval = __lpfc_sli_issue_iocb(phba, pring->ringno, + abtsiocbp, 0); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + } else { + retval = __lpfc_sli_issue_iocb(phba, pring->ringno, + abtsiocbp, 0); + } if (retval) __lpfc_sli_release_iocbq(phba, abtsiocbp); @@ -9257,43 +9881,6 @@ abort_iotag_exit: } /** - * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring - * @phba: Pointer to HBA context object. - * @pring: Pointer to driver SLI ring object. - * - * This function aborts all iocbs in the given ring and frees all the iocb - * objects in txq. This function issues abort iocbs unconditionally for all - * the iocb commands in txcmplq. The iocbs in the txcmplq is not guaranteed - * to complete before the return of this function. The caller is not required - * to hold any locks. 
- **/ -static void -lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) -{ - LIST_HEAD(completions); - struct lpfc_iocbq *iocb, *next_iocb; - - if (pring->ringno == LPFC_ELS_RING) - lpfc_fabric_abort_hba(phba); - - spin_lock_irq(&phba->hbalock); - - /* Take off all the iocbs on txq for cancelling */ - list_splice_init(&pring->txq, &completions); - pring->txq_cnt = 0; - - /* Next issue ABTS for everything on the txcmplq */ - list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) - lpfc_sli_abort_iotag_issue(phba, pring, iocb); - - spin_unlock_irq(&phba->hbalock); - - /* Cancel all the IOCBs from the completions list */ - lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, - IOERR_SLI_ABORTED); -} - -/** * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. * @phba: pointer to lpfc HBA data structure. * @@ -9308,7 +9895,7 @@ lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) for (i = 0; i < psli->num_rings; i++) { pring = &psli->ring[i]; - lpfc_sli_iocb_ring_abort(phba, pring); + lpfc_sli_abort_iocb_ring(phba, pring); } } @@ -9430,7 +10017,7 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "3096 ABORT_XRI_CN completing on xri x%x " + "3096 ABORT_XRI_CN completing on rpi x%x " "original iotag x%x, abort cmd iotag x%x " "status 0x%x, reason 0x%x\n", cmdiocb->iocb.un.acxri.abortContextTag, @@ -9480,6 +10067,13 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, abort_cmd) != 0) continue; + /* + * If the iocbq is already being aborted, don't take a second + * action, but do count it. + */ + if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) + continue; + /* issue ABTS for this IOCB based on iotag */ abtsiocb = lpfc_sli_get_iocbq(phba); if (abtsiocb == NULL) { @@ -9487,6 +10081,9 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, continue; } + /* indicate the IO is being aborted by the driver. */ + iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; + cmd = &iocbq->iocb; abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; @@ -9496,7 +10093,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; abtsiocb->iocb.ulpLe = 1; abtsiocb->iocb.ulpClass = cmd->ulpClass; - abtsiocb->vport = phba->pport; + abtsiocb->vport = vport; /* ABTS WQE must go to the same WQ as the WQE to be aborted */ abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; @@ -9523,6 +10120,124 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, } /** + * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN + * @vport: Pointer to virtual port. + * @pring: Pointer to driver SLI ring object. + * @tgt_id: SCSI ID of the target. + * @lun_id: LUN ID of the scsi device. + * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. + * + * This function sends an abort command for every SCSI command + * associated with the given virtual port pending on the ring + * filtered by lpfc_sli_validate_fcp_iocb function. + * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the + * FCP iocbs associated with lun specified by tgt_id and lun_id + * parameters + * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the + * FCP iocbs associated with SCSI target specified by tgt_id parameter. 
+ * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all + * FCP iocbs associated with virtual port. + * This function returns number of iocbs it aborted . + * This function is called with no locks held right after a taskmgmt + * command is sent. + **/ +int +lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, + uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *abtsiocbq; + struct lpfc_iocbq *iocbq; + IOCB_t *icmd; + int sum, i, ret_val; + unsigned long iflags; + struct lpfc_sli_ring *pring_s4; + uint32_t ring_number; + + spin_lock_irq(&phba->hbalock); + + /* all I/Os are in process of being flushed */ + if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { + spin_unlock_irq(&phba->hbalock); + return 0; + } + sum = 0; + + for (i = 1; i <= phba->sli.last_iotag; i++) { + iocbq = phba->sli.iocbq_lookup[i]; + + if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, + cmd) != 0) + continue; + + /* + * If the iocbq is already being aborted, don't take a second + * action, but do count it. + */ + if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) + continue; + + /* issue ABTS for this IOCB based on iotag */ + abtsiocbq = __lpfc_sli_get_iocbq(phba); + if (abtsiocbq == NULL) + continue; + + icmd = &iocbq->iocb; + abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; + abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext; + if (phba->sli_rev == LPFC_SLI_REV4) + abtsiocbq->iocb.un.acxri.abortIoTag = + iocbq->sli4_xritag; + else + abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag; + abtsiocbq->iocb.ulpLe = 1; + abtsiocbq->iocb.ulpClass = icmd->ulpClass; + abtsiocbq->vport = vport; + + /* ABTS WQE must go to the same WQ as the WQE to be aborted */ + abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx; + if (iocbq->iocb_flag & LPFC_IO_FCP) + abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX; + + if (lpfc_is_link_up(phba)) + abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN; + else + abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN; + + /* Setup callback routine and issue the command. */ + abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; + + /* + * Indicate the IO is being aborted by the driver and set + * the caller's flag into the aborted IO. + */ + iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; + + if (phba->sli_rev == LPFC_SLI_REV4) { + ring_number = MAX_SLI3_CONFIGURED_RINGS + + iocbq->fcp_wqidx; + pring_s4 = &phba->sli.ring[ring_number]; + /* Note: both hbalock and ring_lock must be set here */ + spin_lock_irqsave(&pring_s4->ring_lock, iflags); + ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, + abtsiocbq, 0); + spin_unlock_irqrestore(&pring_s4->ring_lock, iflags); + } else { + ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno, + abtsiocbq, 0); + } + + + if (ret_val == IOCB_ERROR) + __lpfc_sli_release_iocbq(phba, abtsiocbq); + else + sum++; + } + spin_unlock_irq(&phba->hbalock); + return sum; +} + +/** * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler * @phba: Pointer to HBA context object. * @cmdiocbq: Pointer to command iocb. @@ -9549,6 +10264,24 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd; spin_lock_irqsave(&phba->hbalock, iflags); + if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) { + + /* + * A time out has occurred for the iocb. If a time out + * completion handler has been supplied, call it. Otherwise, + * just free the iocbq. 
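The new lpfc_sli_abort_taskmgmt() above is the task-management counterpart of lpfc_sli_abort_iocb(): it scans the iotag table under hbalock, skips IOCBs already marked LPFC_DRIVER_ABORTED, and on SLI-4 routes each ABTS to the ring of the WQ that issued the original WQE. A hedged caller sketch follows; the real call sites are expected to be the LUN/target reset handlers in lpfc_scsi.c, and cnt, tgt_id and lun_id are placeholders.

/* Hedged caller sketch, not part of this patch. */
int cnt;

cnt = lpfc_sli_abort_taskmgmt(vport,
			      &phba->sli.ring[phba->sli.fcp_ring],
			      tgt_id, lun_id, LPFC_CTX_LUN);
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
		"aborted %d outstanding FCP I/Os after LUN reset\n", cnt);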
+ */ + + spin_unlock_irqrestore(&phba->hbalock, iflags); + cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl; + cmdiocbq->wait_iocb_cmpl = NULL; + if (cmdiocbq->iocb_cmpl) + (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL); + else + lpfc_sli_release_iocbq(phba, cmdiocbq); + return; + } + cmdiocbq->iocb_flag |= LPFC_IO_WAKE; if (cmdiocbq->context2 && rspiocbq) memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, @@ -9604,10 +10337,16 @@ lpfc_chk_iocb_flg(struct lpfc_hba *phba, * @timeout: Timeout in number of seconds. * * This function issues the iocb to firmware and waits for the - * iocb to complete. If the iocb command is not - * completed within timeout seconds, it returns IOCB_TIMEDOUT. - * Caller should not free the iocb resources if this function - * returns IOCB_TIMEDOUT. + * iocb to complete. The iocb_cmpl field of the shall be used + * to handle iocbs which time out. If the field is NULL, the + * function shall free the iocbq structure. If more clean up is + * needed, the caller is expected to provide a completion function + * that will provide the needed clean up. If the iocb command is + * not completed within timeout seconds, the function will either + * free the iocbq structure (if iocb_cmpl == NULL) or execute the + * completion function set in the iocb_cmpl field and then return + * a status of IOCB_TIMEDOUT. The caller should not free the iocb + * resources if this function returns IOCB_TIMEDOUT. * The function waits for the iocb completion using an * non-interruptible wait. * This function will sleep while waiting for iocb completion. @@ -9636,7 +10375,13 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, long timeleft, timeout_req = 0; int retval = IOCB_SUCCESS; uint32_t creg_val; + struct lpfc_iocbq *iocb; + int txq_cnt = 0; + int txcmplq_cnt = 0; struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + unsigned long iflags; + bool iocb_completed = true; + /* * If the caller has provided a response iocbq buffer, then context2 * is NULL or its an error. @@ -9647,9 +10392,10 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, piocb->context2 = prspiocbq; } + piocb->wait_iocb_cmpl = piocb->iocb_cmpl; piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; piocb->context_un.wait_queue = &done_q; - piocb->iocb_flag &= ~LPFC_IO_WAKE; + piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO); if (phba->cfg_poll & DISABLE_FCP_RING_INT) { if (lpfc_readl(phba->HCregaddr, &creg_val)) @@ -9662,14 +10408,30 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, SLI_IOCB_RET_IOCB); if (retval == IOCB_SUCCESS) { - timeout_req = timeout * HZ; + timeout_req = msecs_to_jiffies(timeout * 1000); timeleft = wait_event_timeout(done_q, lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), timeout_req); + spin_lock_irqsave(&phba->hbalock, iflags); + if (!(piocb->iocb_flag & LPFC_IO_WAKE)) { - if (piocb->iocb_flag & LPFC_IO_WAKE) { + /* + * IOCB timed out. Inform the wake iocb wait + * completion function and set local status + */ + + iocb_completed = false; + piocb->iocb_flag |= LPFC_IO_WAKE_TMO; + } + spin_unlock_irqrestore(&phba->hbalock, iflags); + if (iocb_completed) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "0331 IOCB wake signaled\n"); + /* Note: we are not indicating if the IOCB has a success + * status or not - that's for the caller to check. + * IOCB_SUCCESS means just that the command was sent and + * completed. Not that it completed successfully. 
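The reworked wait semantics above change who owns the iocbq once IOCB_TIMEDOUT is returned: the wait handler either frees it (iocb_cmpl was NULL) or hands it to the completion routine the caller installed before the call, whenever the late completion finally arrives. A minimal caller sketch under those rules, with hypothetical names (my_els_cleanup_cmpl, my_send_and_wait) and a 30 second wait on the ELS ring.

/* Hypothetical caller sketch, not part of this patch. */
static void
my_els_cleanup_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	/* Reached only when the request timed out and its completion
	 * arrived later; release what the synchronous path would have. */
	lpfc_sli_release_iocbq(phba, cmdiocb);
}

static int
my_send_and_wait(struct lpfc_hba *phba, struct lpfc_iocbq *my_iocb,
		 struct lpfc_iocbq *my_rsp)
{
	int rc;

	my_iocb->iocb_cmpl = my_els_cleanup_cmpl; /* saved as wait_iocb_cmpl */
	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, my_iocb, my_rsp, 30);
	if (rc == IOCB_TIMEDOUT)
		return -ETIMEDOUT;	/* do NOT free my_iocb; the timeout
					 * handler above owns it from here */
	if (rc == IOCB_SUCCESS) {
		/* Sent and completed; check my_rsp->iocb.ulpStatus for the
		 * real outcome, then release the command iocbq as usual. */
		lpfc_sli_release_iocbq(phba, my_iocb);
	}
	return rc;
}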
+ * */ } else if (timeleft == 0) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0338 IOCB wait timeout error - no " @@ -9683,9 +10445,17 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, retval = IOCB_TIMEDOUT; } } else if (retval == IOCB_BUSY) { - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", - phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt); + if (phba->cfg_log_verbose & LOG_SLI) { + list_for_each_entry(iocb, &pring->txq, list) { + txq_cnt++; + } + list_for_each_entry(iocb, &pring->txcmplq, list) { + txcmplq_cnt++; + } + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", + phba->iocb_cnt, txq_cnt, txcmplq_cnt); + } return retval; } else { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, @@ -9741,12 +10511,13 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, uint32_t timeout) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); + MAILBOX_t *mb = NULL; int retval; unsigned long flag; - /* The caller must leave context1 empty. */ + /* The caller might set context1 for extended buffer */ if (pmboxq->context1) - return MBX_NOT_FINISHED; + mb = (MAILBOX_t *)pmboxq->context1; pmboxq->mbox_flag &= ~LPFC_MBX_WAKE; /* setup wake call as IOCB callback */ @@ -9759,22 +10530,25 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, if (retval == MBX_BUSY || retval == MBX_SUCCESS) { wait_event_interruptible_timeout(done_q, pmboxq->mbox_flag & LPFC_MBX_WAKE, - timeout * HZ); + msecs_to_jiffies(timeout * 1000)); spin_lock_irqsave(&phba->hbalock, flag); - pmboxq->context1 = NULL; + /* restore the possible extended buffer for free resource */ + pmboxq->context1 = (uint8_t *)mb; /* * if LPFC_MBX_WAKE flag is set the mailbox is completed * else do not free the resources. */ if (pmboxq->mbox_flag & LPFC_MBX_WAKE) { retval = MBX_SUCCESS; - lpfc_sli4_swap_str(phba, pmboxq); } else { retval = MBX_TIMEOUT; pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; } spin_unlock_irqrestore(&phba->hbalock, flag); + } else { + /* restore the possible extended buffer for free resource */ + pmboxq->context1 = (uint8_t *)mb; } return retval; @@ -9796,18 +10570,23 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, * sub-system flush routine to gracefully bring down mailbox sub-system. **/ void -lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba) +lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action) { struct lpfc_sli *psli = &phba->sli; unsigned long timeout; + if (mbx_action == LPFC_MBX_NO_WAIT) { + /* delay 100ms for port state */ + msleep(100); + lpfc_sli_mbox_sys_flush(phba); + return; + } timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; + spin_lock_irq(&phba->hbalock); psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; - spin_unlock_irq(&phba->hbalock); if (psli->sli_flag & LPFC_SLI_ACTIVE) { - spin_lock_irq(&phba->hbalock); /* Determine how long we might wait for the active mailbox * command to be gracefully completed by firmware. 
*/ @@ -9826,7 +10605,9 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba) */ break; } - } + } else + spin_unlock_irq(&phba->hbalock); + lpfc_sli_mbox_sys_flush(phba); } @@ -10652,13 +11433,18 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbOut, struct lpfc_wcqe_complete *wcqe) { + int numBdes, i; unsigned long iflags; + uint32_t status, max_response; + struct lpfc_dmabuf *dmabuf; + struct ulp_bde64 *bpl, bde; size_t offset = offsetof(struct lpfc_iocbq, iocb); memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, sizeof(struct lpfc_iocbq) - offset); /* Map WCQE parameters into irspiocb parameters */ - pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe); + status = bf_get(lpfc_wcqe_c_status, wcqe); + pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK); if (pIocbOut->iocb_flag & LPFC_IO_FCP) if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) pIocbIn->iocb.un.fcpi.fcpi_parm = @@ -10668,7 +11454,74 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; else { pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; - pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed; + switch (pIocbOut->iocb.ulpCommand) { + case CMD_ELS_REQUEST64_CR: + dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; + bpl = (struct ulp_bde64 *)dmabuf->virt; + bde.tus.w = le32_to_cpu(bpl[1].tus.w); + max_response = bde.tus.f.bdeSize; + break; + case CMD_GEN_REQUEST64_CR: + max_response = 0; + if (!pIocbOut->context3) + break; + numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/ + sizeof(struct ulp_bde64); + dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; + bpl = (struct ulp_bde64 *)dmabuf->virt; + for (i = 0; i < numBdes; i++) { + bde.tus.w = le32_to_cpu(bpl[i].tus.w); + if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) + max_response += bde.tus.f.bdeSize; + } + break; + default: + max_response = wcqe->total_data_placed; + break; + } + if (max_response < wcqe->total_data_placed) + pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response; + else + pIocbIn->iocb.un.genreq64.bdl.bdeSize = + wcqe->total_data_placed; + } + + /* Convert BG errors for completion status */ + if (status == CQE_STATUS_DI_ERROR) { + pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; + + if (bf_get(lpfc_wcqe_c_bg_edir, wcqe)) + pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED; + else + pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED; + + pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0; + if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */ + pIocbIn->iocb.unsli3.sli3_bg.bgstat |= + BGS_GUARD_ERR_MASK; + if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */ + pIocbIn->iocb.unsli3.sli3_bg.bgstat |= + BGS_APPTAG_ERR_MASK; + if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */ + pIocbIn->iocb.unsli3.sli3_bg.bgstat |= + BGS_REFTAG_ERR_MASK; + + /* Check to see if there was any good data before the error */ + if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) { + pIocbIn->iocb.unsli3.sli3_bg.bgstat |= + BGS_HI_WATER_MARK_PRESENT_MASK; + pIocbIn->iocb.unsli3.sli3_bg.bghm = + wcqe->total_data_placed; + } + + /* + * Set ALL the error bits to indicate we don't know what + * type of error it is. 
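The DI_ERROR conversion above folds the WCQE guard/application/reference check bits into the SLI-3 style bgstat word, and records any partially placed data in bghm, so existing block-guard completion handling can consume SLI-4 errors unchanged. A small consumer-side sketch, assuming LOG_BG is the driver's block-guard log mask and my_report_bg_err is hypothetical.

/* Hypothetical consumer sketch, not part of this patch. */
static void
my_report_bg_err(struct lpfc_hba *phba, struct lpfc_iocbq *rsp)
{
	uint32_t bgstat = rsp->iocb.unsli3.sli3_bg.bgstat;

	if (bgstat & BGS_GUARD_ERR_MASK)
		lpfc_printf_log(phba, KERN_WARNING, LOG_BG,
				"guard tag check failed\n");
	if (bgstat & BGS_APPTAG_ERR_MASK)
		lpfc_printf_log(phba, KERN_WARNING, LOG_BG,
				"application tag check failed\n");
	if (bgstat & BGS_REFTAG_ERR_MASK)
		lpfc_printf_log(phba, KERN_WARNING, LOG_BG,
				"reference tag check failed\n");
	if (bgstat & BGS_HI_WATER_MARK_PRESENT_MASK)
		/* bghm holds how much data was placed before the error */
		lpfc_printf_log(phba, KERN_INFO, LOG_BG,
				"good data before error: %u bytes\n",
				rsp->iocb.unsli3.sli3_bg.bghm);
}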
+ */ + if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat) + pIocbIn->iocb.unsli3.sli3_bg.bgstat |= + (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK | + BGS_GUARD_ERR_MASK); } /* Pick up HBA exchange busy condition */ @@ -10700,12 +11553,12 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, unsigned long iflags; wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; - spin_lock_irqsave(&phba->hbalock, iflags); + spin_lock_irqsave(&pring->ring_lock, iflags); pring->stats.iocb_event++; /* Look up the ELS command IOCB and create pseudo response IOCB */ cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, bf_get(lpfc_wcqe_c_request_tag, wcqe)); - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irqrestore(&pring->ring_lock, iflags); if (unlikely(!cmdiocbq)) { lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, @@ -10907,6 +11760,7 @@ lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) /** * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event * @phba: Pointer to HBA context object. + * @cq: Pointer to associated CQ * @wcqe: Pointer to work-queue completion queue entry. * * This routine handles an ELS work-queue completion event. @@ -10914,22 +11768,31 @@ lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) * Return: true if work posted to worker thread, otherwise false. **/ static bool -lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, +lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_wcqe_complete *wcqe) { struct lpfc_iocbq *irspiocbq; unsigned long iflags; - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING]; + struct lpfc_sli_ring *pring = cq->pring; + int txq_cnt = 0; + int txcmplq_cnt = 0; + int fcp_txcmplq_cnt = 0; /* Get an irspiocbq for later ELS response processing use */ irspiocbq = lpfc_sli_get_iocbq(phba); if (!irspiocbq) { + if (!list_empty(&pring->txq)) + txq_cnt++; + if (!list_empty(&pring->txcmplq)) + txcmplq_cnt++; + if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq)) + fcp_txcmplq_cnt++; lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n", - pring->txq_cnt, phba->iocb_cnt, - phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt, - phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt); + txq_cnt, phba->iocb_cnt, + fcp_txcmplq_cnt, + txcmplq_cnt); return false; } @@ -11064,14 +11927,17 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) case FC_STATUS_RQ_BUF_LEN_EXCEEDED: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2537 Receive Frame Truncated!!\n"); + hrq->RQ_buf_trunc++; case FC_STATUS_RQ_SUCCESS: lpfc_sli4_rq_release(hrq, drq); spin_lock_irqsave(&phba->hbalock, iflags); dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); if (!dma_buf) { + hrq->RQ_no_buf_found++; spin_unlock_irqrestore(&phba->hbalock, iflags); goto out; } + hrq->RQ_rcv_buf++; memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); /* save off the frame for the word thread to process */ list_add_tail(&dma_buf->cq_event.list, @@ -11083,6 +11949,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) break; case FC_STATUS_INSUFF_BUF_NEED_BUF: case FC_STATUS_INSUFF_BUF_FRM_DISC: + hrq->RQ_no_posted_buf++; /* Post more buffers if possible */ spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; @@ -11120,7 +11987,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, case CQE_CODE_COMPL_WQE: /* Process the WQ/RQ complete event */ phba->last_completion_time = 
jiffies; - workposted = lpfc_sli4_sp_handle_els_wcqe(phba, + workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq, (struct lpfc_wcqe_complete *)&cqevt); break; case CQE_CODE_RELEASE_WQE: @@ -11164,31 +12031,18 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, * **/ static void -lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) +lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, + struct lpfc_queue *speq) { - struct lpfc_queue *cq = NULL, *childq, *speq; + struct lpfc_queue *cq = NULL, *childq; struct lpfc_cqe *cqe; bool workposted = false; int ecount = 0; uint16_t cqid; - if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0359 Not a valid slow-path completion " - "event: majorcode=x%x, minorcode=x%x\n", - bf_get_le32(lpfc_eqe_major_code, eqe), - bf_get_le32(lpfc_eqe_minor_code, eqe)); - return; - } - /* Get the reference to the corresponding CQ */ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); - /* Search for completion queue pointer matching this cqid */ - speq = phba->sli4_hba.sp_eq; - /* sanity check on queue memory */ - if (unlikely(!speq)) - return; list_for_each_entry(childq, &speq->child_list, list) { if (childq->queue_id == cqid) { cq = childq; @@ -11210,6 +12064,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); if (!(++ecount % cq->entry_repost)) lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); + cq->CQ_mbox++; } break; case LPFC_WCQ: @@ -11223,6 +12078,10 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) if (!(++ecount % cq->entry_repost)) lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); } + + /* Track the max number of CQEs processed in 1 EQ */ + if (ecount > cq->CQ_max_cqe) + cq->CQ_max_cqe = ecount; break; default: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, @@ -11247,34 +12106,33 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) /** * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry - * @eqe: Pointer to fast-path completion queue entry. + * @phba: Pointer to HBA context object. + * @cq: Pointer to associated CQ + * @wcqe: Pointer to work-queue completion queue entry. * * This routine process a fast-path work queue completion entry from fast-path * event queue for FCP command response completion. **/ static void -lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, +lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_wcqe_complete *wcqe) { - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING]; + struct lpfc_sli_ring *pring = cq->pring; struct lpfc_iocbq *cmdiocbq; struct lpfc_iocbq irspiocbq; unsigned long iflags; - spin_lock_irqsave(&phba->hbalock, iflags); - pring->stats.iocb_event++; - spin_unlock_irqrestore(&phba->hbalock, iflags); - /* Check for response status */ if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { /* If resource errors reported from HBA, reduce queue * depth of the SCSI device. 
*/ - if ((bf_get(lpfc_wcqe_c_status, wcqe) == - IOSTAT_LOCAL_REJECT) && - (wcqe->parameter == IOERR_NO_RESOURCES)) { + if (((bf_get(lpfc_wcqe_c_status, wcqe) == + IOSTAT_LOCAL_REJECT)) && + ((wcqe->parameter & IOERR_PARAM_MASK) == + IOERR_NO_RESOURCES)) phba->lpfc_rampdown_queue_depth(phba); - } + /* Log the error status */ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0373 FCP complete error: status=x%x, " @@ -11287,10 +12145,11 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, } /* Look up the FCP command IOCB and create pseudo response IOCB */ - spin_lock_irqsave(&phba->hbalock, iflags); + spin_lock_irqsave(&pring->ring_lock, iflags); + pring->stats.iocb_event++; cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, bf_get(lpfc_wcqe_c_request_tag, wcqe)); - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irqrestore(&pring->ring_lock, iflags); if (unlikely(!cmdiocbq)) { lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0374 FCP complete with no corresponding " @@ -11374,17 +12233,20 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, /* Check and process for different type of WCQE and dispatch */ switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { case CQE_CODE_COMPL_WQE: + cq->CQ_wq++; /* Process the WQ complete event */ phba->last_completion_time = jiffies; - lpfc_sli4_fp_handle_fcp_wcqe(phba, + lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, (struct lpfc_wcqe_complete *)&wcqe); break; case CQE_CODE_RELEASE_WQE: + cq->CQ_release_wqe++; /* Process the WQ release event */ lpfc_sli4_fp_handle_rel_wcqe(phba, cq, (struct lpfc_wcqe_release *)&wcqe); break; case CQE_CODE_XRI_ABORTED: + cq->CQ_xri_aborted++; /* Process the WQ XRI abort event */ phba->last_completion_time = jiffies; workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, @@ -11400,7 +12262,7 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, } /** - * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry + * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry * @phba: Pointer to HBA context object. * @eqe: Pointer to fast-path event queue entry. * @@ -11412,8 +12274,8 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, * completion queue, and then return. 
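The handler described above always drains the CQ in batches, releasing consumed entries without re-arming until the queue is empty, and only re-arms on the final release. A condensed sketch of that service idiom; my_drain_cq is hypothetical and the real loops are inline in the handlers in this file.

/* Condensed sketch of the CQ service idiom, illustrative only. */
static void
my_drain_cq(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
	struct lpfc_cqe *cqe;
	bool workposted = false;
	int ecount = 0;

	while ((cqe = lpfc_sli4_cq_get(cq))) {
		workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
		/* Hand entries back in batches without re-arming so the
		 * port never runs out of CQEs while we are still draining. */
		if (!(++ecount % cq->entry_repost))
			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
	}
	if (ecount > cq->CQ_max_cqe)	/* per-queue stat added by this patch */
		cq->CQ_max_cqe = ecount;
	/* Final release re-arms the CQ so the next CQE raises a new EQE. */
	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
	if (workposted)
		lpfc_worker_wake_up(phba);
}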
**/ static void -lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, - uint32_t fcp_cqidx) +lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, + uint32_t qidx) { struct lpfc_queue *cq; struct lpfc_cqe *cqe; @@ -11423,30 +12285,38 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0366 Not a valid fast-path completion " + "0366 Not a valid completion " "event: majorcode=x%x, minorcode=x%x\n", bf_get_le32(lpfc_eqe_major_code, eqe), bf_get_le32(lpfc_eqe_minor_code, eqe)); return; } + /* Get the reference to the corresponding CQ */ + cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); + + /* Check if this is a Slow path event */ + if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) { + lpfc_sli4_sp_handle_eqe(phba, eqe, + phba->sli4_hba.hba_eq[qidx]); + return; + } + if (unlikely(!phba->sli4_hba.fcp_cq)) { lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "3146 Fast-path completion queues " "does not exist\n"); return; } - cq = phba->sli4_hba.fcp_cq[fcp_cqidx]; + cq = phba->sli4_hba.fcp_cq[qidx]; if (unlikely(!cq)) { if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0367 Fast-path completion queue " - "(%d) does not exist\n", fcp_cqidx); + "(%d) does not exist\n", qidx); return; } - /* Get the reference to the corresponding CQ */ - cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); if (unlikely(cqid != cq->queue_id)) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0368 Miss-matched fast-path completion " @@ -11462,6 +12332,10 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); } + /* Track the max number of CQEs processed in 1 EQ */ + if (ecount > cq->CQ_max_cqe) + cq->CQ_max_cqe = ecount; + /* Catch the no cq entry condition */ if (unlikely(ecount == 0)) lpfc_printf_log(phba, KERN_ERR, LOG_SLI, @@ -11489,87 +12363,177 @@ lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM); } + +/** + * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue + * entry + * @phba: Pointer to HBA context object. + * @eqe: Pointer to fast-path event queue entry. + * + * This routine process a event queue entry from the Flash Optimized Fabric + * event queue. It will check the MajorCode and MinorCode to determine this + * is for a completion event on a completion queue, if not, an error shall be + * logged and just return. Otherwise, it will get to the corresponding + * completion queue and process all the entries on the completion queue, rearm + * the completion queue, and then return. 
+ **/ +static void +lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) +{ + struct lpfc_queue *cq; + struct lpfc_cqe *cqe; + bool workposted = false; + uint16_t cqid; + int ecount = 0; + + if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "9147 Not a valid completion " + "event: majorcode=x%x, minorcode=x%x\n", + bf_get_le32(lpfc_eqe_major_code, eqe), + bf_get_le32(lpfc_eqe_minor_code, eqe)); + return; + } + + /* Get the reference to the corresponding CQ */ + cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); + + /* Next check for OAS */ + cq = phba->sli4_hba.oas_cq; + if (unlikely(!cq)) { + if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "9148 OAS completion queue " + "does not exist\n"); + return; + } + + if (unlikely(cqid != cq->queue_id)) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "9149 Miss-matched fast-path compl " + "queue id: eqcqid=%d, fcpcqid=%d\n", + cqid, cq->queue_id); + return; + } + + /* Process all the entries to the OAS CQ */ + while ((cqe = lpfc_sli4_cq_get(cq))) { + workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe); + if (!(++ecount % cq->entry_repost)) + lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); + } + + /* Track the max number of CQEs processed in 1 EQ */ + if (ecount > cq->CQ_max_cqe) + cq->CQ_max_cqe = ecount; + + /* Catch the no cq entry condition */ + if (unlikely(ecount == 0)) + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "9153 No entry from fast-path completion " + "queue fcpcqid=%d\n", cq->queue_id); + + /* In any case, flash and re-arm the CQ */ + lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); + + /* wake up worker thread if there are works to be done */ + if (workposted) + lpfc_worker_wake_up(phba); +} + /** - * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device + * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device * @irq: Interrupt number. * @dev_id: The device context pointer. * * This function is directly called from the PCI layer as an interrupt * service routine when device with SLI-4 interface spec is enabled with - * MSI-X multi-message interrupt mode and there are slow-path events in - * the HBA. However, when the device is enabled with either MSI or Pin-IRQ - * interrupt mode, this function is called as part of the device-level - * interrupt handler. When the PCI slot is in error recovery or the HBA is - * undergoing initialization, the interrupt handler will not process the - * interrupt. The link attention and ELS ring attention events are handled - * by the worker thread. The interrupt handler signals the worker thread - * and returns for these events. This function is called without any lock - * held. It gets the hbalock to access and update SLI data structures. + * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric + * IOCB ring event in the HBA. However, when the device is enabled with either + * MSI or Pin-IRQ interrupt mode, this function is called as part of the + * device-level interrupt handler. When the PCI slot is in error recovery + * or the HBA is undergoing initialization, the interrupt handler will not + * process the interrupt. The Flash Optimized Fabric ring event are handled in + * the intrrupt context. This function is called without any lock held. + * It gets the hbalock to access and update SLI data structures. Note that, + * the EQ to CQ are one-to-one map such that the EQ index is + * equal to that of CQ index. 
* * This function returns IRQ_HANDLED when interrupt is handled else it * returns IRQ_NONE. **/ irqreturn_t -lpfc_sli4_sp_intr_handler(int irq, void *dev_id) +lpfc_sli4_fof_intr_handler(int irq, void *dev_id) { struct lpfc_hba *phba; - struct lpfc_queue *speq; + struct lpfc_fcp_eq_hdl *fcp_eq_hdl; + struct lpfc_queue *eq; struct lpfc_eqe *eqe; unsigned long iflag; int ecount = 0; + uint32_t eqidx; - /* - * Get the driver's phba structure from the dev_id - */ - phba = (struct lpfc_hba *)dev_id; + /* Get the driver's phba structure from the dev_id */ + fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id; + phba = fcp_eq_hdl->phba; + eqidx = fcp_eq_hdl->idx; if (unlikely(!phba)) return IRQ_NONE; /* Get to the EQ struct associated with this vector */ - speq = phba->sli4_hba.sp_eq; - if (unlikely(!speq)) + eq = phba->sli4_hba.fof_eq; + if (unlikely(!eq)) return IRQ_NONE; /* Check device state for handling interrupt */ if (unlikely(lpfc_intr_state_check(phba))) { + eq->EQ_badstate++; /* Check again for link_state with lock held */ spin_lock_irqsave(&phba->hbalock, iflag); if (phba->link_state < LPFC_LINK_DOWN) /* Flush, clear interrupt, and rearm the EQ */ - lpfc_sli4_eq_flush(phba, speq); + lpfc_sli4_eq_flush(phba, eq); spin_unlock_irqrestore(&phba->hbalock, iflag); return IRQ_NONE; } /* - * Process all the event on FCP slow-path EQ + * Process all the event on FCP fast-path EQ */ - while ((eqe = lpfc_sli4_eq_get(speq))) { - lpfc_sli4_sp_handle_eqe(phba, eqe); - if (!(++ecount % speq->entry_repost)) - lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM); + while ((eqe = lpfc_sli4_eq_get(eq))) { + lpfc_sli4_fof_handle_eqe(phba, eqe); + if (!(++ecount % eq->entry_repost)) + lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM); + eq->EQ_processed++; } - /* Always clear and re-arm the slow-path EQ */ - lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM); + /* Track the max number of EQEs processed in 1 intr */ + if (ecount > eq->EQ_max_eqe) + eq->EQ_max_eqe = ecount; + - /* Catch the no cq entry condition */ if (unlikely(ecount == 0)) { + eq->EQ_no_entry++; + if (phba->intr_type == MSIX) /* MSI-X treated interrupt served as no EQ share INT */ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "0357 MSI-X interrupt with no EQE\n"); - else + "9145 MSI-X interrupt with no EQE\n"); + else { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "9146 ISR interrupt with no EQE\n"); /* Non MSI-X treated on interrupt as EQ share INT */ return IRQ_NONE; + } } - + /* Always clear and re-arm the fast-path EQ */ + lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM); return IRQ_HANDLED; -} /* lpfc_sli4_sp_intr_handler */ +} /** - * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device + * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device * @irq: Interrupt number. * @dev_id: The device context pointer. * @@ -11586,11 +12550,16 @@ lpfc_sli4_sp_intr_handler(int irq, void *dev_id) * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is * equal to that of FCP CQ index. * + * The link attention and ELS ring attention events are handled + * by the worker thread. The interrupt handler signals the worker thread + * and returns for these events. This function is called without any lock + * held. It gets the hbalock to access and update SLI data structures. + * * This function returns IRQ_HANDLED when interrupt is handled else it * returns IRQ_NONE. 
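For MSI-X, this handler is registered once per io channel with the matching lpfc_fcp_eq_hdl as dev_id, which is how fcp_eq_hdl->idx selects the EQ above. A hedged sketch of that wiring; the production version lives in lpfc_init.c, and the msix_entries field and the "lpfc-hba-eq" name string are assumptions here.

/* Hedged registration sketch, not part of this patch. */
static int
my_request_hba_eq_vectors(struct lpfc_hba *phba)
{
	int idx, rc = 0;

	for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
		phba->sli4_hba.fcp_eq_hdl[idx].idx = idx;
		phba->sli4_hba.fcp_eq_hdl[idx].phba = phba;
		rc = request_irq(phba->sli4_hba.msix_entries[idx].vector,
				 &lpfc_sli4_hba_intr_handler, 0,
				 "lpfc-hba-eq",
				 &phba->sli4_hba.fcp_eq_hdl[idx]);
		if (rc)
			break;	/* a real caller would free_irq() the
				 * vectors already requested */
	}
	return rc;
}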
**/ irqreturn_t -lpfc_sli4_fp_intr_handler(int irq, void *dev_id) +lpfc_sli4_hba_intr_handler(int irq, void *dev_id) { struct lpfc_hba *phba; struct lpfc_fcp_eq_hdl *fcp_eq_hdl; @@ -11598,7 +12567,7 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id) struct lpfc_eqe *eqe; unsigned long iflag; int ecount = 0; - uint32_t fcp_eqidx; + int fcp_eqidx; /* Get the driver's phba structure from the dev_id */ fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id; @@ -11607,22 +12576,34 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id) if (unlikely(!phba)) return IRQ_NONE; - if (unlikely(!phba->sli4_hba.fp_eq)) + if (unlikely(!phba->sli4_hba.hba_eq)) return IRQ_NONE; /* Get to the EQ struct associated with this vector */ - fpeq = phba->sli4_hba.fp_eq[fcp_eqidx]; + fpeq = phba->sli4_hba.hba_eq[fcp_eqidx]; if (unlikely(!fpeq)) return IRQ_NONE; + if (lpfc_fcp_look_ahead) { + if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use)) + lpfc_sli4_eq_clr_intr(fpeq); + else { + atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); + return IRQ_NONE; + } + } + /* Check device state for handling interrupt */ if (unlikely(lpfc_intr_state_check(phba))) { + fpeq->EQ_badstate++; /* Check again for link_state with lock held */ spin_lock_irqsave(&phba->hbalock, iflag); if (phba->link_state < LPFC_LINK_DOWN) /* Flush, clear interrupt, and rearm the EQ */ lpfc_sli4_eq_flush(phba, fpeq); spin_unlock_irqrestore(&phba->hbalock, iflag); + if (lpfc_fcp_look_ahead) + atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); return IRQ_NONE; } @@ -11630,15 +12611,27 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id) * Process all the event on FCP fast-path EQ */ while ((eqe = lpfc_sli4_eq_get(fpeq))) { - lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx); + lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx); if (!(++ecount % fpeq->entry_repost)) lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); + fpeq->EQ_processed++; } + /* Track the max number of EQEs processed in 1 intr */ + if (ecount > fpeq->EQ_max_eqe) + fpeq->EQ_max_eqe = ecount; + /* Always clear and re-arm the fast-path EQ */ lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); if (unlikely(ecount == 0)) { + fpeq->EQ_no_entry++; + + if (lpfc_fcp_look_ahead) { + atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); + return IRQ_NONE; + } + if (phba->intr_type == MSIX) /* MSI-X treated interrupt served as no EQ share INT */ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, @@ -11648,6 +12641,8 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id) return IRQ_NONE; } + if (lpfc_fcp_look_ahead) + atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); return IRQ_HANDLED; } /* lpfc_sli4_fp_intr_handler */ @@ -11672,9 +12667,9 @@ irqreturn_t lpfc_sli4_intr_handler(int irq, void *dev_id) { struct lpfc_hba *phba; - irqreturn_t sp_irq_rc, fp_irq_rc; - bool fp_handled = false; - uint32_t fcp_eqidx; + irqreturn_t hba_irq_rc; + bool hba_handled = false; + int fcp_eqidx; /* Get the driver's phba structure from the dev_id */ phba = (struct lpfc_hba *)dev_id; @@ -11683,21 +12678,23 @@ lpfc_sli4_intr_handler(int irq, void *dev_id) return IRQ_NONE; /* - * Invokes slow-path host attention interrupt handling as appropriate. - */ - sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id); - - /* * Invoke fast-path host attention interrupt handling as appropriate. 
*/ - for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { - fp_irq_rc = lpfc_sli4_fp_intr_handler(irq, + for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) { + hba_irq_rc = lpfc_sli4_hba_intr_handler(irq, &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]); - if (fp_irq_rc == IRQ_HANDLED) - fp_handled |= true; + if (hba_irq_rc == IRQ_HANDLED) + hba_handled |= true; + } + + if (phba->cfg_fof) { + hba_irq_rc = lpfc_sli4_fof_intr_handler(irq, + &phba->sli4_hba.fcp_eq_hdl[0]); + if (hba_irq_rc == IRQ_HANDLED) + hba_handled |= true; } - return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc; + return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE; } /* lpfc_sli4_intr_handler */ /** @@ -11801,6 +12798,120 @@ out_fail: } /** + * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory + * @phba: HBA structure that indicates port to create a queue on. + * @pci_barset: PCI BAR set flag. + * + * This function shall perform iomap of the specified PCI BAR address to host + * memory address if not already done so and return it. The returned host + * memory address can be NULL. + */ +static void __iomem * +lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) +{ + struct pci_dev *pdev; + + if (!phba->pcidev) + return NULL; + else + pdev = phba->pcidev; + + switch (pci_barset) { + case WQ_PCI_BAR_0_AND_1: + return phba->pci_bar0_memmap_p; + case WQ_PCI_BAR_2_AND_3: + return phba->pci_bar2_memmap_p; + case WQ_PCI_BAR_4_AND_5: + return phba->pci_bar4_memmap_p; + default: + break; + } + return NULL; +} + +/** + * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs + * @phba: HBA structure that indicates port to create a queue on. + * @startq: The starting FCP EQ to modify + * + * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA. + * + * The @phba struct is used to send mailbox command to HBA. The @startq + * is used to get the starting FCP EQ to change. + * This function is asynchronous and will wait for the mailbox + * command to finish before continuing. + * + * On success this function will return a zero. If unable to allocate enough + * memory this function will return -ENOMEM. If the queue create mailbox command + * fails this function will return -ENXIO. 
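lpfc_modify_fcp_eq_delay() below spreads cfg_fcp_imax across the io channels and converts the resulting per-channel interrupt rate into the EQ delay multiplier (dmult), zero meaning no coalescing delay. A hedged usage sketch; the expected caller is the cfg_fcp_imax sysfs store path in lpfc_attr.c, and new_imax is a placeholder value.

/* Hedged usage sketch, not part of this patch. */
phba->cfg_fcp_imax = new_imax;		/* new interrupts-per-second target */
if (lpfc_modify_fcp_eq_delay(phba, 0))	/* start at EQ 0, update them all */
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"failed to update EQ delay multiplier\n");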
+ **/ +uint32_t +lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq) +{ + struct lpfc_mbx_modify_eq_delay *eq_delay; + LPFC_MBOXQ_t *mbox; + struct lpfc_queue *eq; + int cnt, rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + uint32_t result; + int fcp_eqidx; + union lpfc_sli4_cfg_shdr *shdr; + uint16_t dmult; + + if (startq >= phba->cfg_fcp_io_channel) + return 0; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_modify_eq_delay) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY, + length, LPFC_SLI4_MBX_EMBED); + eq_delay = &mbox->u.mqe.un.eq_delay; + + /* Calculate delay multiper from maximum interrupt per second */ + result = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel; + if (result > LPFC_DMULT_CONST) + dmult = 0; + else + dmult = LPFC_DMULT_CONST/result - 1; + + cnt = 0; + for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel; + fcp_eqidx++) { + eq = phba->sli4_hba.hba_eq[fcp_eqidx]; + if (!eq) + continue; + eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; + eq_delay->u.request.eq[cnt].phase = 0; + eq_delay->u.request.eq[cnt].delay_multi = dmult; + cnt++; + if (cnt >= LPFC_MAX_EQ_DELAY) + break; + } + eq_delay->u.request.num_eq = cnt; + + mbox->vport = phba->pport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->context1 = NULL; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2512 MODIFY_EQ_DELAY mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + } + mempool_free(mbox, phba->mbox_mem_pool); + return status; +} + +/** * lpfc_eq_create - Create an Event Queue on the HBA * @phba: HBA structure that indicates port to create a queue on. * @eq: The queue structure to use to create the event queue. @@ -11821,7 +12932,7 @@ out_fail: * fails this function will return -ENXIO. **/ uint32_t -lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax) +lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) { struct lpfc_mbx_eq_create *eq_create; LPFC_MBOXQ_t *mbox; @@ -11853,7 +12964,10 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax) LPFC_EQE_SIZE); bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); /* Calculate delay multiper from maximum interrupt per second */ - dmult = LPFC_DMULT_CONST/imax - 1; + if (imax > LPFC_DMULT_CONST) + dmult = 0; + else + dmult = LPFC_DMULT_CONST/imax - 1; bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, dmult); switch (eq->entry_count) { @@ -11987,8 +13101,10 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0361 Unsupported CQ count. 
(%d)\n", cq->entry_count); - if (cq->entry_count < 256) - return -EINVAL; + if (cq->entry_count < 256) { + status = -EINVAL; + goto out; + } /* otherwise default to smallest count (drop through) */ case 256: bf_set(lpfc_cq_context_count, &cq_create->u.request.context, @@ -12179,8 +13295,10 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0362 Unsupported MQ count. (%d)\n", mq->entry_count); - if (mq->entry_count < 16) - return -EINVAL; + if (mq->entry_count < 16) { + status = -EINVAL; + goto out; + } /* otherwise default to smallest count (drop through) */ case 16: bf_set(lpfc_mq_context_ring_size, @@ -12288,6 +13406,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, union lpfc_sli4_cfg_shdr *shdr; uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; struct dma_address *page; + void __iomem *bar_memmap_p; + uint32_t db_offset; + uint16_t pci_barset; /* sanity check on queue memory */ if (!wq || !cq) @@ -12309,9 +13430,44 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, wq->page_count); bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, cq->queue_id); + + /* wqv is the earliest version supported, NOT the latest */ bf_set(lpfc_mbox_hdr_version, &shdr->request, phba->sli4_hba.pc_sli4_params.wqv); - if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) { + + switch (phba->sli4_hba.pc_sli4_params.wqv) { + case LPFC_Q_CREATE_VERSION_0: + switch (wq->entry_size) { + default: + case 64: + /* Nothing to do, version 0 ONLY supports 64 byte */ + page = wq_create->u.request.page; + break; + case 128: + if (!(phba->sli4_hba.pc_sli4_params.wqsize & + LPFC_WQ_SZ128_SUPPORT)) { + status = -ERANGE; + goto out; + } + /* If we get here the HBA MUST also support V1 and + * we MUST use it + */ + bf_set(lpfc_mbox_hdr_version, &shdr->request, + LPFC_Q_CREATE_VERSION_1); + + bf_set(lpfc_mbx_wq_create_wqe_count, + &wq_create->u.request_1, wq->entry_count); + bf_set(lpfc_mbx_wq_create_wqe_size, + &wq_create->u.request_1, + LPFC_WQ_WQE_SIZE_128); + bf_set(lpfc_mbx_wq_create_page_size, + &wq_create->u.request_1, + (PAGE_SIZE/SLI4_PAGE_SIZE)); + page = wq_create->u.request_1.page; + break; + } + break; + case LPFC_Q_CREATE_VERSION_1: bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, wq->entry_count); switch (wq->entry_size) { @@ -12322,6 +13478,11 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, LPFC_WQ_WQE_SIZE_64); break; case 128: + if (!(phba->sli4_hba.pc_sli4_params.wqsize & + LPFC_WQ_SZ128_SUPPORT)) { + status = -ERANGE; + goto out; + } bf_set(lpfc_mbx_wq_create_wqe_size, &wq_create->u.request_1, LPFC_WQ_WQE_SIZE_128); @@ -12330,14 +13491,21 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, (PAGE_SIZE/SLI4_PAGE_SIZE)); page = wq_create->u.request_1.page; - } else { - page = wq_create->u.request.page; + break; + default: + status = -ERANGE; + goto out; } + list_for_each_entry(dmabuf, &wq->page_list, list) { memset(dmabuf->virt, 0, hw_page_size); page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); } + + if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) + bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1); + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); /* The IOCTL status is embedded in the mailbox subheader. 
*/ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); @@ -12355,6 +13523,48 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, status = -ENXIO; goto out; } + if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { + wq->db_format = bf_get(lpfc_mbx_wq_create_db_format, + &wq_create->u.response); + if ((wq->db_format != LPFC_DB_LIST_FORMAT) && + (wq->db_format != LPFC_DB_RING_FORMAT)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3265 WQ[%d] doorbell format not " + "supported: x%x\n", wq->queue_id, + wq->db_format); + status = -EINVAL; + goto out; + } + pci_barset = bf_get(lpfc_mbx_wq_create_bar_set, + &wq_create->u.response); + bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); + if (!bar_memmap_p) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3263 WQ[%d] failed to memmap pci " + "barset:x%x\n", wq->queue_id, + pci_barset); + status = -ENOMEM; + goto out; + } + db_offset = wq_create->u.response.doorbell_offset; + if ((db_offset != LPFC_ULP0_WQ_DOORBELL) && + (db_offset != LPFC_ULP1_WQ_DOORBELL)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3252 WQ[%d] doorbell offset not " + "supported: x%x\n", wq->queue_id, + db_offset); + status = -EINVAL; + goto out; + } + wq->db_regaddr = bar_memmap_p + db_offset; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3264 WQ[%d]: barset:x%x, offset:x%x, " + "format:x%x\n", wq->queue_id, pci_barset, + db_offset, wq->db_format); + } else { + wq->db_format = LPFC_DB_LIST_FORMAT; + wq->db_regaddr = phba->sli4_hba.WQDBregaddr; + } wq->type = LPFC_WQ; wq->assoc_qid = cq->queue_id; wq->subtype = subtype; @@ -12431,6 +13641,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; + void __iomem *bar_memmap_p; + uint32_t db_offset; + uint16_t pci_barset; /* sanity check on queue memory */ if (!hrq || !drq || !cq) @@ -12469,8 +13682,10 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2535 Unsupported RQ count. (%d)\n", hrq->entry_count); - if (hrq->entry_count < 512) - return -EINVAL; + if (hrq->entry_count < 512) { + status = -EINVAL; + goto out; + } /* otherwise default to smallest count (drop through) */ case 512: bf_set(lpfc_rq_context_rqe_count, @@ -12507,6 +13722,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); } + if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) + bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); /* The IOCTL status is embedded in the mailbox subheader. 
*/ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); @@ -12524,6 +13742,51 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, status = -ENXIO; goto out; } + + if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { + hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format, + &rq_create->u.response); + if ((hrq->db_format != LPFC_DB_LIST_FORMAT) && + (hrq->db_format != LPFC_DB_RING_FORMAT)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3262 RQ [%d] doorbell format not " + "supported: x%x\n", hrq->queue_id, + hrq->db_format); + status = -EINVAL; + goto out; + } + + pci_barset = bf_get(lpfc_mbx_rq_create_bar_set, + &rq_create->u.response); + bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); + if (!bar_memmap_p) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3269 RQ[%d] failed to memmap pci " + "barset:x%x\n", hrq->queue_id, + pci_barset); + status = -ENOMEM; + goto out; + } + + db_offset = rq_create->u.response.doorbell_offset; + if ((db_offset != LPFC_ULP0_RQ_DOORBELL) && + (db_offset != LPFC_ULP1_RQ_DOORBELL)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3270 RQ[%d] doorbell offset not " + "supported: x%x\n", hrq->queue_id, + db_offset); + status = -EINVAL; + goto out; + } + hrq->db_regaddr = bar_memmap_p + db_offset; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3266 RQ[qid:%d]: barset:x%x, offset:x%x, " + "format:x%x\n", hrq->queue_id, pci_barset, + db_offset, hrq->db_format); + } else { + hrq->db_format = LPFC_DB_RING_FORMAT; + hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; + } hrq->type = LPFC_HRQ; hrq->assoc_qid = cq->queue_id; hrq->subtype = subtype; @@ -12550,8 +13813,10 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2536 Unsupported RQ count. (%d)\n", drq->entry_count); - if (drq->entry_count < 512) - return -EINVAL; + if (drq->entry_count < 512) { + status = -EINVAL; + goto out; + } /* otherwise default to smallest count (drop through) */ case 512: bf_set(lpfc_rq_context_rqe_count, @@ -12587,6 +13852,8 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); } + if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) + bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); /* The IOCTL status is embedded in the mailbox subheader. 
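With the dual-chute (DUA) handling above, each WQ and RQ now carries its own db_format and db_regaddr taken from the create response (or the legacy WQDB/RQDB registers otherwise) instead of one global doorbell. A sketch of how a post routine consumes them; the bit-field helpers that encode the doorbell word depend on db_format and are deliberately elided rather than asserted.

/* Doorbell-ring sketch, illustrative only. */
struct lpfc_register doorbell;

doorbell.word0 = 0;
/* ... encode number-posted, host index and wq->queue_id according to
 * wq->db_format (LPFC_DB_LIST_FORMAT vs LPFC_DB_RING_FORMAT) ... */
writel(doorbell.word0, wq->db_regaddr);	/* ULP0 or ULP1 chute, per response */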
*/ shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; @@ -13021,9 +14288,7 @@ lpfc_sli4_alloc_xri(struct lpfc_hba *phba) } else { set_bit(xri, phba->sli4_hba.xri_bmask); phba->sli4_hba.max_cfg_param.xri_used++; - phba->sli4_hba.xri_count++; } - spin_unlock_irq(&phba->hbalock); return xri; } @@ -13039,7 +14304,6 @@ void __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) { if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) { - phba->sli4_hba.xri_count--; phba->sli4_hba.max_cfg_param.xri_used--; } } @@ -13075,46 +14339,45 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba) uint16_t xri_index; xri_index = lpfc_sli4_alloc_xri(phba); - if (xri_index != NO_XRI) - return xri_index; - - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2004 Failed to allocate XRI.last XRITAG is %d" - " Max XRI is %d, Used XRI is %d\n", - xri_index, - phba->sli4_hba.max_cfg_param.max_xri, - phba->sli4_hba.max_cfg_param.xri_used); - return NO_XRI; + if (xri_index == NO_XRI) + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "2004 Failed to allocate XRI.last XRITAG is %d" + " Max XRI is %d, Used XRI is %d\n", + xri_index, + phba->sli4_hba.max_cfg_param.max_xri, + phba->sli4_hba.max_cfg_param.xri_used); + return xri_index; } /** * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port. * @phba: pointer to lpfc hba data structure. + * @post_sgl_list: pointer to els sgl entry list. + * @count: number of els sgl entries on the list. * * This routine is invoked to post a block of driver's sgl pages to the * HBA using non-embedded mailbox command. No Lock is held. This routine * is only called when the driver is loading and after all IO has been * stopped. **/ -int -lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba) +static int +lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba, + struct list_head *post_sgl_list, + int post_cnt) { - struct lpfc_sglq *sglq_entry; + struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; struct lpfc_mbx_post_uembed_sgl_page1 *sgl; struct sgl_page_pairs *sgl_pg_pairs; void *viraddr; LPFC_MBOXQ_t *mbox; uint32_t reqlen, alloclen, pg_pairs; uint32_t mbox_tmo; - uint16_t xritag_start = 0, lxri = 0; - int els_xri_cnt, rc = 0; + uint16_t xritag_start = 0; + int rc = 0; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; - /* The number of sgls to be posted */ - els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); - - reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) + + reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) + sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); if (reqlen > SLI4_PAGE_SIZE) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, @@ -13144,25 +14407,8 @@ lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba) sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; sgl_pg_pairs = &sgl->sgl_pg_pairs; - for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) { - sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs]; - - /* - * Assign the sglq a physical xri only if the driver has not - * initialized those resources. A port reset only needs - * the sglq's posted. 
- */ - if (bf_get(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags) != - LPFC_XRI_RSRC_RDY) { - lxri = lpfc_sli4_next_xritag(phba); - if (lxri == NO_XRI) { - lpfc_sli4_mbox_cmd_free(phba, mbox); - return -ENOMEM; - } - sglq_entry->sli4_lxritag = lxri; - sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; - } - + pg_pairs = 0; + list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) { /* Set up the sge entry */ sgl_pg_pairs->sgl_pg0_addr_lo = cpu_to_le32(putPaddrLow(sglq_entry->phys)); @@ -13177,11 +14423,12 @@ lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba) if (pg_pairs == 0) xritag_start = sglq_entry->sli4_xritag; sgl_pg_pairs++; + pg_pairs++; } /* Complete initialization and perform endian conversion. */ bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); - bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt); + bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt); sgl->word0 = cpu_to_le32(sgl->word0); if (!phba->sli4_hba.intr_enable) rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); @@ -13201,181 +14448,6 @@ lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba) shdr_status, shdr_add_status, rc); rc = -ENXIO; } - - if (rc == 0) - bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, - LPFC_XRI_RSRC_RDY); - return rc; -} - -/** - * lpfc_sli4_post_els_sgl_list_ext - post a block of ELS sgls to the port. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to post a block of driver's sgl pages to the - * HBA using non-embedded mailbox command. No Lock is held. This routine - * is only called when the driver is loading and after all IO has been - * stopped. - **/ -int -lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba) -{ - struct lpfc_sglq *sglq_entry; - struct lpfc_mbx_post_uembed_sgl_page1 *sgl; - struct sgl_page_pairs *sgl_pg_pairs; - void *viraddr; - LPFC_MBOXQ_t *mbox; - uint32_t reqlen, alloclen, index; - uint32_t mbox_tmo; - uint16_t rsrc_start, rsrc_size, els_xri_cnt; - uint16_t xritag_start = 0, lxri = 0; - struct lpfc_rsrc_blks *rsrc_blk; - int cnt, ttl_cnt, rc = 0; - int loop_cnt; - uint32_t shdr_status, shdr_add_status; - union lpfc_sli4_cfg_shdr *shdr; - - /* The number of sgls to be posted */ - els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); - - reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) + - sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); - if (reqlen > SLI4_PAGE_SIZE) { - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "2989 Block sgl registration required DMA " - "size (%d) great than a page\n", reqlen); - return -ENOMEM; - } - - cnt = 0; - ttl_cnt = 0; - list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list, - list) { - rsrc_start = rsrc_blk->rsrc_start; - rsrc_size = rsrc_blk->rsrc_size; - - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3014 Working ELS Extent start %d, cnt %d\n", - rsrc_start, rsrc_size); - - loop_cnt = min(els_xri_cnt, rsrc_size); - if (ttl_cnt + loop_cnt >= els_xri_cnt) { - loop_cnt = els_xri_cnt - ttl_cnt; - ttl_cnt = els_xri_cnt; - } - - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) - return -ENOMEM; - /* - * Allocate DMA memory and set up the non-embedded mailbox - * command. 
- */ - alloclen = lpfc_sli4_config(phba, mbox, - LPFC_MBOX_SUBSYSTEM_FCOE, - LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, - reqlen, LPFC_SLI4_MBX_NEMBED); - if (alloclen < reqlen) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2987 Allocated DMA memory size (%d) " - "is less than the requested DMA memory " - "size (%d)\n", alloclen, reqlen); - lpfc_sli4_mbox_cmd_free(phba, mbox); - return -ENOMEM; - } - - /* Set up the SGL pages in the non-embedded DMA pages */ - viraddr = mbox->sge_array->addr[0]; - sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; - sgl_pg_pairs = &sgl->sgl_pg_pairs; - - /* - * The starting resource may not begin at zero. Control - * the loop variants via the block resource parameters, - * but handle the sge pointers with a zero-based index - * that doesn't get reset per loop pass. - */ - for (index = rsrc_start; - index < rsrc_start + loop_cnt; - index++) { - sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[cnt]; - - /* - * Assign the sglq a physical xri only if the driver - * has not initialized those resources. A port reset - * only needs the sglq's posted. - */ - if (bf_get(lpfc_xri_rsrc_rdy, - &phba->sli4_hba.sli4_flags) != - LPFC_XRI_RSRC_RDY) { - lxri = lpfc_sli4_next_xritag(phba); - if (lxri == NO_XRI) { - lpfc_sli4_mbox_cmd_free(phba, mbox); - rc = -ENOMEM; - goto err_exit; - } - sglq_entry->sli4_lxritag = lxri; - sglq_entry->sli4_xritag = - phba->sli4_hba.xri_ids[lxri]; - } - - /* Set up the sge entry */ - sgl_pg_pairs->sgl_pg0_addr_lo = - cpu_to_le32(putPaddrLow(sglq_entry->phys)); - sgl_pg_pairs->sgl_pg0_addr_hi = - cpu_to_le32(putPaddrHigh(sglq_entry->phys)); - sgl_pg_pairs->sgl_pg1_addr_lo = - cpu_to_le32(putPaddrLow(0)); - sgl_pg_pairs->sgl_pg1_addr_hi = - cpu_to_le32(putPaddrHigh(0)); - - /* Track the starting physical XRI for the mailbox. */ - if (index == rsrc_start) - xritag_start = sglq_entry->sli4_xritag; - sgl_pg_pairs++; - cnt++; - } - - /* Complete initialization and perform endian conversion. 
*/ - rsrc_blk->rsrc_used += loop_cnt; - bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); - bf_set(lpfc_post_sgl_pages_xricnt, sgl, loop_cnt); - sgl->word0 = cpu_to_le32(sgl->word0); - - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3015 Post ELS Extent SGL, start %d, " - "cnt %d, used %d\n", - xritag_start, loop_cnt, rsrc_blk->rsrc_used); - if (!phba->sli4_hba.intr_enable) - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); - else { - mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); - rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); - } - shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; - shdr_status = bf_get(lpfc_mbox_hdr_status, - &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, - &shdr->response); - if (rc != MBX_TIMEOUT) - lpfc_sli4_mbox_cmd_free(phba, mbox); - if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2988 POST_SGL_BLOCK mailbox " - "command failed status x%x " - "add_status x%x mbx status x%x\n", - shdr_status, shdr_add_status, rc); - rc = -ENXIO; - goto err_exit; - } - if (ttl_cnt >= els_xri_cnt) - break; - } - - err_exit: - if (rc == 0) - bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, - LPFC_XRI_RSRC_RDY); return rc; } @@ -13391,8 +14463,9 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba) * **/ int -lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist, - int cnt) +lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, + struct list_head *sblist, + int count) { struct lpfc_scsi_buf *psb; struct lpfc_mbx_post_uembed_sgl_page1 *sgl; @@ -13408,7 +14481,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist, union lpfc_sli4_cfg_shdr *shdr; /* Calculate the requested length of the dma memory */ - reqlen = cnt * sizeof(struct sgl_page_pairs) + + reqlen = count * sizeof(struct sgl_page_pairs) + sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); if (reqlen > SLI4_PAGE_SIZE) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, @@ -13492,169 +14565,6 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist, } /** - * lpfc_sli4_post_scsi_sgl_blk_ext - post a block of scsi sgls to the port. - * @phba: pointer to lpfc hba data structure. - * @sblist: pointer to scsi buffer list. - * @count: number of scsi buffers on the list. - * - * This routine is invoked to post a block of @count scsi sgl pages from a - * SCSI buffer list @sblist to the HBA using non-embedded mailbox command. - * No Lock is held. 
- * - **/ -int -lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *phba, struct list_head *sblist, - int cnt) -{ - struct lpfc_scsi_buf *psb = NULL; - struct lpfc_mbx_post_uembed_sgl_page1 *sgl; - struct sgl_page_pairs *sgl_pg_pairs; - void *viraddr; - LPFC_MBOXQ_t *mbox; - uint32_t reqlen, alloclen, pg_pairs; - uint32_t mbox_tmo; - uint16_t xri_start = 0, scsi_xri_start; - uint16_t rsrc_range; - int rc = 0, avail_cnt; - uint32_t shdr_status, shdr_add_status; - dma_addr_t pdma_phys_bpl1; - union lpfc_sli4_cfg_shdr *shdr; - struct lpfc_rsrc_blks *rsrc_blk; - uint32_t xri_cnt = 0; - - /* Calculate the total requested length of the dma memory */ - reqlen = cnt * sizeof(struct sgl_page_pairs) + - sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); - if (reqlen > SLI4_PAGE_SIZE) { - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "2932 Block sgl registration required DMA " - "size (%d) great than a page\n", reqlen); - return -ENOMEM; - } - - /* - * The use of extents requires the driver to post the sgl headers - * in multiple postings to meet the contiguous resource assignment. - */ - psb = list_prepare_entry(psb, sblist, list); - scsi_xri_start = phba->sli4_hba.scsi_xri_start; - list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list, - list) { - rsrc_range = rsrc_blk->rsrc_start + rsrc_blk->rsrc_size; - if (rsrc_range < scsi_xri_start) - continue; - else if (rsrc_blk->rsrc_used >= rsrc_blk->rsrc_size) - continue; - else - avail_cnt = rsrc_blk->rsrc_size - rsrc_blk->rsrc_used; - - reqlen = (avail_cnt * sizeof(struct sgl_page_pairs)) + - sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); - /* - * Allocate DMA memory and set up the non-embedded mailbox - * command. The mbox is used to post an SGL page per loop - * but the DMA memory has a use-once semantic so the mailbox - * is used and freed per loop pass. - */ - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2933 Failed to allocate mbox cmd " - "memory\n"); - return -ENOMEM; - } - alloclen = lpfc_sli4_config(phba, mbox, - LPFC_MBOX_SUBSYSTEM_FCOE, - LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, - reqlen, - LPFC_SLI4_MBX_NEMBED); - if (alloclen < reqlen) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2934 Allocated DMA memory size (%d) " - "is less than the requested DMA memory " - "size (%d)\n", alloclen, reqlen); - lpfc_sli4_mbox_cmd_free(phba, mbox); - return -ENOMEM; - } - - /* Get the first SGE entry from the non-embedded DMA memory */ - viraddr = mbox->sge_array->addr[0]; - - /* Set up the SGL pages in the non-embedded DMA pages */ - sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; - sgl_pg_pairs = &sgl->sgl_pg_pairs; - - /* pg_pairs tracks posted SGEs per loop iteration. */ - pg_pairs = 0; - list_for_each_entry_continue(psb, sblist, list) { - /* Set up the sge entry */ - sgl_pg_pairs->sgl_pg0_addr_lo = - cpu_to_le32(putPaddrLow(psb->dma_phys_bpl)); - sgl_pg_pairs->sgl_pg0_addr_hi = - cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl)); - if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) - pdma_phys_bpl1 = psb->dma_phys_bpl + - SGL_PAGE_SIZE; - else - pdma_phys_bpl1 = 0; - sgl_pg_pairs->sgl_pg1_addr_lo = - cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); - sgl_pg_pairs->sgl_pg1_addr_hi = - cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); - /* Keep the first xri for this extent. 
*/ - if (pg_pairs == 0) - xri_start = psb->cur_iocbq.sli4_xritag; - sgl_pg_pairs++; - pg_pairs++; - xri_cnt++; - - /* - * Track two exit conditions - the loop has constructed - * all of the caller's SGE pairs or all available - * resource IDs in this extent are consumed. - */ - if ((xri_cnt == cnt) || (pg_pairs >= avail_cnt)) - break; - } - rsrc_blk->rsrc_used += pg_pairs; - bf_set(lpfc_post_sgl_pages_xri, sgl, xri_start); - bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); - - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3016 Post SCSI Extent SGL, start %d, cnt %d " - "blk use %d\n", - xri_start, pg_pairs, rsrc_blk->rsrc_used); - /* Perform endian conversion if necessary */ - sgl->word0 = cpu_to_le32(sgl->word0); - if (!phba->sli4_hba.intr_enable) - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); - else { - mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); - rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); - } - shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, - &shdr->response); - if (rc != MBX_TIMEOUT) - lpfc_sli4_mbox_cmd_free(phba, mbox); - if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2935 POST_SGL_BLOCK mailbox command " - "failed status x%x add_status x%x " - "mbx status x%x\n", - shdr_status, shdr_add_status, rc); - return -ENXIO; - } - - /* Post only what is requested. */ - if (xri_cnt >= cnt) - break; - } - return rc; -} - -/** * lpfc_fc_frame_check - Check that this frame is a valid frame to handle * @phba: pointer to lpfc_hba struct that the frame was received on * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) @@ -13722,13 +14632,14 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) } lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "2538 Received frame rctl:%s type:%s " - "Frame Data:%08x %08x %08x %08x %08x %08x\n", - rctl_names[fc_hdr->fh_r_ctl], - type_names[fc_hdr->fh_type], + "2538 Received frame rctl:%s (x%x), type:%s (x%x), " + "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", + rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl, + type_names[fc_hdr->fh_type], fc_hdr->fh_type, be32_to_cpu(header[0]), be32_to_cpu(header[1]), be32_to_cpu(header[2]), be32_to_cpu(header[3]), - be32_to_cpu(header[4]), be32_to_cpu(header[5])); + be32_to_cpu(header[4]), be32_to_cpu(header[5]), + be32_to_cpu(header[6])); return 0; drop: lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, @@ -13778,8 +14689,13 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, uint32_t did = (fc_hdr->fh_d_id[0] << 16 | fc_hdr->fh_d_id[1] << 8 | fc_hdr->fh_d_id[2]); + if (did == Fabric_DID) return phba->pport; + if ((phba->pport->fc_flag & FC_PT2PT) && + !(phba->link_state == LPFC_HBA_READY)) + return phba->pport; + vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { @@ -14026,6 +14942,40 @@ lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, } /** + * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp + * @vport: pointer to a virtual port + * @dmabuf: pointer to a dmabuf that describes the FC sequence + * + * This function tries to abort the assembled sequence from the upper level + * protocol, described by the information from the basic abort @dmabuf. It + * checks to see whether such pending context exists at upper level protocol. + * If so, it shall clean up the pending context.
+ * + * Return + * true -- if there is matching pending context of the sequence cleaned + * at ulp; + * false -- if there is no matching pending context of the sequence present + * at ulp. + **/ +static bool +lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) +{ + struct lpfc_hba *phba = vport->phba; + int handled; + + /* Accepting abort at ulp with SLI4 only */ + if (phba->sli_rev < LPFC_SLI_REV4) + return false; + + /* Register all caring upper level protocols to attend abort */ + handled = lpfc_ct_handle_unsol_abort(phba, dmabuf); + if (handled) + return true; + + return false; +} + +/** * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler * @phba: Pointer to HBA context object. * @cmd_iocbq: pointer to the command iocbq structure. @@ -14040,8 +14990,21 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmd_iocbq, struct lpfc_iocbq *rsp_iocbq) { - if (cmd_iocbq) + struct lpfc_nodelist *ndlp; + + if (cmd_iocbq) { + ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1; + lpfc_nlp_put(ndlp); + lpfc_nlp_not_used(ndlp); lpfc_sli_release_iocbq(phba, cmd_iocbq); + } + + /* Failure means BLS ABORT RSP did not get delivered to remote node*/ + if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus) + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "3154 BLS ABORT RSP failed, data: x%x/x%x\n", + rsp_iocbq->iocb.ulpStatus, + rsp_iocbq->iocb.un.ulpWord[4]); } /** @@ -14065,7 +15028,6 @@ lpfc_sli4_xri_inrange(struct lpfc_hba *phba, return NO_XRI; } - /** * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort * @phba: Pointer to HBA context object. @@ -14075,12 +15037,13 @@ lpfc_sli4_xri_inrange(struct lpfc_hba *phba, * event after aborting the sequence handling. **/ static void -lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba, - struct fc_frame_header *fc_hdr) +lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, + struct fc_frame_header *fc_hdr, bool aborted) { + struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *ctiocb = NULL; struct lpfc_nodelist *ndlp; - uint16_t oxid, rxid; + uint16_t oxid, rxid, xri, lxri; uint32_t sid, fctl; IOCB_t *icmd; int rc; @@ -14092,15 +15055,28 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba, oxid = be16_to_cpu(fc_hdr->fh_ox_id); rxid = be16_to_cpu(fc_hdr->fh_rx_id); - ndlp = lpfc_findnode_did(phba->pport, sid); + ndlp = lpfc_findnode_did(vport, sid); if (!ndlp) { - lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, - "1268 Find ndlp returned NULL for oxid:x%x " - "SID:x%x\n", oxid, sid); - return; + ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); + if (!ndlp) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, + "1268 Failed to allocate ndlp for " + "oxid:x%x SID:x%x\n", oxid, sid); + return; + } + lpfc_nlp_init(vport, ndlp, sid); + /* Put ndlp onto pport node list */ + lpfc_enqueue_node(vport, ndlp); + } else if (!NLP_CHK_NODE_ACT(ndlp)) { + /* re-setup ndlp without removing from node list */ + ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); + if (!ndlp) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, + "3275 Failed to active ndlp found " + "for oxid:x%x SID:x%x\n", oxid, sid); + return; + } } - if (lpfc_sli4_xri_inrange(phba, rxid)) - lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0); /* Allocate buffer for rsp iocb */ ctiocb = lpfc_sli_get_iocbq(phba); @@ -14123,7 +15099,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba, icmd->ulpLe = 1; icmd->ulpClass = CLASS3; icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; - ctiocb->context1 = ndlp; + ctiocb->context1 = lpfc_nlp_get(ndlp); 
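/*
 * Editor's illustrative sketch -- not part of this patch. The abort-response
 * path above derives the 24-bit S_ID/D_ID and the big-endian 16-bit
 * OX_ID/RX_ID from the raw FC frame header (see the fh_d_id byte assembly in
 * lpfc_fc_frame_to_vport() and the sli4_did_from_fc_hdr()/sli4_fctl_from_fc_hdr()
 * macros added in lpfc_sli4.h). The standalone snippet below mirrors that
 * arithmetic; the struct is a simplified stand-in for struct fc_frame_header
 * with the 16-bit wire fields kept as byte arrays, so it compiles on its own
 * with no kernel dependencies.
 */
#include <stdint.h>

struct fc_hdr_sketch {
	uint8_t fh_r_ctl;
	uint8_t fh_d_id[3];	/* 24-bit destination address */
	uint8_t fh_cs_ctl;
	uint8_t fh_s_id[3];	/* 24-bit source address */
	uint8_t fh_type;
	uint8_t fh_f_ctl[3];	/* 24-bit frame control */
	uint8_t fh_seq_id;
	uint8_t fh_df_ctl;
	uint8_t fh_seq_cnt[2];
	uint8_t fh_ox_id[2];	/* big-endian on the wire */
	uint8_t fh_rx_id[2];
	uint8_t fh_parm_offset[4];
};

/* Same bit assembly as sli4_did_from_fc_hdr()/sli4_fctl_from_fc_hdr() */
static inline uint32_t fc_hdr_24bit(const uint8_t b[3])
{
	return ((uint32_t)b[0] << 16) | ((uint32_t)b[1] << 8) | b[2];
}

/* Equivalent of be16_to_cpu() on the on-the-wire exchange identifiers */
static inline uint16_t fc_hdr_be16(const uint8_t b[2])
{
	return (uint16_t)(((uint16_t)b[0] << 8) | b[1]);
}

/* Usage: sid = fc_hdr_24bit(h->fh_s_id); oxid = fc_hdr_be16(h->fh_ox_id); */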
ctiocb->iocb_cmpl = NULL; ctiocb->vport = phba->pport; @@ -14131,14 +15107,35 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba, ctiocb->sli4_lxritag = NO_XRI; ctiocb->sli4_xritag = NO_XRI; - /* If the oxid maps to the FCP XRI range or if it is out of range, - * send a BLS_RJT. The driver no longer has that exchange. - * Override the IOCB for a BA_RJT. + if (fctl & FC_FC_EX_CTX) + /* Exchange responder sent the abort so we + * own the oxid. + */ + xri = oxid; + else + xri = rxid; + lxri = lpfc_sli4_xri_inrange(phba, xri); + if (lxri != NO_XRI) + lpfc_set_rrq_active(phba, ndlp, lxri, + (xri == oxid) ? rxid : oxid, 0); + /* For BA_ABTS from exchange responder, if the logical xri with + * the oxid maps to the FCP XRI range, the port no longer has + * that exchange context, send a BLS_RJT. Override the IOCB for + * a BA_RJT. + */ + if ((fctl & FC_FC_EX_CTX) && + (lxri > lpfc_sli4_get_els_iocb_cnt(phba))) { + icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; + bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); + bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); + bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); + } + + /* If BA_ABTS failed to abort a partially assembled receive sequence, + * the driver no longer has that exchange, send a BLS_RJT. Override + * the IOCB for a BA_RJT. */ - if (oxid > (phba->sli4_hba.max_cfg_param.max_xri + - phba->sli4_hba.max_cfg_param.xri_base) || - oxid > (lpfc_sli4_get_els_iocb_cnt(phba) + - phba->sli4_hba.max_cfg_param.xri_base)) { + if (aborted == false) { icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); @@ -14151,29 +15148,30 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba, * field and RX_ID from ABTS for RX_ID field. */ bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); - bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); } else { /* ABTS sent by initiator to CT exchange, construction * of BA_ACC will need to allocate a new XRI as for the - * XRI_TAG and RX_ID fields. + * XRI_TAG field. 
*/ bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); - bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, NO_XRI); } + bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); /* Xmit CT abts response on exchange <xid> */ - lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", - icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", + icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); if (rc == IOCB_ERROR) { - lpfc_printf_log(phba, KERN_ERR, LOG_ELS, - "2925 Failed to issue CT ABTS RSP x%x on " - "xri x%x, Data x%x\n", - icmd->un.xseq64.w5.hcsw.Rctl, oxid, - phba->link_state); + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "2925 Failed to issue CT ABTS RSP x%x on " + "xri x%x, Data x%x\n", + icmd->un.xseq64.w5.hcsw.Rctl, oxid, + phba->link_state); + lpfc_nlp_put(ndlp); + ctiocb->context1 = NULL; lpfc_sli_release_iocbq(phba, ctiocb); } } @@ -14198,32 +15196,25 @@ lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, struct lpfc_hba *phba = vport->phba; struct fc_frame_header fc_hdr; uint32_t fctl; - bool abts_par; + bool aborted; /* Make a copy of fc_hdr before the dmabuf being released */ memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); fctl = sli4_fctl_from_fc_hdr(&fc_hdr); if (fctl & FC_FC_EX_CTX) { - /* - * ABTS sent by responder to exchange, just free the buffer - */ - lpfc_in_buf_free(phba, &dmabuf->dbuf); + /* ABTS by responder to exchange, no cleanup needed */ + aborted = true; } else { - /* - * ABTS sent by initiator to exchange, need to do cleanup - */ - /* Try to abort partially assembled seq */ - abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf); - - /* Send abort to ULP if partially seq abort failed */ - if (abts_par == false) - lpfc_sli4_send_seq_to_ulp(vport, dmabuf); - else - lpfc_in_buf_free(phba, &dmabuf->dbuf); + /* ABTS by initiator to exchange, need to do cleanup */ + aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf); + if (aborted == false) + aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf); } - /* Send basic accept (BA_ACC) to the abort requester */ - lpfc_sli4_seq_abort_rsp(phba, &fc_hdr); + lpfc_in_buf_free(phba, &dmabuf->dbuf); + + /* Respond with BA_ACC or BA_RJT accordingly */ + lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted); } /** @@ -14310,7 +15301,15 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) /* Initialize the first IOCB. 
*/ first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; - first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; + + /* Check FC Header to see what TYPE of frame we are rcv'ing */ + if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) { + first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX; + first_iocbq->iocb.un.rcvels.parmRo = + sli4_did_from_fc_hdr(fc_hdr); + first_iocbq->iocb.ulpPU = PARM_NPIV_DID; + } else + first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; first_iocbq->iocb.ulpContext = NO_XRI; first_iocbq->iocb.unsli3.rcvsli3.ox_id = be16_to_cpu(fc_hdr->fh_ox_id); @@ -14318,14 +15317,20 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) first_iocbq->iocb.unsli3.rcvsli3.vpi = vport->phba->vpi_ids[vport->vpi]; /* put the first buffer into the first IOCBq */ + tot_len = bf_get(lpfc_rcqe_length, + &seq_dmabuf->cq_event.cqe.rcqe_cmpl); + first_iocbq->context2 = &seq_dmabuf->dbuf; first_iocbq->context3 = NULL; first_iocbq->iocb.ulpBdeCount = 1; - first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = + if (tot_len > LPFC_DATA_BUF_SIZE) + first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = LPFC_DATA_BUF_SIZE; + else + first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len; + first_iocbq->iocb.un.rcvels.remoteID = sid; - tot_len = bf_get(lpfc_rcqe_length, - &seq_dmabuf->cq_event.cqe.rcqe_cmpl); + first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; } iocbq = first_iocbq; @@ -14341,14 +15346,17 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) if (!iocbq->context3) { iocbq->context3 = d_buf; iocbq->iocb.ulpBdeCount++; - pbde = (struct ulp_bde64 *) - &iocbq->iocb.unsli3.sli3Words[4]; - pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; - /* We need to get the size out of the right CQE */ hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); len = bf_get(lpfc_rcqe_length, &hbq_buf->cq_event.cqe.rcqe_cmpl); + pbde = (struct ulp_bde64 *) + &iocbq->iocb.unsli3.sli3Words[4]; + if (len > LPFC_DATA_BUF_SIZE) + pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; + else + pbde->tus.f.bdeSize = len; + iocbq->iocb.unsli3.rcvsli3.acc_len += len; tot_len += len; } else { @@ -14363,16 +15371,19 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) lpfc_in_buf_free(vport->phba, d_buf); continue; } + /* We need to get the size out of the right CQE */ + hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); + len = bf_get(lpfc_rcqe_length, + &hbq_buf->cq_event.cqe.rcqe_cmpl); iocbq->context2 = d_buf; iocbq->context3 = NULL; iocbq->iocb.ulpBdeCount = 1; - iocbq->iocb.un.cont64[0].tus.f.bdeSize = + if (len > LPFC_DATA_BUF_SIZE) + iocbq->iocb.un.cont64[0].tus.f.bdeSize = LPFC_DATA_BUF_SIZE; + else + iocbq->iocb.un.cont64[0].tus.f.bdeSize = len; - /* We need to get the size out of the right CQE */ - hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); - len = bf_get(lpfc_rcqe_length, - &hbq_buf->cq_event.cqe.rcqe_cmpl); tot_len += len; iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; @@ -14440,6 +15451,7 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr; struct lpfc_vport *vport; uint32_t fcfi; + uint32_t did; /* Process each received buffer */ fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; @@ -14455,12 +15467,32 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, else fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl); + vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); - if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) { + if (!vport) { /* throw out the frame 
*/ lpfc_in_buf_free(phba, &dmabuf->dbuf); return; } + + /* d_id this frame is directed to */ + did = sli4_did_from_fc_hdr(fc_hdr); + + /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */ + if (!(vport->vpi_state & LPFC_VPI_REGISTERED) && + (did != Fabric_DID)) { + /* + * Throw out the frame if we are not pt2pt. + * The pt2pt protocol allows for discovery frames + * to be received without a registered VPI. + */ + if (!(vport->fc_flag & FC_PT2PT) || + (phba->link_state == LPFC_HBA_READY)) { + lpfc_in_buf_free(phba, &dmabuf->dbuf); + return; + } + } + /* Handle the basic abort sequence (BA_ABTS) event */ if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { lpfc_sli4_handle_unsol_abort(vport, dmabuf); @@ -14633,6 +15665,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) uint16_t max_rpi, rpi_limit; uint16_t rpi_remaining, lrpi = 0; struct lpfc_rpi_hdr *rpi_hdr; + unsigned long iflag; max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; rpi_limit = phba->sli4_hba.next_rpi; @@ -14641,7 +15674,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) * Fetch the next logical rpi. Because this index is logical, * the driver starts at 0 each time. */ - spin_lock_irq(&phba->hbalock); + spin_lock_irqsave(&phba->hbalock, iflag); rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0); if (rpi >= rpi_limit) rpi = LPFC_RPI_ALLOC_ERROR; @@ -14657,7 +15690,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) */ if ((rpi == LPFC_RPI_ALLOC_ERROR) && (phba->sli4_hba.rpi_count >= max_rpi)) { - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflag); return rpi; } @@ -14666,7 +15699,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) * extents. */ if (!phba->sli4_hba.rpi_hdrs_in_use) { - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflag); return rpi; } @@ -14677,7 +15710,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) * how many are supported max by the device. */ rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count; - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflag); if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); if (!rpi_hdr) { @@ -14748,7 +15781,8 @@ lpfc_sli4_remove_rpis(struct lpfc_hba *phba) * provided rpi via a bitmask. **/ int -lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp) +lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp, + void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg) { LPFC_MBOXQ_t *mboxq; struct lpfc_hba *phba = ndlp->phba; @@ -14761,6 +15795,13 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp) /* Post all rpi memory regions to the port. 
*/ lpfc_resume_rpi(mboxq, ndlp); + if (cmpl) { + mboxq->mbox_cmpl = cmpl; + mboxq->context1 = arg; + mboxq->context2 = ndlp; + } else + mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mboxq->vport = ndlp->vport; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, @@ -14982,6 +16023,7 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) LPFC_MBOXQ_t *mboxq; phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; + phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -15143,11 +16185,18 @@ lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba) LPFC_SLI4_FCF_TBL_INDX_MAX); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "3060 Last IDX %d\n", last_index); - if (list_empty(&phba->fcf.fcf_pri_list)) { + + /* Verify the priority list has 2 or more entries */ + spin_lock_irq(&phba->hbalock); + if (list_empty(&phba->fcf.fcf_pri_list) || + list_is_singular(&phba->fcf.fcf_pri_list)) { + spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_ERR, LOG_FIP, "3061 Last IDX %d\n", last_index); return 0; /* Empty rr list */ } + spin_unlock_irq(&phba->hbalock); + next_fcf_pri = 0; /* * Clear the rr_bmask and set all of the bits that are at this @@ -15177,7 +16226,7 @@ lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba) /* * if next_fcf_pri was not set above and the list is not empty then * we have failed flogis on all of them. So reset flogi failed - * and start at the begining. + * and start at the beginning. */ if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) { list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { @@ -15218,10 +16267,13 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) { uint16_t next_fcf_index; +initial_priority: /* Search start from next bit of currently registered FCF index */ + next_fcf_index = phba->fcf.current_rec.fcf_indx; + next_priority: - next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) % - LPFC_SLI4_FCF_TBL_INDX_MAX; + /* Determine the next fcf index to check */ + next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX; next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, LPFC_SLI4_FCF_TBL_INDX_MAX, next_fcf_index); @@ -15248,7 +16300,7 @@ next_priority: * at that level and continue the selection process. 
*/ if (lpfc_check_next_fcf_pri_level(phba)) - goto next_priority; + goto initial_priority; lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, "2844 No roundrobin failover FCF available\n"); if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) @@ -15318,7 +16370,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) { - struct lpfc_fcf_pri *fcf_pri; + struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next; if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { lpfc_printf_log(phba, KERN_ERR, LOG_FIP, "2762 FCF (x%x) reached driver's book " @@ -15328,7 +16380,8 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) } /* Clear the eligible FCF record index bmask */ spin_lock_irq(&phba->hbalock); - list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { + list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list, + list) { if (fcf_pri->fcf_rec.fcf_index == fcf_index) { list_del_init(&fcf_pri->list); break; @@ -15898,41 +16951,41 @@ lpfc_drain_txq(struct lpfc_hba *phba) { LIST_HEAD(completions); struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; - struct lpfc_iocbq *piocbq = 0; + struct lpfc_iocbq *piocbq = NULL; unsigned long iflags = 0; char *fail_msg = NULL; struct lpfc_sglq *sglq; union lpfc_wqe wqe; + int txq_cnt = 0; - spin_lock_irqsave(&phba->hbalock, iflags); - if (pring->txq_cnt > pring->txq_max) - pring->txq_max = pring->txq_cnt; + spin_lock_irqsave(&pring->ring_lock, iflags); + list_for_each_entry(piocbq, &pring->txq, list) { + txq_cnt++; + } - spin_unlock_irqrestore(&phba->hbalock, iflags); + if (txq_cnt > pring->txq_max) + pring->txq_max = txq_cnt; - while (pring->txq_cnt) { - spin_lock_irqsave(&phba->hbalock, iflags); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + + while (!list_empty(&pring->txq)) { + spin_lock_irqsave(&pring->ring_lock, iflags); piocbq = lpfc_sli_ringtx_get(phba, pring); + if (!piocbq) { + spin_unlock_irqrestore(&pring->ring_lock, iflags); + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2823 txq empty and txq_cnt is %d\n ", + txq_cnt); + break; + } sglq = __lpfc_sli_get_sglq(phba, piocbq); if (!sglq) { __lpfc_sli_ringtx_put(phba, pring, piocbq); - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irqrestore(&pring->ring_lock, iflags); break; - } else { - if (!piocbq) { - /* The txq_cnt out of sync. This should - * never happen - */ - sglq = __lpfc_clear_active_sglq(phba, - sglq->sli4_lxritag); - spin_unlock_irqrestore(&phba->hbalock, iflags); - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2823 txq empty and txq_cnt is %d\n ", - pring->txq_cnt); - break; - } } + txq_cnt--; /* The xri and iocb resources secured, * attempt to issue request @@ -15957,12 +17010,12 @@ lpfc_drain_txq(struct lpfc_hba *phba) piocbq->iotag, piocbq->sli4_xritag); list_add_tail(&piocbq->list, &completions); } - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irqrestore(&pring->ring_lock, iflags); } /* Cancel all the IOCBs that cannot be issued */ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); - return pring->txq_cnt; + return txq_cnt; } diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 29c13b63e32..edb48832c39 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. 
* - * Copyright (C) 2004-2007 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -58,9 +58,10 @@ struct lpfc_iocbq { IOCB_t iocb; /* IOCB cmd */ uint8_t retry; /* retry counter for IOCB cmd - if needed */ - uint16_t iocb_flag; + uint32_t iocb_flag; #define LPFC_IO_LIBDFC 1 /* libdfc iocb */ -#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */ +#define LPFC_IO_WAKE 2 /* Synchronous I/O completed */ +#define LPFC_IO_WAKE_TMO LPFC_IO_WAKE /* Synchronous I/O timed out */ #define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */ #define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ #define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */ @@ -68,12 +69,17 @@ struct lpfc_iocbq { #define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */ #define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */ #define DSS_SECURITY_OP 0x100 /* security IO */ -#define LPFC_IO_ON_Q 0x200 /* The IO is still on the TXCMPLQ */ +#define LPFC_IO_ON_TXCMPLQ 0x200 /* The IO is still on the TXCMPLQ */ +#define LPFC_IO_DIF_PASS 0x400 /* T10 DIF IO pass-thru prot */ +#define LPFC_IO_DIF_STRIP 0x800 /* T10 DIF IO strip prot */ +#define LPFC_IO_DIF_INSERT 0x1000 /* T10 DIF IO insert prot */ +#define LPFC_IO_CMD_OUTSTANDING 0x2000 /* timeout handler abort window */ #define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */ #define LPFC_FIP_ELS_ID_SHIFT 14 - uint8_t rsvd2; +#define LPFC_IO_OAS 0x10000 /* OAS FCP IO */ + uint32_t drvrTimeout; /* driver timeout in seconds */ uint32_t fcp_wqidx; /* index to FCP work queue */ struct lpfc_vport *vport;/* virtual port pointer */ @@ -90,6 +96,8 @@ struct lpfc_iocbq { void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); + void (*wait_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, + struct lpfc_iocbq *); void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); }; @@ -130,7 +138,9 @@ typedef struct lpfcMboxq { #define LPFC_MAX_RING_MASK 5 /* max num of rctl/type masks allowed per ring */ -#define LPFC_MAX_RING 4 /* max num of SLI rings used by driver */ +#define LPFC_SLI3_MAX_RING 4 /* Max num of SLI3 rings used by driver. + For SLI4, an additional ring for each + FCP WQ will be allocated. 
*/ struct lpfc_sli_ring; @@ -157,6 +167,24 @@ struct lpfc_sli_ring_stat { uint64_t iocb_rsp_full; /* IOCB rsp ring full */ }; +struct lpfc_sli3_ring { + uint32_t local_getidx; /* last available cmd index (from cmdGetInx) */ + uint32_t next_cmdidx; /* next_cmd index */ + uint32_t rspidx; /* current index in response ring */ + uint32_t cmdidx; /* current index in command ring */ + uint16_t numCiocb; /* number of command iocb's per ring */ + uint16_t numRiocb; /* number of rsp iocb's per ring */ + uint16_t sizeCiocb; /* Size of command iocb's in this ring */ + uint16_t sizeRiocb; /* Size of response iocb's in this ring */ + uint32_t *cmdringaddr; /* virtual address for cmd rings */ + uint32_t *rspringaddr; /* virtual address for rsp rings */ +}; + +struct lpfc_sli4_ring { + struct lpfc_queue *wqp; /* Pointer to associated WQ */ +}; + + /* Structure used to hold SLI ring information */ struct lpfc_sli_ring { uint16_t flag; /* ring flags */ @@ -165,16 +193,10 @@ struct lpfc_sli_ring { #define LPFC_STOP_IOCB_EVENT 0x020 /* Stop processing IOCB cmds event */ uint16_t abtsiotag; /* tracks next iotag to use for ABTS */ - uint32_t local_getidx; /* last available cmd index (from cmdGetInx) */ - uint32_t next_cmdidx; /* next_cmd index */ - uint32_t rspidx; /* current index in response ring */ - uint32_t cmdidx; /* current index in command ring */ uint8_t rsvd; uint8_t ringno; /* ring number */ - uint16_t numCiocb; /* number of command iocb's per ring */ - uint16_t numRiocb; /* number of rsp iocb's per ring */ - uint16_t sizeCiocb; /* Size of command iocb's in this ring */ - uint16_t sizeRiocb; /* Size of response iocb's in this ring */ + + spinlock_t ring_lock; /* lock for issuing commands */ uint32_t fast_iotag; /* max fastlookup based iotag */ uint32_t iotag_ctr; /* keeps track of the next iotag to use */ @@ -185,8 +207,6 @@ struct lpfc_sli_ring { struct list_head txcmplq; uint16_t txcmplq_cnt; /* current length of queue */ uint16_t txcmplq_max; /* max length */ - uint32_t *cmdringaddr; /* virtual address for cmd rings */ - uint32_t *rspringaddr; /* virtual address for rsp rings */ uint32_t missbufcnt; /* keep track of buffers to post */ struct list_head postbufq; uint16_t postbufq_cnt; /* current length of queue */ @@ -206,6 +226,10 @@ struct lpfc_sli_ring { /* cmd ring available */ void (*lpfc_sli_cmd_available) (struct lpfc_hba *, struct lpfc_sli_ring *); + union { + struct lpfc_sli3_ring sli3; + struct lpfc_sli4_ring sli4; + } sli; }; /* Structure used for configuring rings to a specific profile or rctl / type */ @@ -238,6 +262,8 @@ struct lpfc_sli_stat { uint64_t mbox_stat_err; /* Mbox cmds completed status error */ uint64_t mbox_cmd; /* Mailbox commands issued */ uint64_t sli_intr; /* Count of Host Attention interrupts */ + uint64_t sli_prev_intr; /* Previous cnt of Host Attention interrupts */ + uint64_t sli_ips; /* Host Attention interrupts per sec */ uint32_t err_attn_event; /* Error Attn event counters */ uint32_t link_event; /* Link event counters */ uint32_t mbox_event; /* Mailbox event counters */ @@ -269,7 +295,7 @@ struct lpfc_sli { #define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */ #define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */ - struct lpfc_sli_ring ring[LPFC_MAX_RING]; + struct lpfc_sli_ring *ring; int fcp_ring; /* ring used for FCP initiator commands */ int next_ring; diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 3f266e2c54e..7f50aa04d66 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ 
b/drivers/scsi/lpfc/lpfc_sli4.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2009-2011 Emulex. All rights reserved. * + * Copyright (C) 2009-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -34,18 +34,14 @@ /* Number of SGL entries can be posted in a 4KB nonembedded mbox command */ #define LPFC_NEMBED_MBOX_SGL_CNT 254 -/* Multi-queue arrangement for fast-path FCP work queues */ -#define LPFC_FN_EQN_MAX 8 -#define LPFC_SP_EQN_DEF 1 -#define LPFC_FP_EQN_DEF 4 -#define LPFC_FP_EQN_MIN 1 -#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF) +/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */ +#define LPFC_FCP_IO_CHAN_DEF 4 +#define LPFC_FCP_IO_CHAN_MIN 1 +#define LPFC_FCP_IO_CHAN_MAX 16 -#define LPFC_FN_WQN_MAX 32 -#define LPFC_SP_WQN_DEF 1 -#define LPFC_FP_WQN_DEF 4 -#define LPFC_FP_WQN_MIN 1 -#define LPFC_FP_WQN_MAX (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF) +/* Number of channels used for Flash Optimized Fabric (FOF) operations */ + +#define LPFC_FOF_IO_CHAN_NUM 1 /* * Provide the default FCF Record attributes used by the driver @@ -75,13 +71,24 @@ (fc_hdr)->fh_s_id[1] << 8 | \ (fc_hdr)->fh_s_id[2]) +#define sli4_did_from_fc_hdr(fc_hdr) \ + ((fc_hdr)->fh_d_id[0] << 16 | \ + (fc_hdr)->fh_d_id[1] << 8 | \ + (fc_hdr)->fh_d_id[2]) + #define sli4_fctl_from_fc_hdr(fc_hdr) \ ((fc_hdr)->fh_f_ctl[0] << 16 | \ (fc_hdr)->fh_f_ctl[1] << 8 | \ (fc_hdr)->fh_f_ctl[2]) +#define sli4_type_from_fc_hdr(fc_hdr) \ + ((fc_hdr)->fh_type) + #define LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT 12000 +#define INT_FW_UPGRADE 0 +#define RUN_FW_UPGRADE 1 + enum lpfc_sli4_queue_type { LPFC_EQ, LPFC_GCQ, @@ -114,6 +121,7 @@ union sli4_qe { struct lpfc_rcqe_complete *rcqe_complete; struct lpfc_mqe *mqe; union lpfc_wqe *wqe; + union lpfc_wqe128 *wqe128; struct lpfc_rqe *rqe; }; @@ -133,11 +141,46 @@ struct lpfc_queue { uint32_t page_count; /* Number of pages allocated for this queue */ uint32_t host_index; /* The host's index for putting or getting */ uint32_t hba_index; /* The last known hba index for get or put */ + + struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */ + + uint16_t db_format; +#define LPFC_DB_RING_FORMAT 0x01 +#define LPFC_DB_LIST_FORMAT 0x02 + void __iomem *db_regaddr; + /* For q stats */ + uint32_t q_cnt_1; + uint32_t q_cnt_2; + uint32_t q_cnt_3; + uint64_t q_cnt_4; +/* defines for EQ stats */ +#define EQ_max_eqe q_cnt_1 +#define EQ_no_entry q_cnt_2 +#define EQ_badstate q_cnt_3 +#define EQ_processed q_cnt_4 + +/* defines for CQ stats */ +#define CQ_mbox q_cnt_1 +#define CQ_max_cqe q_cnt_1 +#define CQ_release_wqe q_cnt_2 +#define CQ_xri_aborted q_cnt_3 +#define CQ_wq q_cnt_4 + +/* defines for WQ stats */ +#define WQ_overflow q_cnt_1 +#define WQ_posted q_cnt_4 + +/* defines for RQ stats */ +#define RQ_no_posted_buf q_cnt_1 +#define RQ_no_buf_found q_cnt_2 +#define RQ_buf_trunc q_cnt_3 +#define RQ_rcv_buf q_cnt_4 + union sli4_qe qe[1]; /* array to index entries (must be last) */ }; struct lpfc_sli4_link { - uint8_t speed; + uint16_t speed; uint8_t duplex; uint8_t status; uint8_t type; @@ -287,12 +330,14 @@ struct lpfc_bmbx { #define LPFC_EQE_SIZE_16B 16 #define LPFC_CQE_SIZE 16 #define LPFC_WQE_SIZE 64 +#define LPFC_WQE128_SIZE 128 #define LPFC_MQE_SIZE 256 #define LPFC_RQE_SIZE 8 #define LPFC_EQE_DEF_COUNT 1024 #define LPFC_CQE_DEF_COUNT 1024 #define LPFC_WQE_DEF_COUNT 256 +#define 
LPFC_WQE128_DEF_COUNT 128 #define LPFC_MQE_DEF_COUNT 16 #define LPFC_RQE_DEF_COUNT 512 @@ -308,11 +353,6 @@ struct lpfc_bmbx { #define SLI4_CT_VFI 2 #define SLI4_CT_FCFI 3 -#define LPFC_SLI4_FL1_MAX_SEGMENT_SIZE 0x10000 -#define LPFC_SLI4_FL1_MAX_BUF_SIZE 0X2000 -#define LPFC_SLI4_MIN_BUF_SIZE 0x400 -#define LPFC_SLI4_MAX_BUF_SIZE 0x20000 - /* * SLI4 specific data structures */ @@ -342,6 +382,7 @@ struct lpfc_hba; struct lpfc_fcp_eq_hdl { uint32_t idx; struct lpfc_hba *phba; + atomic_t fcp_eq_in_use; }; /* Port Capabilities for SLI4 Parameters */ @@ -362,6 +403,7 @@ struct lpfc_pc_sli4_params { uint32_t if_page_sz; uint32_t rq_db_window; uint32_t loopbk_scope; + uint32_t oas_supported; uint32_t eq_pages_max; uint32_t eqe_size; uint32_t cq_pages_max; @@ -382,6 +424,9 @@ struct lpfc_pc_sli4_params { uint8_t mqv; uint8_t wqv; uint8_t rqv; + uint8_t wqsize; +#define LPFC_WQ_SZ64_SUPPORT 1 +#define LPFC_WQ_SZ128_SUPPORT 2 }; struct lpfc_iov { @@ -399,6 +444,20 @@ struct lpfc_sli4_lnk_info { uint8_t lnk_no; }; +#define LPFC_SLI4_HANDLER_CNT (LPFC_FCP_IO_CHAN_MAX+ \ + LPFC_FOF_IO_CHAN_NUM) +#define LPFC_SLI4_HANDLER_NAME_SZ 16 + +/* Used for IRQ vector to CPU mapping */ +struct lpfc_vector_map_info { + uint16_t phys_id; + uint16_t core_id; + uint16_t irq; + uint16_t channel_id; + struct cpumask maskbits; +}; +#define LPFC_VECTOR_MAP_EMPTY 0xffff + /* SLI4 HBA data structure entries */ struct lpfc_sli4_hba { void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for @@ -455,20 +514,36 @@ struct lpfc_sli4_hba { struct lpfc_register sli_intf; struct lpfc_pc_sli4_params pc_sli4_params; struct msix_entry *msix_entries; - uint32_t cfg_eqn; - uint32_t msix_vec_nr; + uint8_t handler_name[LPFC_SLI4_HANDLER_CNT][LPFC_SLI4_HANDLER_NAME_SZ]; struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ + /* Pointers to the constructed SLI4 queues */ - struct lpfc_queue **fp_eq; /* Fast-path event queue */ - struct lpfc_queue *sp_eq; /* Slow-path event queue */ + struct lpfc_queue **hba_eq;/* Event queues for HBA */ + struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */ struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */ + uint16_t *fcp_cq_map; + + struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */ + struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */ struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */ struct lpfc_queue *els_wq; /* Slow-path ELS work queue */ struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */ struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */ - struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */ - struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */ - struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */ + + uint32_t fw_func_mode; /* FW function protocol mode */ + uint32_t ulp0_mode; /* ULP0 protocol mode */ + uint32_t ulp1_mode; /* ULP1 protocol mode */ + + struct lpfc_queue *fof_eq; /* Flash Optimized Fabric Event queue */ + + /* Optimized Access Storage specific queues/structures */ + + struct lpfc_queue *oas_cq; /* OAS completion queue */ + struct lpfc_queue *oas_wq; /* OAS Work queue */ + struct lpfc_sli_ring *oas_ring; + uint64_t oas_next_lun; + uint8_t oas_next_tgt_wwpn[8]; + uint8_t oas_next_vpt_wwpn[8]; /* Setup information for various queue parameters */ int eq_esize; @@ -493,14 +568,12 @@ struct lpfc_sli4_hba { uint16_t next_rpi; uint16_t scsi_xri_max; uint16_t scsi_xri_cnt; + uint16_t els_xri_cnt; uint16_t scsi_xri_start; struct list_head lpfc_free_sgl_list; 
struct list_head lpfc_sgl_list; - struct lpfc_sglq **lpfc_els_sgl_array; struct list_head lpfc_abts_els_sgl_list; - struct lpfc_scsi_buf **lpfc_scsi_psb_array; struct list_head lpfc_abts_scsi_buf_list; - uint32_t total_sglq_bufs; struct lpfc_sglq **lpfc_sglq_active_list; struct list_head lpfc_rpi_hdr_list; unsigned long *rpi_bmask; @@ -509,7 +582,6 @@ struct lpfc_sli4_hba { struct list_head lpfc_rpi_blk_list; unsigned long *xri_bmask; uint16_t *xri_ids; - uint16_t xri_count; struct list_head lpfc_xri_blk_list; unsigned long *vfi_bmask; uint16_t *vfi_ids; @@ -530,6 +602,12 @@ struct lpfc_sli4_hba { struct lpfc_iov iov; spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */ + + /* CPU to vector mapping information */ + struct lpfc_vector_map_info *cpu_map; + uint16_t num_online_cpu; + uint16_t num_present_cpu; + uint16_t curr_disp_cpu; }; enum lpfc_sge_type { @@ -592,7 +670,8 @@ void lpfc_sli4_hba_reset(struct lpfc_hba *); struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, uint32_t); void lpfc_sli4_queue_free(struct lpfc_queue *); -uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t); +uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t); +uint32_t lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint16_t); uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_queue *, uint32_t, uint32_t); int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *, @@ -613,12 +692,9 @@ void lpfc_sli4_queue_unset(struct lpfc_hba *); int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t); int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *); uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *); +void lpfc_sli4_free_xri(struct lpfc_hba *, int); int lpfc_sli4_post_async_mbox(struct lpfc_hba *); -int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba); -int lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba); int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int); -int lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *, struct list_head *, - int); struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *); struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *); void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *); @@ -633,7 +709,8 @@ void lpfc_sli4_free_rpi(struct lpfc_hba *, int); void lpfc_sli4_remove_rpis(struct lpfc_hba *); void lpfc_sli4_async_event_proc(struct lpfc_hba *); void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *); -int lpfc_sli4_resume_rpi(struct lpfc_nodelist *); +int lpfc_sli4_resume_rpi(struct lpfc_nodelist *, + void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *); void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *); void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *); void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *, diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index dd044d01a07..41675c1193e 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2011 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -18,11 +18,16 @@ * included with this package. 
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.28" +#define LPFC_DRIVER_VERSION "10.2.8001.0." #define LPFC_DRIVER_NAME "lpfc" + +/* Used for SLI 2/3 */ #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" +/* Used for SLI4 */ +#define LPFC_DRIVER_HANDLER_NAME "lpfc:" + #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ LPFC_DRIVER_VERSION -#define LPFC_COPYRIGHT "Copyright(c) 2004-2009 Emulex. All rights reserved." +#define LPFC_COPYRIGHT "Copyright(c) 2004-2014 Emulex. All rights reserved." diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 0fe188e6600..a87ee33f4f2 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2013 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -80,7 +80,7 @@ inline void lpfc_vport_set_state(struct lpfc_vport *vport, } } -static int +int lpfc_alloc_vpi(struct lpfc_hba *phba) { unsigned long vpi; @@ -387,6 +387,9 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) /* Create binary sysfs attribute for vport */ lpfc_alloc_sysfs_attr(vport); + /* Set the DFT_LUN_Q_DEPTH accordingly */ + vport->cfg_lun_queue_depth = phba->pport->cfg_lun_queue_depth; + *(struct lpfc_vport **)fc_vport->dd_data = vport; vport->fc_vport = fc_vport; @@ -568,6 +571,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport) struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; struct lpfc_hba *phba = vport->phba; long timeout; + bool ns_ndlp_referenced = false; if (vport->port_type == LPFC_PHYSICAL_PORT) { lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, @@ -628,6 +632,18 @@ lpfc_vport_delete(struct fc_vport *fc_vport) lpfc_debugfs_terminate(vport); + /* + * The call to fc_remove_host might release the NameServer ndlp. Since + * we might need to use the ndlp to send the DA_ID CT command, + * increment the reference for the NameServer ndlp to prevent it from + * being released. + */ + ndlp = lpfc_findnode_did(vport, NameServer_DID); + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { + lpfc_nlp_get(ndlp); + ns_ndlp_referenced = true; + } + /* Remove FC host and then SCSI host with the vport */ fc_remove_host(lpfc_shost_from_vport(vport)); scsi_remove_host(lpfc_shost_from_vport(vport)); @@ -734,6 +750,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport) lpfc_discovery_wait(vport); skip_logo: + + /* + * If the NameServer ndlp has been incremented to allow the DA_ID CT + * command to be sent, decrement the ndlp now. 
+ */ + if (ns_ndlp_referenced) { + ndlp = lpfc_findnode_did(vport, NameServer_DID); + lpfc_nlp_put(ndlp); + } + lpfc_cleanup(vport); lpfc_sli_host_down(vport); diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h index 90828340ace..6b2c94eb813 100644 --- a/drivers/scsi/lpfc/lpfc_vport.h +++ b/drivers/scsi/lpfc/lpfc_vport.h @@ -90,6 +90,7 @@ int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *); int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint); struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *); void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **); +int lpfc_alloc_vpi(struct lpfc_hba *phba); /* * queuecommand VPORT-specific return codes. Specified in the host byte code.
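/*
 * Editor's illustrative sketch -- not part of this patch. The vport-delete
 * change above pins the NameServer ndlp with lpfc_nlp_get() before
 * fc_remove_host()/scsi_remove_host() can drop the last reference, uses it to
 * send the DA_ID CT command, and releases it with lpfc_nlp_put() at skip_logo.
 * The standalone snippet below shows only the shape of that hold/use/release
 * pattern; the node type and helper names are hypothetical stand-ins, not
 * driver APIs.
 */
#include <stdbool.h>

struct node_sketch {
	int refcount;	/* stand-in for the kref embedded in an ndlp */
};

static void node_get(struct node_sketch *n)
{
	n->refcount++;
}

static void node_put(struct node_sketch *n)
{
	/* when this drops the last reference, the node may be freed */
	n->refcount--;
}

void vport_delete_shape(struct node_sketch *nameserver)
{
	bool pinned = false;

	if (nameserver) {		/* like lpfc_findnode_did(vport, NameServer_DID) */
		node_get(nameserver);	/* like lpfc_nlp_get(ndlp) */
		pinned = true;
	}

	/* ... remove the FC/SCSI hosts (which may drop a reference), then
	 * issue the DA_ID CT command using the pinned node ... */

	if (pinned)
		node_put(nameserver);	/* like lpfc_nlp_put(ndlp) at skip_logo */
}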
