Diffstat (limited to 'drivers/scsi/lpfc/lpfc_init.c')
 drivers/scsi/lpfc/lpfc_init.c | 4939 ++++++++++++++++++++++++++++++----------
 1 file changed, 3606 insertions(+), 1333 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index b3065791f30..06f9a5b79e6 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1,7 +1,7 @@  /*******************************************************************   * This file is part of the Emulex Linux Device Driver for         *   * Fibre Channel Host Bus Adapters.                                * - * Copyright (C) 2004-2010 Emulex.  All rights reserved.           * + * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *   * EMULEX and SLI are trademarks of Emulex.                        *   * www.emulex.com                                                  *   * Portions Copyright (C) 2004-2005 Christoph Hellwig              * @@ -24,12 +24,16 @@  #include <linux/dma-mapping.h>  #include <linux/idr.h>  #include <linux/interrupt.h> +#include <linux/module.h>  #include <linux/kthread.h>  #include <linux/pci.h>  #include <linux/spinlock.h>  #include <linux/ctype.h>  #include <linux/aer.h>  #include <linux/slab.h> +#include <linux/firmware.h> +#include <linux/miscdevice.h> +#include <linux/percpu.h>  #include <scsi/scsi.h>  #include <scsi/scsi_device.h> @@ -55,16 +59,18 @@ char *_dump_buf_dif;  unsigned long _dump_buf_dif_order;  spinlock_t _dump_buf_lock; +/* Used when mapping IRQ vectors in a driver centric manner */ +uint16_t *lpfc_used_cpu; +uint32_t lpfc_present_cpu; +  static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);  static int lpfc_post_rcv_buf(struct lpfc_hba *); -static int lpfc_sli4_queue_create(struct lpfc_hba *); -static void lpfc_sli4_queue_destroy(struct lpfc_hba *); +static int lpfc_sli4_queue_verify(struct lpfc_hba *);  static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);  static int lpfc_setup_endian_order(struct lpfc_hba *); -static int lpfc_sli4_read_config(struct lpfc_hba *);  static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); -static void lpfc_free_sgl_list(struct lpfc_hba *); -static int lpfc_init_sgl_list(struct lpfc_hba *); +static void lpfc_free_els_sgl_list(struct lpfc_hba *); +static void lpfc_init_sgl_list(struct lpfc_hba *);  static int lpfc_init_active_sgl_array(struct lpfc_hba *);  static void lpfc_free_active_sgl(struct lpfc_hba *);  static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); @@ -72,6 +78,9 @@ static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);  static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);  static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);  static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); +static void lpfc_sli4_disable_intr(struct lpfc_hba *); +static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); +static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);  static struct scsi_transport_template *lpfc_transport_template = NULL;  static struct scsi_transport_template *lpfc_vport_transport_template = NULL; @@ -211,7 +220,6 @@ lpfc_config_port_prep(struct lpfc_hba *phba)  	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);  	if (!lpfc_vpd_data)  		goto out_free_mbox; -  	do {  		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);  		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); @@ -309,6 +317,45 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)  }  /** + * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname, + *	cfg_soft_wwnn, cfg_soft_wwpn + * @vport: pointer to lpfc vport data structure. + * + * + * Return codes + *   None. 
+ **/ +void +lpfc_update_vport_wwn(struct lpfc_vport *vport) +{ +	/* If the soft name exists then update it using the service params */ +	if (vport->phba->cfg_soft_wwnn) +		u64_to_wwn(vport->phba->cfg_soft_wwnn, +			   vport->fc_sparam.nodeName.u.wwn); +	if (vport->phba->cfg_soft_wwpn) +		u64_to_wwn(vport->phba->cfg_soft_wwpn, +			   vport->fc_sparam.portName.u.wwn); + +	/* +	 * If the name is empty or there exists a soft name +	 * then copy the service params name, otherwise use the fc name +	 */ +	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn) +		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, +			sizeof(struct lpfc_name)); +	else +		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, +			sizeof(struct lpfc_name)); + +	if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn) +		memcpy(&vport->fc_portname, &vport->fc_sparam.portName, +			sizeof(struct lpfc_name)); +	else +		memcpy(&vport->fc_sparam.portName, &vport->fc_portname, +			sizeof(struct lpfc_name)); +} + +/**   * lpfc_config_port_post - Perform lpfc initialization after config port   * @phba: pointer to lpfc hba data structure.   * @@ -377,17 +424,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)  	lpfc_mbuf_free(phba, mp->virt, mp->phys);  	kfree(mp);  	pmb->context1 = NULL; - -	if (phba->cfg_soft_wwnn) -		u64_to_wwn(phba->cfg_soft_wwnn, -			   vport->fc_sparam.nodeName.u.wwn); -	if (phba->cfg_soft_wwpn) -		u64_to_wwn(phba->cfg_soft_wwpn, -			   vport->fc_sparam.portName.u.wwn); -	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, -	       sizeof (struct lpfc_name)); -	memcpy(&vport->fc_portname, &vport->fc_sparam.portName, -	       sizeof (struct lpfc_name)); +	lpfc_update_vport_wwn(vport);  	/* Update the fc_host data structures with new wwn. 
*/  	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); @@ -436,43 +473,36 @@ lpfc_config_port_post(struct lpfc_hba *phba)  	lpfc_sli_read_link_ste(phba);  	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */ -	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) -		phba->cfg_hba_queue_depth = -			(mb->un.varRdConfig.max_xri + 1) - -					lpfc_sli4_get_els_iocb_cnt(phba); +	i = (mb->un.varRdConfig.max_xri + 1); +	if (phba->cfg_hba_queue_depth > i) { +		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, +				"3359 HBA queue depth changed from %d to %d\n", +				phba->cfg_hba_queue_depth, i); +		phba->cfg_hba_queue_depth = i; +	} + +	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3)  */ +	i = (mb->un.varRdConfig.max_xri >> 3); +	if (phba->pport->cfg_lun_queue_depth > i) { +		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, +				"3360 LUN queue depth changed from %d to %d\n", +				phba->pport->cfg_lun_queue_depth, i); +		phba->pport->cfg_lun_queue_depth = i; +	}  	phba->lmt = mb->un.varRdConfig.lmt;  	/* Get the default values for Model Name and Description */  	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); -	if ((phba->cfg_link_speed > LINK_SPEED_10G) -	    || ((phba->cfg_link_speed == LINK_SPEED_1G) -		&& !(phba->lmt & LMT_1Gb)) -	    || ((phba->cfg_link_speed == LINK_SPEED_2G) -		&& !(phba->lmt & LMT_2Gb)) -	    || ((phba->cfg_link_speed == LINK_SPEED_4G) -		&& !(phba->lmt & LMT_4Gb)) -	    || ((phba->cfg_link_speed == LINK_SPEED_8G) -		&& !(phba->lmt & LMT_8Gb)) -	    || ((phba->cfg_link_speed == LINK_SPEED_10G) -		&& !(phba->lmt & LMT_10Gb))) { -		/* Reset link speed to auto */ -		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT, -			"1302 Invalid speed for this board: " -			"Reset link speed to auto: x%x\n", -			phba->cfg_link_speed); -			phba->cfg_link_speed = LINK_SPEED_AUTO; -	} -  	phba->link_state = LPFC_LINK_DOWN;  	/* Only process IOCBs on ELS ring till hba_state is READY */ -	if (psli->ring[psli->extra_ring].cmdringaddr) +	if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr)  		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT; -	if (psli->ring[psli->fcp_ring].cmdringaddr) +	if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr)  		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; -	if (psli->ring[psli->next_ring].cmdringaddr) +	if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr)  		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;  	/* Post receive buffers for desired rings */ @@ -505,7 +535,10 @@ lpfc_config_port_post(struct lpfc_hba *phba)  	phba->hba_flag &= ~HBA_ERATT_HANDLED;  	/* Enable appropriate host interrupts */ -	status = readl(phba->HCregaddr); +	if (lpfc_readl(phba->HCregaddr, &status)) { +		spin_unlock_irq(&phba->hbalock); +		return -EIO; +	}  	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;  	if (psli->num_rings > 0)  		status |= HC_R0INT_ENA; @@ -526,13 +559,16 @@ lpfc_config_port_post(struct lpfc_hba *phba)  	/* Set up ring-0 (ELS) timer */  	timeout = phba->fc_ratov * 2; -	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); +	mod_timer(&vport->els_tmofunc, +		  jiffies + msecs_to_jiffies(1000 * timeout));  	/* Set up heart beat (HB) timer */ -	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); +	mod_timer(&phba->hb_tmofunc, +		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));  	phba->hb_outstanding = 0;  	phba->last_completion_time = jiffies;  	/* Set up error attention (ERATT) polling timer */ -	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); +	
mod_timer(&phba->eratt_poll, +		  jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));  	if (phba->hba_flag & LINK_DISABLED) {  		lpfc_printf_log(phba, @@ -551,29 +587,10 @@ lpfc_config_port_post(struct lpfc_hba *phba)  			return -EIO;  		}  	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { -		lpfc_init_link(phba, pmb, phba->cfg_topology, -			phba->cfg_link_speed); -		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; -		lpfc_set_loopback_flag(phba); -		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); -		if (rc != MBX_SUCCESS) { -			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -				"0454 Adapter failed to init, mbxCmd x%x " -				"INIT_LINK, mbxStatus x%x\n", -				mb->mbxCommand, mb->mbxStatus); - -			/* Clear all interrupt enable conditions */ -			writel(0, phba->HCregaddr); -			readl(phba->HCregaddr); /* flush */ -			/* Clear all pending interrupts */ -			writel(0xffffffff, phba->HAregaddr); -			readl(phba->HAregaddr); /* flush */ - -			phba->link_state = LPFC_HBA_ERROR; -			if (rc != MBX_BUSY) -				mempool_free(pmb, phba->mbox_mem_pool); -			return -EIO; -		} +		mempool_free(pmb, phba->mbox_mem_pool); +		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); +		if (rc) +			return rc;  	}  	/* MBOX buffer will be freed in mbox compl */  	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -635,6 +652,28 @@ lpfc_config_port_post(struct lpfc_hba *phba)  int  lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)  { +	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); +} + +/** + * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology + * @phba: pointer to lpfc hba data structure. + * @fc_topology: desired fc topology. + * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT + * + * This routine will issue the INIT_LINK mailbox command call. + * It is available to other drivers through the lpfc_hba data + * structure for use as a delayed link up mechanism with the + * module parameter lpfc_suppress_link_up. 
+ * + * Return code + *              0 - success + *              Any other value - error + **/ +int +lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, +			       uint32_t flag) +{  	struct lpfc_vport *vport = phba->pport;  	LPFC_MBOXQ_t *pmb;  	MAILBOX_t *mb; @@ -648,22 +687,44 @@ lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)  	mb = &pmb->u.mb;  	pmb->vport = vport; -	lpfc_init_link(phba, pmb, phba->cfg_topology, -		phba->cfg_link_speed); +	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) || +	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) && +	     !(phba->lmt & LMT_1Gb)) || +	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) && +	     !(phba->lmt & LMT_2Gb)) || +	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) && +	     !(phba->lmt & LMT_4Gb)) || +	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) && +	     !(phba->lmt & LMT_8Gb)) || +	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) && +	     !(phba->lmt & LMT_10Gb)) || +	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && +	     !(phba->lmt & LMT_16Gb))) { +		/* Reset link speed to auto */ +		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, +			"1302 Invalid speed for this board:%d " +			"Reset link speed to auto.\n", +			phba->cfg_link_speed); +			phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; +	} +	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);  	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; -	lpfc_set_loopback_flag(phba); +	if (phba->sli_rev < LPFC_SLI_REV4) +		lpfc_set_loopback_flag(phba);  	rc = lpfc_sli_issue_mbox(phba, pmb, flag); -	if (rc != MBX_SUCCESS) { +	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  			"0498 Adapter failed to init, mbxCmd x%x "  			"INIT_LINK, mbxStatus x%x\n",  			mb->mbxCommand, mb->mbxStatus); -		/* Clear all interrupt enable conditions */ -		writel(0, phba->HCregaddr); -		readl(phba->HCregaddr); /* flush */ -		/* Clear all pending interrupts */ -		writel(0xffffffff, phba->HAregaddr); -		readl(phba->HAregaddr); /* flush */ +		if (phba->sli_rev <= LPFC_SLI_REV3) { +			/* Clear all interrupt enable conditions */ +			writel(0, phba->HCregaddr); +			readl(phba->HCregaddr); /* flush */ +			/* Clear all pending interrupts */ +			writel(0xffffffff, phba->HAregaddr); +			readl(phba->HAregaddr); /* flush */ +		}  		phba->link_state = LPFC_HBA_ERROR;  		if (rc != MBX_BUSY || flag == MBX_POLL)  			mempool_free(pmb, phba->mbox_mem_pool); @@ -759,58 +820,153 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)  }  /** - * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset + * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free + * rspiocb which got deferred + *   * @phba: pointer to lpfc HBA data structure.   * - * This routine will do uninitialization after the HBA is reset when bring - * down the SLI Layer. + * This routine will cleanup completed slow path events after HBA is reset + * when bringing down the SLI Layer. + *   *   * Return codes - *   0 - success. - *   Any other value - error. + *   void.   
**/ -static int -lpfc_hba_down_post_s3(struct lpfc_hba *phba) +static void +lpfc_sli4_free_sp_events(struct lpfc_hba *phba) +{ +	struct lpfc_iocbq *rspiocbq; +	struct hbq_dmabuf *dmabuf; +	struct lpfc_cq_event *cq_event; + +	spin_lock_irq(&phba->hbalock); +	phba->hba_flag &= ~HBA_SP_QUEUE_EVT; +	spin_unlock_irq(&phba->hbalock); + +	while (!list_empty(&phba->sli4_hba.sp_queue_event)) { +		/* Get the response iocb from the head of work queue */ +		spin_lock_irq(&phba->hbalock); +		list_remove_head(&phba->sli4_hba.sp_queue_event, +				 cq_event, struct lpfc_cq_event, list); +		spin_unlock_irq(&phba->hbalock); + +		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { +		case CQE_CODE_COMPL_WQE: +			rspiocbq = container_of(cq_event, struct lpfc_iocbq, +						 cq_event); +			lpfc_sli_release_iocbq(phba, rspiocbq); +			break; +		case CQE_CODE_RECEIVE: +		case CQE_CODE_RECEIVE_V1: +			dmabuf = container_of(cq_event, struct hbq_dmabuf, +					      cq_event); +			lpfc_in_buf_free(phba, &dmabuf->dbuf); +		} +	} +} + +/** + * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset + * @phba: pointer to lpfc HBA data structure. + * + * This routine will cleanup posted ELS buffers after the HBA is reset + * when bringing down the SLI Layer. + * + * + * Return codes + *   void. + **/ +static void +lpfc_hba_free_post_buf(struct lpfc_hba *phba)  {  	struct lpfc_sli *psli = &phba->sli;  	struct lpfc_sli_ring *pring;  	struct lpfc_dmabuf *mp, *next_mp; -	LIST_HEAD(completions); -	int i; +	LIST_HEAD(buflist); +	int count;  	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)  		lpfc_sli_hbqbuf_free_all(phba);  	else {  		/* Cleanup preposted buffers on the ELS ring */  		pring = &psli->ring[LPFC_ELS_RING]; -		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { +		spin_lock_irq(&phba->hbalock); +		list_splice_init(&pring->postbufq, &buflist); +		spin_unlock_irq(&phba->hbalock); + +		count = 0; +		list_for_each_entry_safe(mp, next_mp, &buflist, list) {  			list_del(&mp->list); -			pring->postbufq_cnt--; +			count++;  			lpfc_mbuf_free(phba, mp->virt, mp->phys);  			kfree(mp);  		} + +		spin_lock_irq(&phba->hbalock); +		pring->postbufq_cnt -= count; +		spin_unlock_irq(&phba->hbalock);  	} +} + +/** + * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset + * @phba: pointer to lpfc HBA data structure. + * + * This routine will cleanup the txcmplq after the HBA is reset when bringing + * down the SLI Layer. + * + * Return codes + *   void + **/ +static void +lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) +{ +	struct lpfc_sli *psli = &phba->sli; +	struct lpfc_sli_ring *pring; +	LIST_HEAD(completions); +	int i; -	spin_lock_irq(&phba->hbalock);  	for (i = 0; i < psli->num_rings; i++) {  		pring = &psli->ring[i]; - +		if (phba->sli_rev >= LPFC_SLI_REV4) +			spin_lock_irq(&pring->ring_lock); +		else +			spin_lock_irq(&phba->hbalock);  		/* At this point in time the HBA is either reset or DOA. Either  		 * way, nothing should be on txcmplq as it will NEVER complete.  		 
*/  list_splice_init(&pring->txcmplq, &completions);  pring->txcmplq_cnt = 0; -		spin_unlock_irq(&phba->hbalock); + +		if (phba->sli_rev >= LPFC_SLI_REV4) +			spin_unlock_irq(&pring->ring_lock); +		else +			spin_unlock_irq(&phba->hbalock);  /* Cancel all the IOCBs from the completions list */  lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,  				      IOERR_SLI_ABORTED); -  lpfc_sli_abort_iocb_ring(phba, pring); -		spin_lock_irq(&phba->hbalock);  } -	spin_unlock_irq(&phba->hbalock); +} +/** + * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset + * @phba: pointer to lpfc HBA data structure. + * + * This routine will do uninitialization after the HBA is reset when bring + * down the SLI Layer. + * + * Return codes + *   0 - success. + *   Any other value - error. + **/ +static int +lpfc_hba_down_post_s3(struct lpfc_hba *phba) +{ +	lpfc_hba_free_post_buf(phba); +	lpfc_hba_clean_txcmplq(phba);  	return 0;  } @@ -830,13 +986,12 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)  {  	struct lpfc_scsi_buf *psb, *psb_next;  	LIST_HEAD(aborts); -	int ret;  	unsigned long iflag = 0;  	struct lpfc_sglq *sglq_entry = NULL; -	ret = lpfc_hba_down_post_s3(phba); -	if (ret) -		return ret; +	lpfc_hba_free_post_buf(phba); +	lpfc_hba_clean_txcmplq(phba); +  	/* At this point in time the HBA is either reset or DOA. Either  	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be  	 * on the lpfc_sgl_list so that it can either be freed if the @@ -869,9 +1024,11 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)  		psb->pCmd = NULL;  		psb->status = IOSTAT_SUCCESS;  	} -	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); -	list_splice(&aborts, &phba->lpfc_scsi_buf_list); -	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); +	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); +	list_splice(&aborts, &phba->lpfc_scsi_buf_list_put); +	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); + +	lpfc_sli4_free_sp_events(phba);  	return 0;  } @@ -927,6 +1084,36 @@ lpfc_hb_timeout(unsigned long ptr)  }  /** + * lpfc_rrq_timeout - The RRQ-timer timeout handler + * @ptr: unsigned long holds the pointer to lpfc hba data structure. + * + * This is the RRQ-timer timeout handler registered to the lpfc driver. When + * this timer fires, a RRQ timeout event shall be posted to the lpfc driver + * work-port-events bitmap and the worker thread is notified. This timeout + * event will be used by the worker thread to invoke the actual timeout + * handler routine, lpfc_rrq_handler. Any periodical operations will + * be performed in the timeout handler and the RRQ timeout event bit shall + * be cleared by the worker thread after it has taken the event bitmap out. + **/ +static void +lpfc_rrq_timeout(unsigned long ptr) +{ +	struct lpfc_hba *phba; +	unsigned long iflag; + +	phba = (struct lpfc_hba *)ptr; +	spin_lock_irqsave(&phba->pport->work_port_lock, iflag); +	if (!(phba->pport->load_flag & FC_UNLOADING)) +		phba->hba_flag |= HBA_RRQ_ACTIVE; +	else +		phba->hba_flag &= ~HBA_RRQ_ACTIVE; +	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); + +	if (!(phba->pport->load_flag & FC_UNLOADING)) +		lpfc_worker_wake_up(phba); +} + +/**   * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function   * @phba: pointer to lpfc hba data structure.   * @pmboxq: pointer to the driver internal queue element for mailbox command.
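
The timer hunks on either side of this point share two conventions. First, timeouts are now computed with msecs_to_jiffies() instead of multiplying by HZ, so the interval means the same wall-clock time on any kernel tick rate. Second, timer callbacks such as the new lpfc_rrq_timeout() run in softirq context, so they only record an event and wake the driver's worker thread rather than doing the work inline. A minimal sketch of both ideas, using invented names and the era-appropriate init_timer() interface rather than anything from lpfc itself:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/wait.h>
#include <linux/bitops.h>

#define EX_POLL_SECS	5	/* made-up polling interval, in seconds */
#define EX_POLL_EVENT	0	/* made-up event bit */

struct ex_hba {
	struct timer_list poll_tmo;
	unsigned long work_events;
	wait_queue_head_t work_waitq;
};

/* Runs in softirq context: post the event, wake the worker, return. */
static void ex_poll_tmo(unsigned long ptr)
{
	struct ex_hba *hba = (struct ex_hba *)ptr;

	set_bit(EX_POLL_EVENT, &hba->work_events);
	wake_up_interruptible(&hba->work_waitq);
}

static void ex_start_poll(struct ex_hba *hba)
{
	init_waitqueue_head(&hba->work_waitq);
	init_timer(&hba->poll_tmo);
	hba->poll_tmo.function = ex_poll_tmo;
	hba->poll_tmo.data = (unsigned long)hba;
	/* msecs_to_jiffies() is correct for any CONFIG_HZ; raw HZ math is
	 * not wrong per se, but this form keeps the units explicit. */
	mod_timer(&hba->poll_tmo,
		  jiffies + msecs_to_jiffies(1000 * EX_POLL_SECS));
}
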
@@ -957,7 +1144,8 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)  		!(phba->link_state == LPFC_HBA_ERROR) &&  		!(phba->pport->load_flag & FC_UNLOADING))  		mod_timer(&phba->hb_tmofunc, -			jiffies + HZ * LPFC_HB_MBOX_INTERVAL); +			  jiffies + +			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));  	return;  } @@ -1000,15 +1188,18 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)  	spin_lock_irq(&phba->pport->work_port_lock); -	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ, -		jiffies)) { +	if (time_after(phba->last_completion_time + +			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), +			jiffies)) {  		spin_unlock_irq(&phba->pport->work_port_lock);  		if (!phba->hb_outstanding)  			mod_timer(&phba->hb_tmofunc, -				jiffies + HZ * LPFC_HB_MBOX_INTERVAL); +				jiffies + +				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));  		else  			mod_timer(&phba->hb_tmofunc, -				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); +				jiffies + +				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));  		return;  	}  	spin_unlock_irq(&phba->pport->work_port_lock); @@ -1040,7 +1231,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)  				if (!pmboxq) {  					mod_timer(&phba->hb_tmofunc,  						 jiffies + -						 HZ * LPFC_HB_MBOX_INTERVAL); +						 msecs_to_jiffies(1000 * +						 LPFC_HB_MBOX_INTERVAL));  					return;  				} @@ -1056,7 +1248,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)  							phba->mbox_mem_pool);  					mod_timer(&phba->hb_tmofunc,  						jiffies + -						HZ * LPFC_HB_MBOX_INTERVAL); +						msecs_to_jiffies(1000 * +						LPFC_HB_MBOX_INTERVAL));  					return;  				}  				phba->skipped_hb = 0; @@ -1072,7 +1265,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)  				phba->skipped_hb = jiffies;  			mod_timer(&phba->hb_tmofunc, -				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); +				 jiffies + +				 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));  			return;  		} else {  			/* @@ -1086,7 +1280,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)  					jiffies_to_msecs(jiffies  						 - phba->last_completion_time));  			mod_timer(&phba->hb_tmofunc, -				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); +				jiffies + +				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));  		}  	}  } @@ -1106,7 +1301,7 @@ lpfc_offline_eratt(struct lpfc_hba *phba)  	spin_lock_irq(&phba->hbalock);  	psli->sli_flag &= ~LPFC_SLI_ACTIVE;  	spin_unlock_irq(&phba->hbalock); -	lpfc_offline_prep(phba); +	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);  	lpfc_offline(phba);  	lpfc_reset_barrier(phba); @@ -1127,10 +1322,10 @@ lpfc_offline_eratt(struct lpfc_hba *phba)   * This routine is called to bring a SLI4 HBA offline when HBA hardware error   * other than Port Error 6 has been detected.   **/ -static void +void  lpfc_sli4_offline_eratt(struct lpfc_hba *phba)  { -	lpfc_offline_prep(phba); +	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);  	lpfc_offline(phba);  	lpfc_sli4_brdreset(phba);  	lpfc_hba_down_post(phba); @@ -1152,7 +1347,6 @@ static void  lpfc_handle_deferred_eratt(struct lpfc_hba *phba)  {  	uint32_t old_host_status = phba->work_hs; -	struct lpfc_sli_ring  *pring;  	struct lpfc_sli *psli = &phba->sli;  	/* If the pci channel is offline, ignore possible errors, @@ -1181,20 +1375,22 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)  	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the  	 * SCSI layer retry it after re-establishing link.  	 
*/ -	pring = &psli->ring[psli->fcp_ring]; -	lpfc_sli_abort_iocb_ring(phba, pring); +	lpfc_sli_abort_fcp_rings(phba);  	/*  	 * There was a firmware error. Take the hba offline and then  	 * attempt to restart it.  	 */ -	lpfc_offline_prep(phba); +	lpfc_offline_prep(phba, LPFC_MBX_WAIT);  	lpfc_offline(phba);  	/* Wait for the ER1 bit to clear.*/  	while (phba->work_hs & HS_FFER1) {  		msleep(100); -		phba->work_hs = readl(phba->HSregaddr); +		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) { +			phba->work_hs = UNPLUG_ERR ; +			break; +		}  		/* If driver is unloading let the worker thread continue */  		if (phba->pport->load_flag & FC_UNLOADING) {  			phba->work_hs = 0; @@ -1247,7 +1443,6 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)  {  	struct lpfc_vport *vport = phba->pport;  	struct lpfc_sli   *psli = &phba->sli; -	struct lpfc_sli_ring  *pring;  	uint32_t event_data;  	unsigned long temperature;  	struct temp_event temp_event_data; @@ -1299,14 +1494,13 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)  		* Error iocb (I/O) on txcmplq and let the SCSI layer  		* retry it after re-establishing link.  		*/ -		pring = &psli->ring[psli->fcp_ring]; -		lpfc_sli_abort_iocb_ring(phba, pring); +		lpfc_sli_abort_fcp_rings(phba);  		/*  		 * There was a firmware error.  Take the hba offline and then  		 * attempt to restart it.  		 */ -		lpfc_offline_prep(phba); +		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);  		lpfc_offline(phba);  		lpfc_sli_brdrestart(phba);  		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */ @@ -1362,6 +1556,56 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)  }  /** + * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg + * @phba: pointer to lpfc hba data structure. + * @mbx_action: flag for mailbox shutdown action. + * + * This routine is invoked to perform an SLI4 port PCI function reset in + * response to port status register polling attention. It waits for port + * status register (ERR, RDY, RN) bits before proceeding with function reset. + * During this process, interrupt vectors are freed and later requested + * for handling possible port resource change. + **/ +static int +lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, +			    bool en_rn_msg) +{ +	int rc; +	uint32_t intr_mode; + +	/* +	 * On error status condition, driver need to wait for port +	 * ready before performing reset. +	 */ +	rc = lpfc_sli4_pdev_status_reg_wait(phba); +	if (!rc) { +		/* need reset: attempt for port recovery */ +		if (en_rn_msg) +			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +					"2887 Reset Needed: Attempting Port " +					"Recovery...\n"); +		lpfc_offline_prep(phba, mbx_action); +		lpfc_offline(phba); +		/* release interrupt for possible resource change */ +		lpfc_sli4_disable_intr(phba); +		lpfc_sli_brdrestart(phba); +		/* request and enable interrupt */ +		intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); +		if (intr_mode == LPFC_INTR_ERROR) { +			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +					"3175 Failed to enable interrupt\n"); +			return -EIO; +		} else { +			phba->intr_mode = intr_mode; +		} +		rc = lpfc_online(phba); +		if (rc == 0) +			lpfc_unblock_mgmt_io(phba); +	} +	return rc; +} + +/**   * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler   * @phba: pointer to lpfc hba data structure.   
* @@ -1374,6 +1618,13 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)  	struct lpfc_vport *vport = phba->pport;  	uint32_t event_data;  	struct Scsi_Host *shost; +	uint32_t if_type; +	struct lpfc_register portstat_reg = {0}; +	uint32_t reg_err1, reg_err2; +	uint32_t uerrlo_reg, uemasklo_reg; +	uint32_t pci_rd_rc1, pci_rd_rc2; +	bool en_rn_msg = true; +	int rc;  	/* If the pci channel is offline, ignore possible errors, since  	 * we cannot communicate with the pci card anyway. @@ -1384,23 +1635,90 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)  	if (!phba->cfg_enable_hba_reset)  		return; +	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); +	switch (if_type) { +	case LPFC_SLI_INTF_IF_TYPE_0: +		pci_rd_rc1 = lpfc_readl( +				phba->sli4_hba.u.if_type0.UERRLOregaddr, +				&uerrlo_reg); +		pci_rd_rc2 = lpfc_readl( +				phba->sli4_hba.u.if_type0.UEMASKLOregaddr, +				&uemasklo_reg); +		/* consider PCI bus read error as pci_channel_offline */ +		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) +			return; +		lpfc_sli4_offline_eratt(phba); +		break; +	case LPFC_SLI_INTF_IF_TYPE_2: +		pci_rd_rc1 = lpfc_readl( +				phba->sli4_hba.u.if_type2.STATUSregaddr, +				&portstat_reg.word0); +		/* consider PCI bus read error as pci_channel_offline */ +		if (pci_rd_rc1 == -EIO) { +			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +				"3151 PCI bus read access failure: x%x\n", +				readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); +			return; +		} +		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); +		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); +		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { +			/* TODO: Register for Overtemp async events. */ +			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +				"2889 Port Overtemperature event, " +				"taking port offline\n"); +			spin_lock_irq(&phba->hbalock); +			phba->over_temp_state = HBA_OVER_TEMP; +			spin_unlock_irq(&phba->hbalock); +			lpfc_sli4_offline_eratt(phba); +			break; +		} +		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && +		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { +			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +					"3143 Port Down: Firmware Update " +					"Detected\n"); +			en_rn_msg = false; +		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && +			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) +			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +					"3144 Port Down: Debug Dump\n"); +		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && +			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) +			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +					"3145 Port Down: Provisioning\n"); + +		/* Check port status register for function reset */ +		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, +				en_rn_msg); +		if (rc == 0) { +			/* don't report event on forced debug dump */ +			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && +			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) +				return; +			else +				break; +		} +		/* fall through for not able to recover */ +		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +				"3152 Unrecoverable error, bring the port " +				"offline\n"); +		lpfc_sli4_offline_eratt(phba); +		break; +	case LPFC_SLI_INTF_IF_TYPE_1: +	default: +		break; +	} +	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, +			"3123 Report dump event to upper layer\n");  	/* Send an internal error event to mgmt application */  	lpfc_board_errevt_to_mgmt(phba); -	/* For now, the actual action for SLI4 device handling is not -	 * specified yet, just treated it as adaptor hardware failure -	 */ -	lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -			
"0143 SLI4 Adapter Hardware Error Data: x%x x%x\n", -			phba->work_status[0], phba->work_status[1]); -  	event_data = FC_REG_DUMP_EVENT;  	shost = lpfc_shost_from_vport(vport);  	fc_host_post_vendor_event(shost, fc_get_event_number(),  				  sizeof(event_data), (char *) &event_data,  				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); - -	lpfc_sli4_offline_eratt(phba);  }  /** @@ -1459,8 +1777,8 @@ lpfc_handle_latt(struct lpfc_hba *phba)  	lpfc_els_flush_all_cmd(phba);  	psli->slistat.link_event++; -	lpfc_read_la(phba, pmb, mp); -	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; +	lpfc_read_topology(phba, pmb, mp); +	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;  	pmb->vport = vport;  	/* Block ELS IOCBs until we have processed this mbox command */  	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; @@ -1628,11 +1946,20 @@ lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)  				j = 0;  				Length -= (3+i);  				while(i--) { -				phba->Port[j++] = vpd[index++]; -				if (j == 19) -					break; +					if ((phba->sli_rev == LPFC_SLI_REV4) && +					    (phba->sli4_hba.pport_name_sta == +					     LPFC_SLI4_PPNAME_GET)) { +						j++; +						index++; +					} else +						phba->Port[j++] = vpd[index++]; +					if (j == 19) +						break;  				} -				phba->Port[j] = 0; +				if ((phba->sli_rev != LPFC_SLI_REV4) || +				    (phba->sli4_hba.pport_name_sta == +				     LPFC_SLI4_PPNAME_NON)) +					phba->Port[j] = 0;  				continue;  			}  			else { @@ -1687,7 +2014,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)  		&& descp && descp[0] != '\0')  		return; -	if (phba->lmt & LMT_10Gb) +	if (phba->lmt & LMT_16Gb) +		max_speed = 16; +	else if (phba->lmt & LMT_10Gb)  		max_speed = 10;  	else if (phba->lmt & LMT_8Gb)  		max_speed = 8; @@ -1695,85 +2024,90 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)  		max_speed = 4;  	else if (phba->lmt & LMT_2Gb)  		max_speed = 2; -	else +	else if (phba->lmt & LMT_1Gb)  		max_speed = 1; +	else +		max_speed = 0;  	vp = &phba->vpd;  	switch (dev_id) {  	case PCI_DEVICE_ID_FIREFLY: -		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"}; +		m = (typeof(m)){"LP6000", "PCI", +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_SUPERFLY:  		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) -			m = (typeof(m)){"LP7000", "PCI", -					"Fibre Channel Adapter"}; +			m = (typeof(m)){"LP7000", "PCI", ""};  		else -			m = (typeof(m)){"LP7000E", "PCI", -					"Fibre Channel Adapter"}; +			m = (typeof(m)){"LP7000E", "PCI", ""}; +		m.function = "Obsolete, Unsupported Fibre Channel Adapter";  		break;  	case PCI_DEVICE_ID_DRAGONFLY:  		m = (typeof(m)){"LP8000", "PCI", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_CENTAUR:  		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) -			m = (typeof(m)){"LP9002", "PCI", -					"Fibre Channel Adapter"}; +			m = (typeof(m)){"LP9002", "PCI", ""};  		else -			m = (typeof(m)){"LP9000", "PCI", -					"Fibre Channel Adapter"}; +			m = (typeof(m)){"LP9000", "PCI", ""}; +		m.function = "Obsolete, Unsupported Fibre Channel Adapter";  		break;  	case PCI_DEVICE_ID_RFLY:  		m = (typeof(m)){"LP952", "PCI", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_PEGASUS:  		m = (typeof(m)){"LP9802", "PCI-X", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_THOR:  		
m = (typeof(m)){"LP10000", "PCI-X", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_VIPER:  		m = (typeof(m)){"LPX1000",  "PCI-X", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_PFLY:  		m = (typeof(m)){"LP982", "PCI-X", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_TFLY:  		m = (typeof(m)){"LP1050", "PCI-X", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_HELIOS:  		m = (typeof(m)){"LP11000", "PCI-X2", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_HELIOS_SCSP:  		m = (typeof(m)){"LP11000-SP", "PCI-X2", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_HELIOS_DCSP:  		m = (typeof(m)){"LP11002-SP",  "PCI-X2", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_NEPTUNE: -		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"}; +		m = (typeof(m)){"LPe1000", "PCIe", +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_NEPTUNE_SCSP: -		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"}; +		m = (typeof(m)){"LPe1000-SP", "PCIe", +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_NEPTUNE_DCSP: -		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"}; +		m = (typeof(m)){"LPe1002-SP", "PCIe", +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_BMID:  		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_BSMB: -		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"}; +		m = (typeof(m)){"LP111", "PCI-X2", +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_ZEPHYR:  		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; @@ -1792,16 +2126,20 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)  		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_LP101: -		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"}; +		m = (typeof(m)){"LP101", "PCI-X", +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_LP10000S: -		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"}; +		m = (typeof(m)){"LP10000-S", "PCI", +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_LP11000S: -		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"}; +		m = (typeof(m)){"LP11000-S", "PCI-X2", +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_LPE11000S: -		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"}; +		m = (typeof(m)){"LPe11000-S", "PCIe", +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_SAT:  		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; @@ -1822,20 +2160,21 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)  		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_HORNET: -		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"}; +		m = (typeof(m)){"LP21000", "PCIe", +				"Obsolete, Unsupported FCoE Adapter"};  		GE = 1;  		break;  	case 
PCI_DEVICE_ID_PROTEUS_VF:  		m = (typeof(m)){"LPev12000", "PCIe IOV", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_PROTEUS_PF:  		m = (typeof(m)){"LPev12000", "PCIe IOV", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_PROTEUS_S:  		m = (typeof(m)){"LPemv12002-S", "PCIe IOV", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_TIGERSHARK:  		oneConnect = 1; @@ -1851,7 +2190,28 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)  		break;  	case PCI_DEVICE_ID_BALIUS:  		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"}; +		break; +	case PCI_DEVICE_ID_LANCER_FC: +		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; +		break; +	case PCI_DEVICE_ID_LANCER_FC_VF: +		m = (typeof(m)){"LPe16000", "PCIe", +				"Obsolete, Unsupported Fibre Channel Adapter"}; +		break; +	case PCI_DEVICE_ID_LANCER_FCOE: +		oneConnect = 1; +		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; +		break; +	case PCI_DEVICE_ID_LANCER_FCOE_VF: +		oneConnect = 1; +		m = (typeof(m)){"OCe15100", "PCIe", +				"Obsolete, Unsupported FCoE"}; +		break; +	case PCI_DEVICE_ID_SKYHAWK: +	case PCI_DEVICE_ID_SKYHAWK_VF: +		oneConnect = 1; +		m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};  		break;  	default:  		m = (typeof(m)){"Unknown", "", ""}; @@ -1860,15 +2220,20 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)  	if (mdp && mdp[0] == '\0')  		snprintf(mdp, 79,"%s", m.name); -	/* oneConnect hba requires special processing, they are all initiators +	/* +	 * oneConnect hba requires special processing, they are all initiators  	 * and we put the port number on the end  	 */  	if (descp && descp[0] == '\0') {  		if (oneConnect)  			snprintf(descp, 255, -				"Emulex OneConnect %s, %s Initiator, Port %s", +				"Emulex OneConnect %s, %s Initiator %s",  				m.name, m.function,  				phba->Port); +		else if (max_speed == 0) +			snprintf(descp, 255, +				"Emulex %s %s %s ", +				m.name, m.bus, m.function);  		else  			snprintf(descp, 255,  				"Emulex %s %d%s %s %s", @@ -2172,13 +2537,20 @@ lpfc_cleanup(struct lpfc_vport *vport)  			continue;  		} +		/* take care of nodes in unused state before the state +		 * machine taking action. 
+		 */ +		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { +			lpfc_nlp_put(ndlp); +			continue; +		} +  		if (ndlp->nlp_type & NLP_FABRIC)  			lpfc_disc_state_machine(vport, ndlp, NULL,  					NLP_EVT_DEVICE_RECOVERY);  		lpfc_disc_state_machine(vport, ndlp, NULL,  					     NLP_EVT_DEVICE_RM); -  	}  	/* At this point, ALL ndlp's should be gone @@ -2206,6 +2578,7 @@ lpfc_cleanup(struct lpfc_vport *vport)  		/* Wait for any activity on ndlps to settle */  		msleep(10);  	} +	lpfc_cleanup_vports_rrqs(vport, NULL);  }  /** @@ -2221,6 +2594,7 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)  {  	del_timer_sync(&vport->els_tmofunc);  	del_timer_sync(&vport->fc_fdmitmo); +	del_timer_sync(&vport->delayed_disc_tmo);  	lpfc_can_disctmo(vport);  	return;  } @@ -2281,6 +2655,10 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)  	del_timer_sync(&phba->fabric_block_timer);  	del_timer_sync(&phba->eratt_poll);  	del_timer_sync(&phba->hb_tmofunc); +	if (phba->sli_rev == LPFC_SLI_REV4) { +		del_timer_sync(&phba->rrq_tmr); +		phba->hba_flag &= ~HBA_RRQ_ACTIVE; +	}  	phba->hb_outstanding = 0;  	switch (phba->pci_dev_grp) { @@ -2312,23 +2690,29 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)   * driver prepares the HBA interface for online or offline.   **/  static void -lpfc_block_mgmt_io(struct lpfc_hba * phba) +lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)  {  	unsigned long iflag;  	uint8_t actcmd = MBX_HEARTBEAT;  	unsigned long timeout; -  	spin_lock_irqsave(&phba->hbalock, iflag);  	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; -	if (phba->sli.mbox_active) +	spin_unlock_irqrestore(&phba->hbalock, iflag); +	if (mbx_action == LPFC_MBX_NO_WAIT) +		return; +	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; +	spin_lock_irqsave(&phba->hbalock, iflag); +	if (phba->sli.mbox_active) {  		actcmd = phba->sli.mbox_active->u.mb.mbxCommand; +		/* Determine how long we might wait for the active mailbox +		 * command to be gracefully completed by firmware. +		 */ +		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, +				phba->sli.mbox_active) * 1000) + jiffies; +	}  	spin_unlock_irqrestore(&phba->hbalock, iflag); -	/* Determine how long we might wait for the active mailbox -	 * command to be gracefully completed by firmware. -	 */ -	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) + -			jiffies; +  	/* Wait for the outstnading mailbox command to complete */  	while (phba->sli.mbox_active) {  		/* Check active mailbox complete status every 2ms */ @@ -2344,6 +2728,42 @@ lpfc_block_mgmt_io(struct lpfc_hba * phba)  }  /** + * lpfc_sli4_node_prep - Assign RPIs for active nodes. + * @phba: pointer to lpfc hba data structure. + * + * Allocate RPIs for all active remote nodes. This is needed whenever + * an SLI4 adapter is reset and the driver is not unloading. Its purpose + * is to fixup the temporary rpi assignments. 
+ **/ +void +lpfc_sli4_node_prep(struct lpfc_hba *phba) +{ +	struct lpfc_nodelist  *ndlp, *next_ndlp; +	struct lpfc_vport **vports; +	int i; + +	if (phba->sli_rev != LPFC_SLI_REV4) +		return; + +	vports = lpfc_create_vport_work_array(phba); +	if (vports != NULL) { +		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { +			if (vports[i]->load_flag & FC_UNLOADING) +				continue; + +			list_for_each_entry_safe(ndlp, next_ndlp, +						 &vports[i]->fc_nodes, +						 nlp_listp) { +				if (NLP_CHK_NODE_ACT(ndlp)) +					ndlp->nlp_rpi = +						lpfc_sli4_alloc_rpi(phba); +			} +		} +	} +	lpfc_destroy_vport_work_array(phba, vports); +} + +/**   * lpfc_online - Initialize and bring a HBA online   * @phba: pointer to lpfc hba data structure.   * @@ -2361,6 +2781,7 @@ lpfc_online(struct lpfc_hba *phba)  	struct lpfc_vport *vport;  	struct lpfc_vport **vports;  	int i; +	bool vpis_cleared = false;  	if (!phba)  		return 0; @@ -2372,7 +2793,7 @@ lpfc_online(struct lpfc_hba *phba)  	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,  			"0458 Bring Adapter online\n"); -	lpfc_block_mgmt_io(phba); +	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);  	if (!lpfc_sli_queue_setup(phba)) {  		lpfc_unblock_mgmt_io(phba); @@ -2384,6 +2805,10 @@ lpfc_online(struct lpfc_hba *phba)  			lpfc_unblock_mgmt_io(phba);  			return 1;  		} +		spin_lock_irq(&phba->hbalock); +		if (!phba->sli4_hba.max_cfg_param.vpi_used) +			vpis_cleared = true; +		spin_unlock_irq(&phba->hbalock);  	} else {  		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */  			lpfc_unblock_mgmt_io(phba); @@ -2400,8 +2825,13 @@ lpfc_online(struct lpfc_hba *phba)  			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;  			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)  				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; -			if (phba->sli_rev == LPFC_SLI_REV4) +			if (phba->sli_rev == LPFC_SLI_REV4) {  				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; +				if ((vpis_cleared) && +				    (vports[i]->port_type != +					LPFC_PHYSICAL_PORT)) +					vports[i]->vpi = 0; +			}  			spin_unlock_irq(shost->host_lock);  		}  		lpfc_destroy_vport_work_array(phba, vports); @@ -2440,7 +2870,7 @@ lpfc_unblock_mgmt_io(struct lpfc_hba * phba)   * queue to make it ready to be brought offline.   **/  void -lpfc_offline_prep(struct lpfc_hba * phba) +lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)  {  	struct lpfc_vport *vport = phba->pport;  	struct lpfc_nodelist  *ndlp, *next_ndlp; @@ -2451,7 +2881,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)  	if (vport->fc_flag & FC_OFFLINE_MODE)  		return; -	lpfc_block_mgmt_io(phba); +	lpfc_block_mgmt_io(phba, mbx_action);  	lpfc_linkdown(phba); @@ -2485,13 +2915,20 @@ lpfc_offline_prep(struct lpfc_hba * phba)  				spin_lock_irq(shost->host_lock);  				ndlp->nlp_flag &= ~NLP_NPR_ADISC;  				spin_unlock_irq(shost->host_lock); +				/* +				 * Whenever an SLI4 port goes offline, free the +				 * RPI. Get a new RPI when the adapter port +				 * comes back online. +				 */ +				if (phba->sli_rev == LPFC_SLI_REV4) +					lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);  				lpfc_unreg_rpi(vports[i], ndlp);  			}  		}  	}  	lpfc_destroy_vport_work_array(phba, vports); -	lpfc_sli_mbox_sys_shutdown(phba); +	lpfc_sli_mbox_sys_shutdown(phba, mbx_action);  }  /** @@ -2546,27 +2983,38 @@ lpfc_offline(struct lpfc_hba *phba)   * This routine is to free all the SCSI buffers and IOCBs from the driver   * list back to kernel. It is called from lpfc_pci_remove_one to free   * the internal resources before the device is removed from the system. 
- * - * Return codes - *   0 - successful (for now, it always returns 0)   **/ -static int +static void  lpfc_scsi_free(struct lpfc_hba *phba)  {  	struct lpfc_scsi_buf *sb, *sb_next;  	struct lpfc_iocbq *io, *io_next;  	spin_lock_irq(&phba->hbalock); +  	/* Release all the lpfc_scsi_bufs maintained by this host. */ -	spin_lock(&phba->scsi_buf_list_lock); -	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { + +	spin_lock(&phba->scsi_buf_list_put_lock); +	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, +				 list) { +		list_del(&sb->list); +		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, +			      sb->dma_handle); +		kfree(sb); +		phba->total_scsi_bufs--; +	} +	spin_unlock(&phba->scsi_buf_list_put_lock); + +	spin_lock(&phba->scsi_buf_list_get_lock); +	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, +				 list) {  		list_del(&sb->list);  		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,  			      sb->dma_handle);  		kfree(sb);  		phba->total_scsi_bufs--;  	} -	spin_unlock(&phba->scsi_buf_list_lock); +	spin_unlock(&phba->scsi_buf_list_get_lock);  	/* Release all the lpfc_iocbq entries maintained by this host. */  	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { @@ -2574,8 +3022,186 @@ lpfc_scsi_free(struct lpfc_hba *phba)  		kfree(io);  		phba->total_iocbq_bufs--;  	} +  	spin_unlock_irq(&phba->hbalock); +} + +/** + * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping + * @phba: pointer to lpfc hba data structure. + * + * This routine first calculates the sizes of the current els and allocated + * scsi sgl lists, and then goes through all sgls to updates the physical + * XRIs assigned due to port function reset. During port initialization, the + * current els and allocated scsi sgl lists are 0s. 
+ * + * Return codes + *   0 - successful (for now, it always returns 0) + **/ +int +lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) +{ +	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; +	struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL; +	uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt; +	LIST_HEAD(els_sgl_list); +	LIST_HEAD(scsi_sgl_list); +	int rc; + +	/* +	 * update on pci function's els xri-sgl list +	 */ +	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); +	if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { +		/* els xri-sgl expanded */ +		xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; +		lpfc_printf_log(phba, KERN_INFO, LOG_SLI, +				"3157 ELS xri-sgl count increased from " +				"%d to %d\n", phba->sli4_hba.els_xri_cnt, +				els_xri_cnt); +		/* allocate the additional els sgls */ +		for (i = 0; i < xri_cnt; i++) { +			sglq_entry = kzalloc(sizeof(struct lpfc_sglq), +					     GFP_KERNEL); +			if (sglq_entry == NULL) { +				lpfc_printf_log(phba, KERN_ERR, LOG_SLI, +						"2562 Failure to allocate an " +						"ELS sgl entry:%d\n", i); +				rc = -ENOMEM; +				goto out_free_mem; +			} +			sglq_entry->buff_type = GEN_BUFF_TYPE; +			sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, +							   &sglq_entry->phys); +			if (sglq_entry->virt == NULL) { +				kfree(sglq_entry); +				lpfc_printf_log(phba, KERN_ERR, LOG_SLI, +						"2563 Failure to allocate an " +						"ELS mbuf:%d\n", i); +				rc = -ENOMEM; +				goto out_free_mem; +			} +			sglq_entry->sgl = sglq_entry->virt; +			memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); +			sglq_entry->state = SGL_FREED; +			list_add_tail(&sglq_entry->list, &els_sgl_list); +		} +		spin_lock_irq(&phba->hbalock); +		list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); +		spin_unlock_irq(&phba->hbalock); +	} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { +		/* els xri-sgl shrinked */ +		xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; +		lpfc_printf_log(phba, KERN_INFO, LOG_SLI, +				"3158 ELS xri-sgl count decreased from " +				"%d to %d\n", phba->sli4_hba.els_xri_cnt, +				els_xri_cnt); +		spin_lock_irq(&phba->hbalock); +		list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list); +		spin_unlock_irq(&phba->hbalock); +		/* release extra els sgls from list */ +		for (i = 0; i < xri_cnt; i++) { +			list_remove_head(&els_sgl_list, +					 sglq_entry, struct lpfc_sglq, list); +			if (sglq_entry) { +				lpfc_mbuf_free(phba, sglq_entry->virt, +					       sglq_entry->phys); +				kfree(sglq_entry); +			} +		} +		spin_lock_irq(&phba->hbalock); +		list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); +		spin_unlock_irq(&phba->hbalock); +	} else +		lpfc_printf_log(phba, KERN_INFO, LOG_SLI, +				"3163 ELS xri-sgl count unchanged: %d\n", +				els_xri_cnt); +	phba->sli4_hba.els_xri_cnt = els_xri_cnt; + +	/* update xris to els sgls on the list */ +	sglq_entry = NULL; +	sglq_entry_next = NULL; +	list_for_each_entry_safe(sglq_entry, sglq_entry_next, +				 &phba->sli4_hba.lpfc_sgl_list, list) { +		lxri = lpfc_sli4_next_xritag(phba); +		if (lxri == NO_XRI) { +			lpfc_printf_log(phba, KERN_ERR, LOG_SLI, +					"2400 Failed to allocate xri for " +					"ELS sgl\n"); +			rc = -ENOMEM; +			goto out_free_mem; +		} +		sglq_entry->sli4_lxritag = lxri; +		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; +	} + +	/* +	 * update on pci function's allocated scsi xri-sgl list +	 */ +	phba->total_scsi_bufs = 0; + +	/* maximum number of xris available for scsi buffers */ +	phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri - +		
		      els_xri_cnt; + +	lpfc_printf_log(phba, KERN_INFO, LOG_SLI, +			"2401 Current allocated SCSI xri-sgl count:%d, " +			"maximum  SCSI xri count:%d\n", +			phba->sli4_hba.scsi_xri_cnt, +			phba->sli4_hba.scsi_xri_max); + +	spin_lock_irq(&phba->scsi_buf_list_get_lock); +	spin_lock(&phba->scsi_buf_list_put_lock); +	list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list); +	list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list); +	spin_unlock(&phba->scsi_buf_list_put_lock); +	spin_unlock_irq(&phba->scsi_buf_list_get_lock); + +	if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { +		/* max scsi xri shrinked below the allocated scsi buffers */ +		scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt - +					phba->sli4_hba.scsi_xri_max; +		/* release the extra allocated scsi buffers */ +		for (i = 0; i < scsi_xri_cnt; i++) { +			list_remove_head(&scsi_sgl_list, psb, +					 struct lpfc_scsi_buf, list); +			pci_pool_free(phba->lpfc_scsi_dma_buf_pool, psb->data, +				      psb->dma_handle); +			kfree(psb); +		} +		spin_lock_irq(&phba->scsi_buf_list_get_lock); +		phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt; +		spin_unlock_irq(&phba->scsi_buf_list_get_lock); +	} + +	/* update xris associated to remaining allocated scsi buffers */ +	psb = NULL; +	psb_next = NULL; +	list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) { +		lxri = lpfc_sli4_next_xritag(phba); +		if (lxri == NO_XRI) { +			lpfc_printf_log(phba, KERN_ERR, LOG_SLI, +					"2560 Failed to allocate xri for " +					"scsi buffer\n"); +			rc = -ENOMEM; +			goto out_free_mem; +		} +		psb->cur_iocbq.sli4_lxritag = lxri; +		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; +	} +	spin_lock_irq(&phba->scsi_buf_list_get_lock); +	spin_lock(&phba->scsi_buf_list_put_lock); +	list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get); +	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); +	spin_unlock(&phba->scsi_buf_list_put_lock); +	spin_unlock_irq(&phba->scsi_buf_list_get_lock); +  	return 0; + +out_free_mem: +	lpfc_free_els_sgl_list(phba); +	lpfc_scsi_free(phba); +	return rc;  }  /** @@ -2658,6 +3284,11 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)  	init_timer(&vport->els_tmofunc);  	vport->els_tmofunc.function = lpfc_els_timeout;  	vport->els_tmofunc.data = (unsigned long)vport; + +	init_timer(&vport->delayed_disc_tmo); +	vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo; +	vport->delayed_disc_tmo.data = (unsigned long)vport; +  	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);  	if (error)  		goto out_put_shost; @@ -2711,14 +3342,10 @@ destroy_port(struct lpfc_vport *vport)  int  lpfc_get_instance(void)  { -	int instance = 0; +	int ret; -	/* Assign an unused number */ -	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) -		return -1; -	if (idr_get_new(&lpfc_hba_index, NULL, &instance)) -		return -1; -	return instance; +	ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); +	return ret < 0 ? -1 : ret;  }  /** @@ -2748,14 +3375,15 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)  		stat = 1;  		goto finished;  	} -	if (time >= 30 * HZ) { +	if (time >= msecs_to_jiffies(30 * 1000)) {  		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,  				"0461 Scanning longer than 30 "  				"seconds.  
Continuing initialization\n");  		stat = 1;  		goto finished;  	} -	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { +	if (time >= msecs_to_jiffies(15 * 1000) && +	    phba->link_state <= LPFC_LINK_DOWN) {  		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,  				"0465 Link down longer than 15 "  				"seconds.  Continuing initialization\n"); @@ -2767,7 +3395,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)  		goto finished;  	if (vport->num_disc_nodes || vport->fc_prli_sent)  		goto finished; -	if (vport->fc_map_cnt == 0 && time < 2 * HZ) +	if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))  		goto finished;  	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)  		goto finished; @@ -2807,6 +3435,8 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)  				 sizeof fc_host_symbolic_name(shost));  	fc_host_supported_speeds(shost) = 0; +	if (phba->lmt & LMT_16Gb) +		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;  	if (phba->lmt & LMT_10Gb)  		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;  	if (phba->lmt & LMT_8Gb) @@ -2943,63 +3573,6 @@ lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)  }  /** - * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support - * @phba: pointer to lpfc hba data structure. - * - * This function uses the QUERY_FW_CFG mailbox command to determine if the - * firmware loaded supports FCoE. A return of zero indicates that the mailbox - * was successful and the firmware supports FCoE. Any other return indicates - * a error. It is assumed that this function will be called before interrupts - * are enabled. - **/ -static int -lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba) -{ -	int rc = 0; -	LPFC_MBOXQ_t *mboxq; -	struct lpfc_mbx_query_fw_cfg *query_fw_cfg; -	uint32_t length; -	uint32_t shdr_status, shdr_add_status; - -	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); -	if (!mboxq) { -		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -				"2621 Failed to allocate mbox for " -				"query firmware config cmd\n"); -		return -ENOMEM; -	} -	query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg; -	length = (sizeof(struct lpfc_mbx_query_fw_cfg) - -		  sizeof(struct lpfc_sli4_cfg_mhdr)); -	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, -			 LPFC_MBOX_OPCODE_QUERY_FW_CFG, -			 length, LPFC_SLI4_MBX_EMBED); -	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); -	/* The IOCTL status is embedded in the mailbox subheader. */ -	shdr_status = bf_get(lpfc_mbox_hdr_status, -			     &query_fw_cfg->header.cfg_shdr.response); -	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, -				 &query_fw_cfg->header.cfg_shdr.response); -	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) { -		lpfc_printf_log(phba, KERN_ERR, LOG_SLI, -				"2622 Query Firmware Config failed " -				"mbx status x%x, status x%x add_status x%x\n", -				rc, shdr_status, shdr_add_status); -		return -EINVAL; -	} -	if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) { -		lpfc_printf_log(phba, KERN_ERR, LOG_SLI, -				"2623 FCoE Function not supported by firmware. " -				"Function mode = %08x\n", -				query_fw_cfg->function_mode); -		return -EINVAL; -	} -	if (rc != MBX_TIMEOUT) -		mempool_free(mboxq, phba->mbox_mem_pool); -	return 0; -} - -/**   * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code   * @phba: pointer to lpfc hba data structure.   * @acqe_link: pointer to the async link completion queue entry. 
@@ -3051,20 +3624,20 @@ lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,  	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {  	case LPFC_ASYNC_LINK_STATUS_DOWN:  	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: -		att_type = AT_LINK_DOWN; +		att_type = LPFC_ATT_LINK_DOWN;  		break;  	case LPFC_ASYNC_LINK_STATUS_UP:  		/* Ignore physical link up events - wait for logical link up */ -		att_type = AT_RESERVED; +		att_type = LPFC_ATT_RESERVED;  		break;  	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: -		att_type = AT_LINK_UP; +		att_type = LPFC_ATT_LINK_UP;  		break;  	default:  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  				"0399 Invalid link attention type: x%x\n",  				bf_get(lpfc_acqe_link_status, acqe_link)); -		att_type = AT_RESERVED; +		att_type = LPFC_ATT_RESERVED;  		break;  	}  	return att_type; @@ -3088,36 +3661,145 @@ lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,  	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {  	case LPFC_ASYNC_LINK_SPEED_ZERO: -		link_speed = LA_UNKNW_LINK; -		break;  	case LPFC_ASYNC_LINK_SPEED_10MBPS: -		link_speed = LA_UNKNW_LINK; -		break;  	case LPFC_ASYNC_LINK_SPEED_100MBPS: -		link_speed = LA_UNKNW_LINK; +		link_speed = LPFC_LINK_SPEED_UNKNOWN;  		break;  	case LPFC_ASYNC_LINK_SPEED_1GBPS: -		link_speed = LA_1GHZ_LINK; +		link_speed = LPFC_LINK_SPEED_1GHZ;  		break;  	case LPFC_ASYNC_LINK_SPEED_10GBPS: -		link_speed = LA_10GHZ_LINK; +		link_speed = LPFC_LINK_SPEED_10GHZ;  		break;  	default:  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  				"0483 Invalid link-attention link speed: x%x\n",  				bf_get(lpfc_acqe_link_speed, acqe_link)); -		link_speed = LA_UNKNW_LINK; +		link_speed = LPFC_LINK_SPEED_UNKNOWN;  		break;  	}  	return link_speed;  }  /** - * lpfc_sli4_async_link_evt - Process the asynchronous link event + * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed + * @phba: pointer to lpfc hba data structure. + * + * This routine is to get an SLI3 FC port's link speed in Mbps. + * + * Return: link speed in terms of Mbps. + **/ +uint32_t +lpfc_sli_port_speed_get(struct lpfc_hba *phba) +{ +	uint32_t link_speed; + +	if (!lpfc_is_link_up(phba)) +		return 0; + +	switch (phba->fc_linkspeed) { +	case LPFC_LINK_SPEED_1GHZ: +		link_speed = 1000; +		break; +	case LPFC_LINK_SPEED_2GHZ: +		link_speed = 2000; +		break; +	case LPFC_LINK_SPEED_4GHZ: +		link_speed = 4000; +		break; +	case LPFC_LINK_SPEED_8GHZ: +		link_speed = 8000; +		break; +	case LPFC_LINK_SPEED_10GHZ: +		link_speed = 10000; +		break; +	case LPFC_LINK_SPEED_16GHZ: +		link_speed = 16000; +		break; +	default: +		link_speed = 0; +	} +	return link_speed; +} + +/** + * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed + * @phba: pointer to lpfc hba data structure. + * @evt_code: asynchronous event code. + * @speed_code: asynchronous event link speed code. + * + * This routine is to parse the given SLI4 async event link speed code into + * the value of Mbps for the link speed. + * + * Return: link speed in terms of Mbps. 
+ **/ +static uint32_t +lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, +			   uint8_t speed_code) +{ +	uint32_t port_speed; + +	switch (evt_code) { +	case LPFC_TRAILER_CODE_LINK: +		switch (speed_code) { +		case LPFC_EVT_CODE_LINK_NO_LINK: +			port_speed = 0; +			break; +		case LPFC_EVT_CODE_LINK_10_MBIT: +			port_speed = 10; +			break; +		case LPFC_EVT_CODE_LINK_100_MBIT: +			port_speed = 100; +			break; +		case LPFC_EVT_CODE_LINK_1_GBIT: +			port_speed = 1000; +			break; +		case LPFC_EVT_CODE_LINK_10_GBIT: +			port_speed = 10000; +			break; +		default: +			port_speed = 0; +		} +		break; +	case LPFC_TRAILER_CODE_FC: +		switch (speed_code) { +		case LPFC_EVT_CODE_FC_NO_LINK: +			port_speed = 0; +			break; +		case LPFC_EVT_CODE_FC_1_GBAUD: +			port_speed = 1000; +			break; +		case LPFC_EVT_CODE_FC_2_GBAUD: +			port_speed = 2000; +			break; +		case LPFC_EVT_CODE_FC_4_GBAUD: +			port_speed = 4000; +			break; +		case LPFC_EVT_CODE_FC_8_GBAUD: +			port_speed = 8000; +			break; +		case LPFC_EVT_CODE_FC_10_GBAUD: +			port_speed = 10000; +			break; +		case LPFC_EVT_CODE_FC_16_GBAUD: +			port_speed = 16000; +			break; +		default: +			port_speed = 0; +		} +		break; +	default: +		port_speed = 0; +	} +	return port_speed; +} + +/** + * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event   * @phba: pointer to lpfc hba data structure.   * @acqe_link: pointer to the async link completion queue entry.   * - * This routine is to handle the SLI4 asynchronous link event. + * This routine is to handle the SLI4 asynchronous FCoE link event.   **/  static void  lpfc_sli4_async_link_evt(struct lpfc_hba *phba, @@ -3126,11 +3808,12 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,  	struct lpfc_dmabuf *mp;  	LPFC_MBOXQ_t *pmb;  	MAILBOX_t *mb; -	READ_LA_VAR *la; +	struct lpfc_mbx_read_top *la;  	uint8_t att_type; +	int rc;  	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); -	if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP) +	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)  		return;  	phba->fcoe_eventtag = acqe_link->event_tag;  	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -3161,45 +3844,171 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,  	/* Update link event statistics */  	phba->sli.slistat.link_event++; -	/* Create pseudo lpfc_handle_latt mailbox command from link ACQE */ -	lpfc_read_la(phba, pmb, mp); +	/* Create lpfc_handle_latt mailbox command from link ACQE */ +	lpfc_read_topology(phba, pmb, mp); +	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;  	pmb->vport = phba->pport; +	/* Keep the link status for extra SLI4 state machine reference */ +	phba->sli4_hba.link_state.speed = +			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, +				bf_get(lpfc_acqe_link_speed, acqe_link)); +	phba->sli4_hba.link_state.duplex = +				bf_get(lpfc_acqe_link_duplex, acqe_link); +	phba->sli4_hba.link_state.status = +				bf_get(lpfc_acqe_link_status, acqe_link); +	phba->sli4_hba.link_state.type = +				bf_get(lpfc_acqe_link_type, acqe_link); +	phba->sli4_hba.link_state.number = +				bf_get(lpfc_acqe_link_number, acqe_link); +	phba->sli4_hba.link_state.fault = +				bf_get(lpfc_acqe_link_fault, acqe_link); +	phba->sli4_hba.link_state.logical_speed = +			bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; + +	lpfc_printf_log(phba, KERN_INFO, LOG_SLI, +			"2900 Async FC/FCoE Link event - Speed:%dGBit " +			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " +			"Logical speed:%dMbps Fault:%d\n", +			
phba->sli4_hba.link_state.speed, +			phba->sli4_hba.link_state.topology, +			phba->sli4_hba.link_state.status, +			phba->sli4_hba.link_state.type, +			phba->sli4_hba.link_state.number, +			phba->sli4_hba.link_state.logical_speed, +			phba->sli4_hba.link_state.fault); +	/* +	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch +	 * topology info. Note: Optional for non FC-AL ports. +	 */ +	if (!(phba->hba_flag & HBA_FCOE_MODE)) { +		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); +		if (rc == MBX_NOT_FINISHED) +			goto out_free_dmabuf; +		return; +	} +	/* +	 * For FCoE Mode: fill in all the topology information we need and call +	 * the READ_TOPOLOGY completion routine to continue without actually +	 * sending the READ_TOPOLOGY mailbox command to the port. +	 */  	/* Parse and translate status field */  	mb = &pmb->u.mb;  	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);  	/* Parse and translate link attention fields */ -	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA; +	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;  	la->eventTag = acqe_link->event_tag; -	la->attType = att_type; -	la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link); +	bf_set(lpfc_mbx_read_top_att_type, la, att_type); +	bf_set(lpfc_mbx_read_top_link_spd, la, +	       lpfc_sli4_parse_latt_link_speed(phba, acqe_link));  	/* Fake the following irrelevant fields */ -	la->topology = TOPOLOGY_PT_PT; -	la->granted_AL_PA = 0; -	la->il = 0; -	la->pb = 0; -	la->fa = 0; -	la->mm = 0; +	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT); +	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0); +	bf_set(lpfc_mbx_read_top_il, la, 0); +	bf_set(lpfc_mbx_read_top_pb, la, 0); +	bf_set(lpfc_mbx_read_top_fa, la, 0); +	bf_set(lpfc_mbx_read_top_mm, la, 0); + +	/* Invoke the lpfc_handle_latt mailbox command callback function */ +	lpfc_mbx_cmpl_read_topology(phba, pmb); + +	return; + +out_free_dmabuf: +	kfree(mp); +out_free_pmb: +	mempool_free(pmb, phba->mbox_mem_pool); +} +/** + * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event + * @phba: pointer to lpfc hba data structure. + * @acqe_fc: pointer to the async fc completion queue entry. + * + * This routine is to handle the SLI4 asynchronous FC event. It will simply log + * that the event was received and then issue a read_topology mailbox command so + * that the rest of the driver will treat it the same as SLI3. 
+ **/ +static void +lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) +{ +	struct lpfc_dmabuf *mp; +	LPFC_MBOXQ_t *pmb; +	int rc; + +	if (bf_get(lpfc_trailer_type, acqe_fc) != +	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) { +		lpfc_printf_log(phba, KERN_ERR, LOG_SLI, +				"2895 Non FC link Event detected.(%d)\n", +				bf_get(lpfc_trailer_type, acqe_fc)); +		return; +	}  	/* Keep the link status for extra SLI4 state machine reference */  	phba->sli4_hba.link_state.speed = -				bf_get(lpfc_acqe_link_speed, acqe_link); -	phba->sli4_hba.link_state.duplex = -				bf_get(lpfc_acqe_link_duplex, acqe_link); +			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, +				bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); +	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; +	phba->sli4_hba.link_state.topology = +				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);  	phba->sli4_hba.link_state.status = -				bf_get(lpfc_acqe_link_status, acqe_link); -	phba->sli4_hba.link_state.physical = -				bf_get(lpfc_acqe_link_physical, acqe_link); +				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); +	phba->sli4_hba.link_state.type = +				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); +	phba->sli4_hba.link_state.number = +				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);  	phba->sli4_hba.link_state.fault = -				bf_get(lpfc_acqe_link_fault, acqe_link); +				bf_get(lpfc_acqe_link_fault, acqe_fc);  	phba->sli4_hba.link_state.logical_speed = -				bf_get(lpfc_acqe_qos_link_speed, acqe_link); +				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; +	lpfc_printf_log(phba, KERN_INFO, LOG_SLI, +			"2896 Async FC event - Speed:%dGBaud Topology:x%x " +			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:" +			"%dMbps Fault:%d\n", +			phba->sli4_hba.link_state.speed, +			phba->sli4_hba.link_state.topology, +			phba->sli4_hba.link_state.status, +			phba->sli4_hba.link_state.type, +			phba->sli4_hba.link_state.number, +			phba->sli4_hba.link_state.logical_speed, +			phba->sli4_hba.link_state.fault); +	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); +	if (!pmb) { +		lpfc_printf_log(phba, KERN_ERR, LOG_SLI, +				"2897 The mboxq allocation failed\n"); +		return; +	} +	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); +	if (!mp) { +		lpfc_printf_log(phba, KERN_ERR, LOG_SLI, +				"2898 The lpfc_dmabuf allocation failed\n"); +		goto out_free_pmb; +	} +	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); +	if (!mp->virt) { +		lpfc_printf_log(phba, KERN_ERR, LOG_SLI, +				"2899 The mbuf allocation failed\n"); +		goto out_free_dmabuf; +	} -	/* Invoke the lpfc_handle_latt mailbox command callback function */ -	lpfc_mbx_cmpl_read_la(phba, pmb); +	/* Cleanup any outstanding ELS commands */ +	lpfc_els_flush_all_cmd(phba); +	/* Block ELS IOCBs until we have done process link event */ +	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; + +	/* Update link event statistics */ +	phba->sli.slistat.link_event++; + +	/* Create lpfc_handle_latt mailbox command from link ACQE */ +	lpfc_read_topology(phba, pmb, mp); +	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; +	pmb->vport = phba->pport; + +	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); +	if (rc == MBX_NOT_FINISHED) +		goto out_free_dmabuf;  	return;  out_free_dmabuf: @@ -3209,6 +4018,92 @@ out_free_pmb:  }  /** + * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event + * @phba: pointer to lpfc hba data structure. + * @acqe_fc: pointer to the async SLI completion queue entry. 
+ * + * This routine is to handle the SLI4 asynchronous SLI events. + **/ +static void +lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) +{ +	char port_name; +	char message[128]; +	uint8_t status; +	struct lpfc_acqe_misconfigured_event *misconfigured; + +	/* special case misconfigured event as it contains data for all ports */ +	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != +		 LPFC_SLI_INTF_IF_TYPE_2) || +		(bf_get(lpfc_trailer_type, acqe_sli) != +			LPFC_SLI_EVENT_TYPE_MISCONFIGURED)) { +		lpfc_printf_log(phba, KERN_INFO, LOG_SLI, +				"2901 Async SLI event - Event Data1:x%08x Event Data2:" +				"x%08x SLI Event Type:%d\n", +				acqe_sli->event_data1, acqe_sli->event_data2, +				bf_get(lpfc_trailer_type, acqe_sli)); +		return; +	} + +	port_name = phba->Port[0]; +	if (port_name == 0x00) +		port_name = '?'; /* port name is empty */ + +	misconfigured = (struct lpfc_acqe_misconfigured_event *) +					&acqe_sli->event_data1; + +	/* fetch the status for this port */ +	switch (phba->sli4_hba.lnk_info.lnk_no) { +	case LPFC_LINK_NUMBER_0: +		status = bf_get(lpfc_sli_misconfigured_port0, +					&misconfigured->theEvent); +		break; +	case LPFC_LINK_NUMBER_1: +		status = bf_get(lpfc_sli_misconfigured_port1, +					&misconfigured->theEvent); +		break; +	case LPFC_LINK_NUMBER_2: +		status = bf_get(lpfc_sli_misconfigured_port2, +					&misconfigured->theEvent); +		break; +	case LPFC_LINK_NUMBER_3: +		status = bf_get(lpfc_sli_misconfigured_port3, +					&misconfigured->theEvent); +		break; +	default: +		status = ~LPFC_SLI_EVENT_STATUS_VALID; +		break; +	} + +	switch (status) { +	case LPFC_SLI_EVENT_STATUS_VALID: +		return; /* no message if the sfp is okay */ +	case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: +		sprintf(message, "Optics faulted/incorrectly installed/not " \ +				"installed - Reseat optics, if issue not " +				"resolved, replace."); +		break; +	case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: +		sprintf(message, +			"Optics of two types installed - Remove one optic or " \ +			"install matching pair of optics."); +		break; +	case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: +		sprintf(message, "Incompatible optics - Replace with " \ +				"compatible optics for card to function."); +		break; +	default: +		/* firmware is reporting a status we don't know about */ +		sprintf(message, "Unknown event status x%02x", status); +		break; +	} + +	lpfc_printf_log(phba, KERN_ERR, LOG_SLI, +			"3176 Misconfigured Physical Port - " +			"Port Name %c %s\n", port_name, message); +} + +/**   * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport   * @vport: pointer to vport data structure.   * @@ -3247,10 +4142,12 @@ lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)  		if (!ndlp)  			return 0;  	} -	if (phba->pport->port_state < LPFC_FLOGI) +	if ((phba->pport->port_state < LPFC_FLOGI) && +		(phba->pport->port_state != LPFC_VPORT_FAILED))  		return NULL;  	/* If virtual link is not yet instantiated ignore CVL */ -	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)) +	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) +		&& (vport->port_state != LPFC_VPORT_FAILED))  		return NULL;  	shost = lpfc_shost_from_vport(vport);  	if (!shost) @@ -3285,17 +4182,17 @@ lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)  }  /** - * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event + * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event   * @phba: pointer to lpfc hba data structure.   
* @acqe_link: pointer to the async fcoe completion queue entry.   *   * This routine is to handle the SLI4 asynchronous fcoe event.   **/  static void -lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, -			 struct lpfc_acqe_fcoe *acqe_fcoe) +lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, +			struct lpfc_acqe_fip *acqe_fip)  { -	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe); +	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);  	int rc;  	struct lpfc_vport *vport;  	struct lpfc_nodelist *ndlp; @@ -3304,25 +4201,25 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,  	struct lpfc_vport **vports;  	int i; -	phba->fc_eventTag = acqe_fcoe->event_tag; -	phba->fcoe_eventtag = acqe_fcoe->event_tag; +	phba->fc_eventTag = acqe_fip->event_tag; +	phba->fcoe_eventtag = acqe_fip->event_tag;  	switch (event_type) { -	case LPFC_FCOE_EVENT_TYPE_NEW_FCF: -	case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD: -		if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF) +	case LPFC_FIP_EVENT_TYPE_NEW_FCF: +	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: +		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)  			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |  					LOG_DISCOVERY,  					"2546 New FCF event, evt_tag:x%x, "  					"index:x%x\n", -					acqe_fcoe->event_tag, -					acqe_fcoe->index); +					acqe_fip->event_tag, +					acqe_fip->index);  		else  			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |  					LOG_DISCOVERY,  					"2788 FCF param modified event, "  					"evt_tag:x%x, index:x%x\n", -					acqe_fcoe->event_tag, -					acqe_fcoe->index); +					acqe_fip->event_tag, +					acqe_fip->index);  		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {  			/*  			 * During period of FCF discovery, read the FCF @@ -3333,8 +4230,8 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,  					LOG_DISCOVERY,  					"2779 Read FCF (x%x) for updating "  					"roundrobin FCF failover bmask\n", -					acqe_fcoe->index); -			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index); +					acqe_fip->index); +			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);  		}  		/* If the FCF discovery is in progress, do nothing. */ @@ -3360,7 +4257,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,  		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,  				"2770 Start FCF table scan per async FCF "  				"event, evt_tag:x%x, index:x%x\n", -				acqe_fcoe->event_tag, acqe_fcoe->index); +				acqe_fip->event_tag, acqe_fip->index);  		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,  						     LPFC_FCOE_FCF_GET_FIRST);  		if (rc) @@ -3369,17 +4266,18 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,  					"command failed (x%x)\n", rc);  		break; -	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: +	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:  		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,  			"2548 FCF Table full count 0x%x tag 0x%x\n", -			bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe), -			acqe_fcoe->event_tag); +			bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), +			acqe_fip->event_tag);  		break; -	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: +	case LPFC_FIP_EVENT_TYPE_FCF_DEAD: +		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;  		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,  			"2549 FCF (x%x) disconnected from network, " -			"tag:x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); +			"tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);  		/*  		 * If we are in the middle of FCF failover process, clear  		 * the corresponding FCF bit in the roundrobin bitmap. 
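The FIP hunks on either side of this point maintain the driver's "roundrobin FCF failover bmask", a bitmap of FCF indices still eligible for the next failover attempt. The sketch below shows the general round-robin-over-a-bitmap technique with the standard kernel bitmap helpers; the names and the MY_FCF_MAX bound are hypothetical and only illustrate the idea, not lpfc's actual implementation:

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define MY_FCF_MAX 64				/* illustrative table size */

static DECLARE_BITMAP(my_fcf_bmask, MY_FCF_MAX);

/* Pick the next eligible FCF index after 'last', wrapping back to the
 * start of the bitmap; returns MY_FCF_MAX when no index is eligible.
 */
static unsigned int my_next_fcf(unsigned int last)
{
	unsigned int next;

	next = find_next_bit(my_fcf_bmask, MY_FCF_MAX, last + 1);
	if (next >= MY_FCF_MAX)
		next = find_next_bit(my_fcf_bmask, MY_FCF_MAX, 0);
	return next;
}

A dead or misbehaving FCF would be dropped from the rotation with clear_bit(), which is the effect the lpfc_sli4_fcf_rr_index_clear() call in the next hunk achieves for the index reported in the event.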
@@ -3388,13 +4286,13 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,  		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {  			spin_unlock_irq(&phba->hbalock);  			/* Update FLOGI FCF failover eligible FCF bmask */ -			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index); +			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);  			break;  		}  		spin_unlock_irq(&phba->hbalock);  		/* If the event is not for currently used fcf do nothing */ -		if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index) +		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)  			break;  		/* @@ -3411,7 +4309,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,  		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,  				"2771 Start FCF fast failover process due to "  				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x " -				"\n", acqe_fcoe->event_tag, acqe_fcoe->index); +				"\n", acqe_fip->event_tag, acqe_fip->index);  		rc = lpfc_sli4_redisc_fcf_table(phba);  		if (rc) {  			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | @@ -3429,8 +4327,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,  			lpfc_sli4_fcf_dead_failthrough(phba);  		} else {  			/* Reset FCF roundrobin bmask for new discovery */ -			memset(phba->fcf.fcf_rr_bmask, 0, -			       sizeof(*phba->fcf.fcf_rr_bmask)); +			lpfc_sli4_clear_fcf_rr_bmask(phba);  			/*  			 * Handling fast FCF failover to a DEAD FCF event is  			 * considered equivalent to receiving CVL to all vports.  @@ -3438,12 +4335,14 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,  			lpfc_sli4_perform_all_vport_cvl(phba);  		}  		break; -	case LPFC_FCOE_EVENT_TYPE_CVL: +	case LPFC_FIP_EVENT_TYPE_CVL: +		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;  		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,  			"2718 Clear Virtual Link Received for VPI 0x%x" -			" tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); +			" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); +  		vport = lpfc_find_vport_by_vpid(phba, -				acqe_fcoe->index - phba->vpi_base); +						acqe_fip->index);  		ndlp = lpfc_sli4_perform_vport_cvl(vport);  		if (!ndlp)  			break; @@ -3468,7 +4367,8 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,  			 * If there are other active VLinks present,  			 * re-instantiate the Vlink using FDISC.  			 */ -			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); +			mod_timer(&ndlp->nlp_delayfunc, +				  jiffies + msecs_to_jiffies(1000));  			shost = lpfc_shost_from_vport(vport);  			spin_lock_irq(shost->host_lock);  			ndlp->nlp_flag |= NLP_DELAY_TMO; @@ -3494,7 +4394,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,  			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |  					LOG_DISCOVERY,  					"2773 Start FCF failover per CVL, " -					"evt_tag:x%x\n", acqe_fcoe->event_tag); +					"evt_tag:x%x\n", acqe_fip->event_tag);  			rc = lpfc_sli4_redisc_fcf_table(phba);  			if (rc) {  				lpfc_printf_log(phba, KERN_ERR, LOG_FIP | @@ -3515,14 +4415,13 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,  				 * Reset FCF roundrobin bmask for new  				 * discovery.  				 
*/ -				memset(phba->fcf.fcf_rr_bmask, 0, -				       sizeof(*phba->fcf.fcf_rr_bmask)); +				lpfc_sli4_clear_fcf_rr_bmask(phba);  		}  		break;  	default:  		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,  			"0288 Unknown FCoE event type 0x%x event tag " -			"0x%x\n", event_type, acqe_fcoe->event_tag); +			"0x%x\n", event_type, acqe_fip->event_tag);  		break;  	}  } @@ -3563,11 +4462,11 @@ lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,  	phba->fcoe_eventtag = acqe_grp5->event_tag;  	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;  	phba->sli4_hba.link_state.logical_speed = -		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)); +		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;  	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,  			"2789 GRP5 Async Event: Updating logical link speed " -			"from %dMbps to %dMbps\n", (prev_ll_spd * 10), -			(phba->sli4_hba.link_state.logical_speed*10)); +			"from %dMbps to %dMbps\n", prev_ll_spd, +			phba->sli4_hba.link_state.logical_speed);  }  /** @@ -3599,8 +4498,7 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)  						 &cq_event->cqe.acqe_link);  			break;  		case LPFC_TRAILER_CODE_FCOE: -			lpfc_sli4_async_fcoe_evt(phba, -						 &cq_event->cqe.acqe_fcoe); +			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);  			break;  		case LPFC_TRAILER_CODE_DCBX:  			lpfc_sli4_async_dcbx_evt(phba, @@ -3610,6 +4508,12 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)  			lpfc_sli4_async_grp5_evt(phba,  						 &cq_event->cqe.acqe_grp5);  			break; +		case LPFC_TRAILER_CODE_FC: +			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); +			break; +		case LPFC_TRAILER_CODE_SLI: +			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); +			break;  		default:  			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,  					"1804 Invalid asynchrous event code: " @@ -3740,7 +4644,7 @@ static int  lpfc_enable_pci_dev(struct lpfc_hba *phba)  {  	struct pci_dev *pdev; -	int bars; +	int bars = 0;  	/* Obtain PCI device reference */  	if (!phba->pcidev) @@ -3760,11 +4664,17 @@ lpfc_enable_pci_dev(struct lpfc_hba *phba)  	pci_try_set_mwi(pdev);  	pci_save_state(pdev); +	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */ +	if (pci_is_pcie(pdev)) +		pdev->needs_freset = 1; +  	return 0;  out_disable_device:  	pci_disable_device(pdev);  out_error: +	lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +			"1401 Failed to enable pci device, bars:x%x\n", bars);  	return -ENODEV;  } @@ -3791,8 +4701,6 @@ lpfc_disable_pci_dev(struct lpfc_hba *phba)  	/* Release PCI resource and disable PCI device */  	pci_release_selected_regions(pdev, bars);  	pci_disable_device(pdev); -	/* Null out PCI private reference to driver */ -	pci_set_drvdata(pdev, NULL);  	return;  } @@ -3814,7 +4722,10 @@ lpfc_reset_hba(struct lpfc_hba *phba)  		phba->link_state = LPFC_HBA_ERROR;  		return;  	} -	lpfc_offline_prep(phba); +	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) +		lpfc_offline_prep(phba, LPFC_MBX_WAIT); +	else +		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);  	lpfc_offline(phba);  	lpfc_sli_brdrestart(phba);  	lpfc_online(phba); @@ -3822,6 +4733,70 @@ lpfc_reset_hba(struct lpfc_hba *phba)  }  /** + * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions + * @phba: pointer to lpfc hba data structure. + * + * This function reads the device's PCI SR-IOV extended capability to + * determine the total number of virtual functions the physical function + * supports. 
A return of + * zero indicates that the device does not support SR-IOV. + **/ +uint16_t +lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba) +{ +	struct pci_dev *pdev = phba->pcidev; +	uint16_t nr_virtfn; +	int pos; + +	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); +	if (pos == 0) +		return 0; + +	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn); +	return nr_virtfn; +} + +/** + * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions + * @phba: pointer to lpfc hba data structure. + * @nr_vfn: number of virtual functions to be enabled. + * + * This function enables the PCI SR-IOV virtual functions to a physical + * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to + * enable the number of virtual functions to the physical function. As + * not all devices support SR-IOV, the return code from the pci_enable_sriov() + * API call is not considered an error condition for most devices. + **/ +int +lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) +{ +	struct pci_dev *pdev = phba->pcidev; +	uint16_t max_nr_vfn; +	int rc; + +	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); +	if (nr_vfn > max_nr_vfn) { +		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +				"3057 Requested vfs (%d) greater than " +				"supported vfs (%d)", nr_vfn, max_nr_vfn); +		return -EINVAL; +	} + +	rc = pci_enable_sriov(pdev, nr_vfn); +	if (rc) { +		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, +				"2806 Failed to enable sriov on this device " +				"with vfn number nr_vf:%d, rc:%d\n", +				nr_vfn, rc); +	} else +		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, +				"2807 Successful enable sriov on this device " +				"with vfn number nr_vf:%d\n", nr_vfn); +	return rc; +} + +/**   * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.   * @phba: pointer to lpfc hba data structure.   * @@ -3836,6 +4811,7 @@ static int  lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)  {  	struct lpfc_sli *psli; +	int rc;  	/*  	 * Initialize timers used by driver @@ -3877,24 +4853,60 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)  			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;  	} +	if (!phba->sli.ring) +		phba->sli.ring = (struct lpfc_sli_ring *) +			kzalloc(LPFC_SLI3_MAX_RING * +			sizeof(struct lpfc_sli_ring), GFP_KERNEL); +	if (!phba->sli.ring) +		return -ENOMEM; +  	/* -	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size +	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size  	 * used to create the sg_dma_buf_pool must be dynamically calculated. -	 * 2 segments are added since the IOCB needs a command and response bde.  	 */ +	/* Initialize the host templates with the configured values. */ +	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; +	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; + +	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */  	if (phba->cfg_enable_bg) { -		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; -		phba->cfg_sg_dma_buf_size += -			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); +		/* +		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, +		 * the FCP rsp, and a BDE for each. 
Since we have no control +		 * over how many protection data segments the SCSI Layer +		 * will hand us (ie: there could be one for every block +		 * in the IO), we just allocate enough BDEs to accommodate +		 * our max amount and we need to limit lpfc_sg_seg_cnt to +		 * minimize the risk of running out. +		 */ +		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + +			sizeof(struct fcp_rsp) + +			(LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64)); + +		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) +			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; + +		/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ +		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; +	} else { +		/* +		 * The scsi_buf for a regular I/O will hold the FCP cmnd, +		 * the FCP rsp, a BDE for each, and a BDE for up to +		 * cfg_sg_seg_cnt data segments. +		 */ +		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + +			sizeof(struct fcp_rsp) + +			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); + +		/* Total BDEs in BPL for scsi_sg_list */ +		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;  	} -	/* Also reinitialize the host templates with new values. */ -	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; -	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; +	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, +			"9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", +			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, +			phba->cfg_total_seg_cnt);  	phba->max_vpi = LPFC_MAX_VPI;  	/* This will be set to correct value after config_port mbox */ @@ -3910,6 +4922,23 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)  	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))  		return -ENOMEM; +	/* +	 * Enable sr-iov virtual functions if supported and configured +	 * through the module parameter. +	 */ +	if (phba->cfg_sriov_nr_virtfn > 0) { +		rc = lpfc_sli_probe_sriov_nr_virtfn(phba, +						 phba->cfg_sriov_nr_virtfn); +		if (rc) { +			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, +					"2808 Requested number of SR-IOV " +					"virtual functions (%d) is not " +					"supported\n", +					phba->cfg_sriov_nr_virtfn); +			phba->cfg_sriov_nr_virtfn = 0; +		} +	} +  	return 0;  } @@ -3943,12 +4972,17 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)  static int  lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)  { +	struct lpfc_vector_map_info *cpup;  	struct lpfc_sli *psli;  	LPFC_MBOXQ_t *mboxq; -	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; +	int rc, i, hbq_count, max_buf_size;  	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};  	struct lpfc_mqe *mqe;  	int longs; +	int fof_vectors = 0; + +	/* Get all the module params for configuring this host */ +	lpfc_get_cfgparam(phba);  	/* Before proceed, wait for POST done and device ready */  	rc = lpfc_sli4_post_status_check(phba); @@ -3963,6 +4997,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)  	init_timer(&phba->hb_tmofunc);  	phba->hb_tmofunc.function = lpfc_hb_timeout;  	phba->hb_tmofunc.data = (unsigned long)phba; +	init_timer(&phba->rrq_tmr); +	phba->rrq_tmr.function = lpfc_rrq_timeout; +	phba->rrq_tmr.data = (unsigned long)phba;  	psli = &phba->sli;  	/* MBOX heartbeat timer */ @@ -3983,15 +5020,15 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)  	phba->fcf.redisc_wait.data = (unsigned long)phba;  	/* -	 * We need to do a READ_CONFIG mailbox command here before -	 * calling lpfc_get_cfgparam. For VFs this will report the -	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings. 
-	 * All of the resources allocated -	 * for this Port are tied to these values. +	 * Control structure for handling external multi-buffer mailbox +	 * command pass-through.  	 */ -	/* Get all the module params for configuring this host */ -	lpfc_get_cfgparam(phba); +	memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, +		sizeof(struct lpfc_mbox_ext_buf_ctx)); +	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); +  	phba->max_vpi = LPFC_MAX_VPI; +  	/* This will be set to correct value after the read_config mbox */  	phba->max_vports = 0; @@ -4002,30 +5039,80 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)  	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;  	/* -	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size +	 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands +	 * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple. +	 */ +	if (!phba->sli.ring) +		phba->sli.ring = kzalloc( +			(LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) * +			sizeof(struct lpfc_sli_ring), GFP_KERNEL); +	if (!phba->sli.ring) +		return -ENOMEM; + +	/* +	 * It doesn't matter what family our adapter is in, we are +	 * limited to 2 Pages, 512 SGEs, for our SGL. +	 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp +	 */ +	max_buf_size = (2 * SLI4_PAGE_SIZE); +	if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2) +		phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2; + +	/* +	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size  	 * used to create the sg_dma_buf_pool must be dynamically calculated. -	 * 2 segments are added since the IOCB needs a command and response bde. -	 * To insure that the scsi sgl does not cross a 4k page boundary only -	 * sgl sizes of must be a power of 2.  	 */ -	buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + -		    ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge))); -	/* Feature Level 1 hardware is limited to 2 pages */ -	if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) == -	     LPFC_SLI_INTF_FEATURELEVEL1_1)) -		max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE; + +	if (phba->cfg_enable_bg) { +		/* +		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, +		 * the FCP rsp, and a SGE for each. Since we have no control +		 * over how many protection data segments the SCSI Layer +		 * will hand us (ie: there could be one for every block +		 * in the IO), we just allocate enough SGEs to accommodate +		 * our max amount and we need to limit lpfc_sg_seg_cnt to +		 * minimize the risk of running out. +		 */ +		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + +			sizeof(struct fcp_rsp) + max_buf_size; + +		/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ +		phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; + +		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF) +			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF; +	} else { +		/* +		 * The scsi_buf for a regular I/O will hold the FCP cmnd, +		 * the FCP rsp, a SGE for each, and a SGE for up to +		 * cfg_sg_seg_cnt data segments. +		 */ +		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + +			sizeof(struct fcp_rsp) + +			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)); + +		/* Total SGEs for scsi_sg_list */ +		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; +		/* +		 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need +		 * to post 1 page for the SGL. +		 */ +	} + +	/* Initialize the host templates with the updated values. 
*/ +	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; +	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; + +	if (phba->cfg_sg_dma_buf_size  <= LPFC_MIN_SG_SLI4_BUF_SZ) +		phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;  	else -		max_buf_size = LPFC_SLI4_MAX_BUF_SIZE; -	for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE; -	     dma_buf_size < max_buf_size && buf_size > dma_buf_size; -	     dma_buf_size = dma_buf_size << 1) -		; -	if (dma_buf_size == max_buf_size) -		phba->cfg_sg_seg_cnt = (dma_buf_size - -			sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) - -			(2 * sizeof(struct sli4_sge))) / -				sizeof(struct sli4_sge); -	phba->cfg_sg_dma_buf_size = dma_buf_size; +		phba->cfg_sg_dma_buf_size = +			SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); + +	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, +			"9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n", +			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, +			phba->cfg_total_seg_cnt);  	/* Initialize buffer queue management fields */  	hbq_count = lpfc_sli_hbq_count(); @@ -4045,7 +5132,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)  	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);  	/* -	 * Initialize dirver internal slow-path work queues +	 * Initialize driver internal slow-path work queues  	 */  	/* Driver internal slow-path CQ Event pool */ @@ -4061,6 +5148,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)  	/* Receive queue CQ Event work queue list */  	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); +	/* Initialize extent block lists. */ +	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); +	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); +	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); +	INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); +  	/* Initialize the driver internal SLI layer lists. */  	lpfc_sli_setup(phba);  	lpfc_sli_queue_setup(phba);  	/* Allocate device driver memory */  	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);  	if (rc)  		return -ENOMEM; +	/* IF Type 2 ports get initialized now. */ +	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == +	    LPFC_SLI_INTF_IF_TYPE_2) { +		rc = lpfc_pci_function_reset(phba); +		if (unlikely(rc)) +			return -ENODEV; +	} +  	/* Create the bootstrap mailbox command */  	rc = lpfc_create_bootstrap_mbox(phba);  	if (unlikely(rc))  		goto out_free_mem;  	/* Set up the host's endian order with the device. */  	rc = lpfc_setup_endian_order(phba);  	if (unlikely(rc))  		goto out_free_bsmbx; -	rc = lpfc_sli4_fw_cfg_check(phba); -	if (unlikely(rc)) -		goto out_free_bsmbx; -  	/* Set up the hba's configuration parameters. */  	rc = lpfc_sli4_read_config(phba);  	if (unlikely(rc))  		goto out_free_bsmbx; --	/* Perform a function reset */ -	rc = lpfc_pci_function_reset(phba); +	rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);  	if (unlikely(rc))  		goto out_free_bsmbx; +	/* IF Type 0 ports get initialized now. */ +	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == +	    LPFC_SLI_INTF_IF_TYPE_0) { +		rc = lpfc_pci_function_reset(phba); +		if (unlikely(rc)) +			goto out_free_bsmbx; +	} +  	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,  						       GFP_KERNEL);  	if (!mboxq) { @@ -4101,60 +5204,73 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)  		goto out_free_bsmbx;  	} -	/* Get the Supported Pages. It is always available. */ +	/* Get the Supported Pages if PORT_CAPABILITIES is supported by port. 
*/  	lpfc_supported_pages(mboxq);  	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); -	if (unlikely(rc)) { -		rc = -EIO; -		mempool_free(mboxq, phba->mbox_mem_pool); -		goto out_free_bsmbx; -	} - -	mqe = &mboxq->u.mqe; -	memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), -	       LPFC_MAX_SUPPORTED_PAGES); -	for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { -		switch (pn_page[i]) { -		case LPFC_SLI4_PARAMETERS: -			phba->sli4_hba.pc_sli4_params.supported = 1; -			break; -		default: -			break; +	if (!rc) { +		mqe = &mboxq->u.mqe; +		memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), +		       LPFC_MAX_SUPPORTED_PAGES); +		for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { +			switch (pn_page[i]) { +			case LPFC_SLI4_PARAMETERS: +				phba->sli4_hba.pc_sli4_params.supported = 1; +				break; +			default: +				break; +			} +		} +		/* Read the port's SLI4 Parameters capabilities if supported. */ +		if (phba->sli4_hba.pc_sli4_params.supported) +			rc = lpfc_pc_sli4_params_get(phba, mboxq); +		if (rc) { +			mempool_free(mboxq, phba->mbox_mem_pool); +			rc = -EIO; +			goto out_free_bsmbx;  		}  	} - -	/* Read the port's SLI4 Parameters capabilities if supported. */ -	if (phba->sli4_hba.pc_sli4_params.supported) -		rc = lpfc_pc_sli4_params_get(phba, mboxq); -	mempool_free(mboxq, phba->mbox_mem_pool); +	/* +	 * Get sli4 parameters that override parameters from Port capabilities. +	 * If this call fails, it isn't critical unless the SLI4 parameters come +	 * back in conflict. +	 */ +	rc = lpfc_get_sli4_parameters(phba, mboxq);  	if (rc) { -		rc = -EIO; -		goto out_free_bsmbx; +		if (phba->sli4_hba.extents_in_use && +		    phba->sli4_hba.rpi_hdrs_in_use) { +			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +				"2999 Unsupported SLI4 Parameters " +				"Extents and RPI headers enabled.\n"); +			goto out_free_bsmbx; +		}  	} -	/* Create all the SLI4 queues */ -	rc = lpfc_sli4_queue_create(phba); +	mempool_free(mboxq, phba->mbox_mem_pool); + +	/* Verify OAS is supported */ +	lpfc_sli4_oas_verify(phba); +	if (phba->cfg_fof) +		fof_vectors = 1; + +	/* Verify all the SLI4 queues */ +	rc = lpfc_sli4_queue_verify(phba);  	if (rc)  		goto out_free_bsmbx;  	/* Create driver internal CQE event pool */  	rc = lpfc_sli4_cq_event_pool_create(phba);  	if (rc) -		goto out_destroy_queue; +		goto out_free_bsmbx; -	/* Initialize and populate the iocb list per host */ -	rc = lpfc_init_sgl_list(phba); -	if (rc) { -		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -				"1400 Failed to initialize sgl list.\n"); -		goto out_destroy_cq_event_pool; -	} +	/* Initialize sgl lists per host */ +	lpfc_init_sgl_list(phba); + +	/* Allocate and initialize active sgl array */  	rc = lpfc_init_active_sgl_array(phba);  	if (rc) {  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  				"1430 Failed to initialize sgl list.\n"); -		goto out_free_sgl_list; +		goto out_destroy_cq_event_pool;  	} -  	rc = lpfc_sli4_init_rpi_hdrs(phba);  	if (rc) {  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -4170,29 +5286,89 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  				"2759 Failed allocate memory for FCF round "  				"robin failover bmask\n"); +		rc = -ENOMEM;  		goto out_remove_rpi_hdrs;  	} -	phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * -				    phba->cfg_fcp_eq_count), GFP_KERNEL); +	phba->sli4_hba.fcp_eq_hdl = +			kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * +			    (fof_vectors + phba->cfg_fcp_io_channel)), +			    GFP_KERNEL);  	if (!phba->sli4_hba.fcp_eq_hdl) {  		
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -				"2572 Failed allocate memory for fast-path " -				"per-EQ handle array\n"); +				"2572 Failed allocate memory for " +				"fast-path per-EQ handle array\n"); +		rc = -ENOMEM;  		goto out_free_fcf_rr_bmask;  	}  	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * -				      phba->sli4_hba.cfg_eqn), GFP_KERNEL); +				  (fof_vectors + +				   phba->cfg_fcp_io_channel)), GFP_KERNEL);  	if (!phba->sli4_hba.msix_entries) {  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  				"2573 Failed allocate memory for msi-x "  				"interrupt vector entries\n"); +		rc = -ENOMEM;  		goto out_free_fcp_eq_hdl;  	} -	return rc; +	phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) * +					 phba->sli4_hba.num_present_cpu), +					 GFP_KERNEL); +	if (!phba->sli4_hba.cpu_map) { +		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +				"3327 Failed allocate memory for msi-x " +				"interrupt vector mapping\n"); +		rc = -ENOMEM; +		goto out_free_msix; +	} +	if (lpfc_used_cpu == NULL) { +		lpfc_used_cpu = kzalloc((sizeof(uint16_t) * lpfc_present_cpu), +					 GFP_KERNEL); +		if (!lpfc_used_cpu) { +			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +					"3335 Failed allocate memory for msi-x " +					"interrupt vector mapping\n"); +			kfree(phba->sli4_hba.cpu_map); +			rc = -ENOMEM; +			goto out_free_msix; +		} +		for (i = 0; i < lpfc_present_cpu; i++) +			lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY; +	} + +	/* Initialize io channels for round robin */ +	cpup = phba->sli4_hba.cpu_map; +	rc = 0; +	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { +		cpup->channel_id = rc; +		rc++; +		if (rc >= phba->cfg_fcp_io_channel) +			rc = 0; +	} + +	/* +	 * Enable sr-iov virtual functions if supported and configured +	 * through the module parameter. 
+	 */ +	if (phba->cfg_sriov_nr_virtfn > 0) { +		rc = lpfc_sli_probe_sriov_nr_virtfn(phba, +						 phba->cfg_sriov_nr_virtfn); +		if (rc) { +			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, +					"3020 Requested number of SR-IOV " +					"virtual functions (%d) is not " +					"supported\n", +					phba->cfg_sriov_nr_virtfn); +			phba->cfg_sriov_nr_virtfn = 0; +		} +	} + +	return 0; +out_free_msix: +	kfree(phba->sli4_hba.msix_entries);  out_free_fcp_eq_hdl:  	kfree(phba->sli4_hba.fcp_eq_hdl);  out_free_fcf_rr_bmask: @@ -4201,12 +5377,8 @@ out_remove_rpi_hdrs:  	lpfc_sli4_remove_rpi_hdrs(phba);  out_free_active_sgl:  	lpfc_free_active_sgl(phba); -out_free_sgl_list: -	lpfc_free_sgl_list(phba);  out_destroy_cq_event_pool:  	lpfc_sli4_cq_event_pool_destroy(phba); -out_destroy_queue: -	lpfc_sli4_queue_destroy(phba);  out_free_bsmbx:  	lpfc_destroy_bootstrap_mbox(phba);  out_free_mem: @@ -4226,6 +5398,12 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)  {  	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; +	/* Free memory allocated for msi-x interrupt vector to CPU mapping */ +	kfree(phba->sli4_hba.cpu_map); +	phba->sli4_hba.num_present_cpu = 0; +	phba->sli4_hba.num_online_cpu = 0; +	phba->sli4_hba.curr_disp_cpu = 0; +  	/* Free memory allocated for msi-x interrupt vector entries */  	kfree(phba->sli4_hba.msix_entries); @@ -4241,18 +5419,15 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)  	/* Free the ELS sgl list */  	lpfc_free_active_sgl(phba); -	lpfc_free_sgl_list(phba); - -	/* Free the SCSI sgl management array */ -	kfree(phba->sli4_hba.lpfc_scsi_psb_array); - -	/* Free the SLI4 queues */ -	lpfc_sli4_queue_destroy(phba); +	lpfc_free_els_sgl_list(phba);  	/* Free the completion queue EQ event pool */  	lpfc_sli4_cq_event_release_all(phba);  	lpfc_sli4_cq_event_pool_destroy(phba); +	/* Release resource identifiers. */ +	lpfc_sli4_dealloc_resource_identifiers(phba); +  	/* Free the bsmbx region. */  	lpfc_destroy_bootstrap_mbox(phba); @@ -4270,7 +5445,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)  }  /** - * lpfc_init_api_table_setup - Set up init api fucntion jump table + * lpfc_init_api_table_setup - Set up init api function jump table   * @phba: The hba struct for which this call is being executed.   * @dev_grp: The HBA PCI-Device group number.   
* @@ -4284,6 +5459,7 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)  {  	phba->lpfc_hba_init_link = lpfc_hba_init_link;  	phba->lpfc_hba_down_link = lpfc_hba_down_link; +	phba->lpfc_selective_reset = lpfc_selective_reset;  	switch (dev_grp) {  	case LPFC_PCI_DEV_LP:  		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; @@ -4336,8 +5512,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)  	init_waitqueue_head(&phba->work_waitq);  	/* Initialize the scsi buffer list used by driver for scsi IO */ -	spin_lock_init(&phba->scsi_buf_list_lock); -	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); +	spin_lock_init(&phba->scsi_buf_list_get_lock); +	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); +	spin_lock_init(&phba->scsi_buf_list_put_lock); +	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);  	/* Initialize the fabric iocb list */  	INIT_LIST_HEAD(&phba->fabric_iocb_list); @@ -4348,6 +5526,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)  	/* Initialize FCF connection rec list */  	INIT_LIST_HEAD(&phba->fcf_conn_rec_list); +	/* Initialize OAS configuration list */ +	spin_lock_init(&phba->devicelock); +	INIT_LIST_HEAD(&phba->luns); +  	return 0;  } @@ -4452,6 +5634,7 @@ lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)  				"Unloading driver.\n", __func__);  			goto out_free_iocbq;  		} +		iocbq_entry->sli4_lxritag = NO_XRI;  		iocbq_entry->sli4_xritag = NO_XRI;  		spin_lock_irq(&phba->hbalock); @@ -4469,29 +5652,42 @@ out_free_iocbq:  }  /** - * lpfc_free_sgl_list - Free sgl list. + * lpfc_free_sgl_list - Free a given sgl list.   * @phba: pointer to lpfc hba data structure. + * @sglq_list: pointer to the head of sgl list.   * - * This routine is invoked to free the driver's sgl list and memory. + * This routine is invoked to free a given sgl list and memory.   **/ -static void -lpfc_free_sgl_list(struct lpfc_hba *phba) +void +lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)  {  	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; + +	list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) { +		list_del(&sglq_entry->list); +		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); +		kfree(sglq_entry); +	} +} + +/** + * lpfc_free_els_sgl_list - Free els sgl list. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to free the driver's els sgl list and memory. + **/ +static void +lpfc_free_els_sgl_list(struct lpfc_hba *phba) +{  	LIST_HEAD(sglq_list);  +	/* Retrieve all els sgls from driver list */  	spin_lock_irq(&phba->hbalock);  	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);  	spin_unlock_irq(&phba->hbalock); -	list_for_each_entry_safe(sglq_entry, sglq_next, -				 &sglq_list, list) { -		list_del(&sglq_entry->list); -		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); -		kfree(sglq_entry); -		phba->sli4_hba.total_sglq_bufs--; -	} -	kfree(phba->sli4_hba.lpfc_els_sgl_array); +	/* Now free the sgl list */ +	lpfc_free_sgl_list(phba, &sglq_list);  }  /** @@ -4536,107 +5732,19 @@ lpfc_free_active_sgl(struct lpfc_hba *phba)   * This routine is invoked to allocate and initialize the driver's sgl   * list and set up the sgl xritag tag array accordingly.   
* - * Return codes - *	0 - successful - *	other values - error   **/ -static int +static void  lpfc_init_sgl_list(struct lpfc_hba *phba)  { -	struct lpfc_sglq *sglq_entry = NULL; -	int i; -	int els_xri_cnt; - -	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); -	lpfc_printf_log(phba, KERN_INFO, LOG_SLI, -				"2400 lpfc_init_sgl_list els %d.\n", -				els_xri_cnt);  	/* Initialize and populate the sglq list per host/VF. */  	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);  	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); -	/* Sanity check on XRI management */ -	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) { -		lpfc_printf_log(phba, KERN_ERR, LOG_SLI, -				"2562 No room left for SCSI XRI allocation: " -				"max_xri=%d, els_xri=%d\n", -				phba->sli4_hba.max_cfg_param.max_xri, -				els_xri_cnt); -		return -ENOMEM; -	} - -	/* Allocate memory for the ELS XRI management array */ -	phba->sli4_hba.lpfc_els_sgl_array = -			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt), -			GFP_KERNEL); +	/* els xri-sgl book keeping */ +	phba->sli4_hba.els_xri_cnt = 0; -	if (!phba->sli4_hba.lpfc_els_sgl_array) { -		lpfc_printf_log(phba, KERN_ERR, LOG_SLI, -				"2401 Failed to allocate memory for ELS " -				"XRI management array of size %d.\n", -				els_xri_cnt); -		return -ENOMEM; -	} - -	/* Keep the SCSI XRI into the XRI management array */ -	phba->sli4_hba.scsi_xri_max = -			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; +	/* scsi xri-buffer book keeping */  	phba->sli4_hba.scsi_xri_cnt = 0; - -	phba->sli4_hba.lpfc_scsi_psb_array = -			kzalloc((sizeof(struct lpfc_scsi_buf *) * -			phba->sli4_hba.scsi_xri_max), GFP_KERNEL); - -	if (!phba->sli4_hba.lpfc_scsi_psb_array) { -		lpfc_printf_log(phba, KERN_ERR, LOG_SLI, -				"2563 Failed to allocate memory for SCSI " -				"XRI management array of size %d.\n", -				phba->sli4_hba.scsi_xri_max); -		kfree(phba->sli4_hba.lpfc_els_sgl_array); -		return -ENOMEM; -	} - -	for (i = 0; i < els_xri_cnt; i++) { -		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL); -		if (sglq_entry == NULL) { -			printk(KERN_ERR "%s: only allocated %d sgls of " -				"expected %d count. Unloading driver.\n", -				__func__, i, els_xri_cnt); -			goto out_free_mem; -		} - -		sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba); -		if (sglq_entry->sli4_xritag == NO_XRI) { -			kfree(sglq_entry); -			printk(KERN_ERR "%s: failed to allocate XRI.\n" -				"Unloading driver.\n", __func__); -			goto out_free_mem; -		} -		sglq_entry->buff_type = GEN_BUFF_TYPE; -		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys); -		if (sglq_entry->virt == NULL) { -			kfree(sglq_entry); -			printk(KERN_ERR "%s: failed to allocate mbuf.\n" -				"Unloading driver.\n", __func__); -			goto out_free_mem; -		} -		sglq_entry->sgl = sglq_entry->virt; -		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); - -		/* The list order is used by later block SGL registraton */ -		spin_lock_irq(&phba->hbalock); -		sglq_entry->state = SGL_FREED; -		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); -		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry; -		phba->sli4_hba.total_sglq_bufs++; -		spin_unlock_irq(&phba->hbalock); -	} -	return 0; - -out_free_mem: -	kfree(phba->sli4_hba.lpfc_scsi_psb_array); -	lpfc_free_sgl_list(phba); -	return -ENOMEM;  }  /** @@ -4644,40 +5752,26 @@ out_free_mem:   * @phba: pointer to lpfc hba data structure.   *   * This routine is invoked to post rpi header templates to the - * HBA consistent with the SLI-4 interface spec.  
This routine + * port for those SLI4 ports that do not support extents.  This routine   * posts a PAGE_SIZE memory region to the port to hold up to - * PAGE_SIZE modulo 64 rpi context headers. - * No locks are held here because this is an initialization routine - * called only from probe or lpfc_online when interrupts are not - * enabled and the driver is reinitializing the device. + * PAGE_SIZE modulo 64 rpi context headers.  This is an initialization routine + * and should be called only when interrupts are disabled.   *   * Return codes   * 	0 - successful - * 	-ENOMEM - No availble memory - *      -EIO - The mailbox failed to complete successfully. + *	-ERROR - otherwise.   **/  int  lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)  {  	int rc = 0; -	int longs; -	uint16_t rpi_count;  	struct lpfc_rpi_hdr *rpi_hdr;  	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); - -	/* -	 * Provision an rpi bitmask range for discovery. The total count -	 * is the difference between max and base + 1. -	 */ -	rpi_count = phba->sli4_hba.max_cfg_param.rpi_base + -		    phba->sli4_hba.max_cfg_param.max_rpi - 1; - -	longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG; -	phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long), -					   GFP_KERNEL); -	if (!phba->sli4_hba.rpi_bmask) -		return -ENOMEM; +	if (!phba->sli4_hba.rpi_hdrs_in_use) +		return rc; +	if (phba->sli4_hba.extents_in_use) +		return -EIO;  	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);  	if (!rpi_hdr) { @@ -4709,11 +5803,28 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)  	uint16_t rpi_limit, curr_rpi_range;  	struct lpfc_dmabuf *dmabuf;  	struct lpfc_rpi_hdr *rpi_hdr; +	uint32_t rpi_count; +	/* +	 * If the SLI4 port supports extents, posting the rpi header isn't +	 * required.  Set the expected maximum count and let the actual value +	 * get set when extents are fully allocated. +	 */ +	if (!phba->sli4_hba.rpi_hdrs_in_use) +		return NULL; +	if (phba->sli4_hba.extents_in_use) +		return NULL; + +	/* The limit on the logical index is just the max_rpi count. */  	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + -		    phba->sli4_hba.max_cfg_param.max_rpi - 1; +	phba->sli4_hba.max_cfg_param.max_rpi - 1;  	spin_lock_irq(&phba->hbalock); +	/* +	 * Establish the starting RPI in this header block.  The starting +	 * rpi is normalized to a zero base because the physical rpi is +	 * port based. +	 */  	curr_rpi_range = phba->sli4_hba.next_rpi;  	spin_unlock_irq(&phba->hbalock); @@ -4723,8 +5834,12 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)  	 * and to allow the full max_rpi range per port.  	 */  	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) -		return NULL; +		rpi_count = rpi_limit - curr_rpi_range; +	else +		rpi_count = LPFC_RPI_HDR_COUNT; +	if (!rpi_count) +		return NULL;  	/*  	 * First allocate the protocol header region for the port.  The  	 * port expects a 4KB DMA-mapped memory region that is 4K aligned. @@ -4757,14 +5872,16 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)  	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;  	rpi_hdr->page_count = 1;  	spin_lock_irq(&phba->hbalock); -	rpi_hdr->start_rpi = phba->sli4_hba.next_rpi; + +	/* The rpi_hdr stores the logical index only. */ +	rpi_hdr->start_rpi = curr_rpi_range;  	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);  	/* -	 * The next_rpi stores the next module-64 rpi value to post -	 * in any subsequent rpi memory region postings. 
+	 * The next_rpi stores the next logical module-64 rpi value used +	 * to post physical rpis in subsequent rpi postings.  	 */ -	phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT; +	phba->sli4_hba.next_rpi += rpi_count;  	spin_unlock_irq(&phba->hbalock);  	return rpi_hdr; @@ -4781,15 +5898,18 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)   * @phba: pointer to lpfc hba data structure.   *   * This routine is invoked to remove all memory resources allocated - * to support rpis. This routine presumes the caller has released all - * rpis consumed by fabric or port logins and is prepared to have - * the header pages removed. + * to support rpis for SLI4 ports not supporting extents. This routine + * presumes the caller has released all rpis consumed by fabric or port + * logins and is prepared to have the header pages removed.   **/  void  lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)  {  	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; +	if (!phba->sli4_hba.rpi_hdrs_in_use) +		goto exit; +  	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,  				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {  		list_del(&rpi_hdr->list); @@ -4798,9 +5918,9 @@ lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)  		kfree(rpi_hdr->dmabuf);  		kfree(rpi_hdr);  	} - -	phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; -	memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask)); + exit: +	/* There are no rpis available to the port now. */ +	phba->sli4_hba.next_rpi = 0;  }  /** @@ -4856,6 +5976,10 @@ lpfc_hba_free(struct lpfc_hba *phba)  	/* Release the driver assigned board number */  	idr_remove(&lpfc_hba_index, phba->brd_no); +	/* Free memory allocated with sli rings */ +	kfree(phba->sli.ring); +	phba->sli.ring = NULL; +  	kfree(phba);  	return;  } @@ -4926,14 +6050,45 @@ lpfc_destroy_shost(struct lpfc_hba *phba)  static void  lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)  { +	uint32_t old_mask; +	uint32_t old_guard; +  	int pagecnt = 10;  	if (lpfc_prot_mask && lpfc_prot_guard) {  		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,  				"1478 Registering BlockGuard with the "  				"SCSI layer\n"); -		scsi_host_set_prot(shost, lpfc_prot_mask); -		scsi_host_set_guard(shost, lpfc_prot_guard); + +		old_mask = lpfc_prot_mask; +		old_guard = lpfc_prot_guard; + +		/* Only allow supported values */ +		lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | +			SHOST_DIX_TYPE0_PROTECTION | +			SHOST_DIX_TYPE1_PROTECTION); +		lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC); + +		/* DIF Type 1 protection for profiles AST1/C1 is end to end */ +		if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION) +			lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; + +		if (lpfc_prot_mask && lpfc_prot_guard) { +			if ((old_mask != lpfc_prot_mask) || +				(old_guard != lpfc_prot_guard)) +				lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +					"1475 Registering BlockGuard with the " +					"SCSI layer: mask %d  guard %d\n", +					lpfc_prot_mask, lpfc_prot_guard); + +			scsi_host_set_prot(shost, lpfc_prot_mask); +			scsi_host_set_guard(shost, lpfc_prot_guard); +		} else +			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +				"1479 Not Registering BlockGuard with the SCSI " +				"layer, Bad protection parameters: %d %d\n", +				old_mask, old_guard);  	} +  	if (!_dump_buf_data) {  		while (pagecnt) {  			spin_lock_init(&_dump_buf_lock); @@ -5190,97 +6345,193 @@ lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)  int  lpfc_sli4_post_status_check(struct lpfc_hba *phba)  { -	struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg; -	int 
i, port_error = -ENODEV; +	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg; +	struct lpfc_register reg_data; +	int i, port_error = 0; +	uint32_t if_type; -	if (!phba->sli4_hba.STAregaddr) +	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); +	memset(&reg_data, 0, sizeof(reg_data)); +	if (!phba->sli4_hba.PSMPHRregaddr)  		return -ENODEV;  	/* Wait up to 30 seconds for the SLI Port POST done and ready */  	for (i = 0; i < 3000; i++) { -		sta_reg.word0 = readl(phba->sli4_hba.STAregaddr); -		/* Encounter fatal POST error, break out */ -		if (bf_get(lpfc_hst_state_perr, &sta_reg)) { +		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, +			&portsmphr_reg.word0) || +			(bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) { +			/* Port has a fatal POST error, break out */  			port_error = -ENODEV;  			break;  		} -		if (LPFC_POST_STAGE_ARMFW_READY == -		    bf_get(lpfc_hst_state_port_status, &sta_reg)) { -			port_error = 0; +		if (LPFC_POST_STAGE_PORT_READY == +		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))  			break; -		}  		msleep(10);  	} -	if (port_error) +	/* +	 * If there was a port error during POST, then don't proceed with +	 * other register reads as the data may not be valid.  Just exit. +	 */ +	if (port_error) {  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -			"1408 Failure HBA POST Status: sta_reg=0x%x, " -			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, " -			"dl=x%x, pstatus=x%x\n", sta_reg.word0, -			bf_get(lpfc_hst_state_perr, &sta_reg), -			bf_get(lpfc_hst_state_sfi, &sta_reg), -			bf_get(lpfc_hst_state_nip, &sta_reg), -			bf_get(lpfc_hst_state_ipc, &sta_reg), -			bf_get(lpfc_hst_state_xrom, &sta_reg), -			bf_get(lpfc_hst_state_dl, &sta_reg), -			bf_get(lpfc_hst_state_port_status, &sta_reg)); - -	/* Log device information */ -	phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr); -	if (bf_get(lpfc_sli_intf_valid, -		   &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) { +			"1408 Port Failed POST - portsmphr=0x%x, " +			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, " +			"scr2=x%x, hscratch=x%x, pstatus=x%x\n", +			portsmphr_reg.word0, +			bf_get(lpfc_port_smphr_perr, &portsmphr_reg), +			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg), +			bf_get(lpfc_port_smphr_nip, &portsmphr_reg), +			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg), +			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg), +			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg), +			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg), +			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)); +	} else {  		lpfc_printf_log(phba, KERN_INFO, LOG_INIT, -				"2534 Device Info: ChipType=0x%x, SliRev=0x%x, " -				"FeatureL1=0x%x, FeatureL2=0x%x\n", +				"2534 Device Info: SLIFamily=0x%x, " +				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, " +				"SLIHint_2=0x%x, FT=0x%x\n",  				bf_get(lpfc_sli_intf_sli_family,  				       &phba->sli4_hba.sli_intf),  				bf_get(lpfc_sli_intf_slirev,  				       &phba->sli4_hba.sli_intf), -				bf_get(lpfc_sli_intf_featurelevel1, +				bf_get(lpfc_sli_intf_if_type, +				       &phba->sli4_hba.sli_intf), +				bf_get(lpfc_sli_intf_sli_hint1,  				       &phba->sli4_hba.sli_intf), -				bf_get(lpfc_sli_intf_featurelevel2, +				bf_get(lpfc_sli_intf_sli_hint2, +				       &phba->sli4_hba.sli_intf), +				bf_get(lpfc_sli_intf_func_type,  				       &phba->sli4_hba.sli_intf)); +		/* +		 * Check for other Port errors during the initialization +		 * process.  Fail the load if the port did not come up +		 * correctly.
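		 *
		 * Editor's note: for if_type 0 the test below reduces to
		 * checking for any unmasked error bit, i.e.
		 * (~ue_mask_lo & uerrlo_reg.word0) != 0 or the equivalent
		 * for the HI pair; an error bit that is masked off never
		 * fails POST.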
+		 */ +		if_type = bf_get(lpfc_sli_intf_if_type, +				 &phba->sli4_hba.sli_intf); +		switch (if_type) { +		case LPFC_SLI_INTF_IF_TYPE_0: +			phba->sli4_hba.ue_mask_lo = +			      readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr); +			phba->sli4_hba.ue_mask_hi = +			      readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr); +			uerrlo_reg.word0 = +			      readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); +			uerrhi_reg.word0 = +				readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); +			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || +			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { +				lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +						"1422 Unrecoverable Error " +						"Detected during POST " +						"uerr_lo_reg=0x%x, " +						"uerr_hi_reg=0x%x, " +						"ue_mask_lo_reg=0x%x, " +						"ue_mask_hi_reg=0x%x\n", +						uerrlo_reg.word0, +						uerrhi_reg.word0, +						phba->sli4_hba.ue_mask_lo, +						phba->sli4_hba.ue_mask_hi); +				port_error = -ENODEV; +			} +			break; +		case LPFC_SLI_INTF_IF_TYPE_2: +			/* Final checks.  The port status should be clean. */ +			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, +				&reg_data.word0) || +				(bf_get(lpfc_sliport_status_err, &reg_data) && +				 !bf_get(lpfc_sliport_status_rn, &reg_data))) { +				phba->work_status[0] = +					readl(phba->sli4_hba.u.if_type2. +					      ERR1regaddr); +				phba->work_status[1] = +					readl(phba->sli4_hba.u.if_type2. +					      ERR2regaddr); +				lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +					"2888 Unrecoverable port error " +					"following POST: port status reg " +					"0x%x, port_smphr reg 0x%x, " +					"error 1=0x%x, error 2=0x%x\n", +					reg_data.word0, +					portsmphr_reg.word0, +					phba->work_status[0], +					phba->work_status[1]); +				port_error = -ENODEV; +			} +			break; +		case LPFC_SLI_INTF_IF_TYPE_1: +		default: +			break; +		}  	} -	phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr); -	phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr); -	/* With uncoverable error, log the error message and return error */ -	uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr); -	uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr); -	if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || -	    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { -		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -				"1422 HBA Unrecoverable error: " -				"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " -				"ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n", -				uerrlo_reg.word0, uerrhi_reg.word0, -				phba->sli4_hba.ue_mask_lo, -				phba->sli4_hba.ue_mask_hi); -		return -ENODEV; -	} -  	return port_error;  }  /**   * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.   * @phba: pointer to lpfc hba data structure. + * @if_type:  The SLI4 interface type getting configured.   *   * This routine is invoked to set up SLI4 BAR0 PCI config space register   * memory map.
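 *
 * Editor's sketch (hypothetical caller, not part of the patch): the
 * if_type argument is expected to be derived from the SLI_INTF register,
 * for example:
 *
 *	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
 *	lpfc_sli4_bar0_register_memmap(phba, if_type);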
**/  static void -lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba) -{ -	phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p + -					LPFC_UERR_STATUS_LO; -	phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p + -					LPFC_UERR_STATUS_HI; -	phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p + -					LPFC_UE_MASK_LO; -	phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p + -					LPFC_UE_MASK_HI; -	phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p + -					LPFC_SLI_INTF; +lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) +{ +	switch (if_type) { +	case LPFC_SLI_INTF_IF_TYPE_0: +		phba->sli4_hba.u.if_type0.UERRLOregaddr = +			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; +		phba->sli4_hba.u.if_type0.UERRHIregaddr = +			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; +		phba->sli4_hba.u.if_type0.UEMASKLOregaddr = +			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; +		phba->sli4_hba.u.if_type0.UEMASKHIregaddr = +			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; +		phba->sli4_hba.SLIINTFregaddr = +			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; +		break; +	case LPFC_SLI_INTF_IF_TYPE_2: +		phba->sli4_hba.u.if_type2.ERR1regaddr = +			phba->sli4_hba.conf_regs_memmap_p + +						LPFC_CTL_PORT_ER1_OFFSET; +		phba->sli4_hba.u.if_type2.ERR2regaddr = +			phba->sli4_hba.conf_regs_memmap_p + +						LPFC_CTL_PORT_ER2_OFFSET; +		phba->sli4_hba.u.if_type2.CTRLregaddr = +			phba->sli4_hba.conf_regs_memmap_p + +						LPFC_CTL_PORT_CTL_OFFSET; +		phba->sli4_hba.u.if_type2.STATUSregaddr = +			phba->sli4_hba.conf_regs_memmap_p + +						LPFC_CTL_PORT_STA_OFFSET; +		phba->sli4_hba.SLIINTFregaddr = +			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; +		phba->sli4_hba.PSMPHRregaddr = +			phba->sli4_hba.conf_regs_memmap_p + +						LPFC_CTL_PORT_SEM_OFFSET; +		phba->sli4_hba.RQDBregaddr = +			phba->sli4_hba.conf_regs_memmap_p + +						LPFC_ULP0_RQ_DOORBELL; +		phba->sli4_hba.WQDBregaddr = +			phba->sli4_hba.conf_regs_memmap_p + +						LPFC_ULP0_WQ_DOORBELL; +		phba->sli4_hba.EQCQDBregaddr = +			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; +		phba->sli4_hba.MQDBregaddr = +			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; +		phba->sli4_hba.BMBXregaddr = +			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; +		break; +	case LPFC_SLI_INTF_IF_TYPE_1: +	default: +		dev_printk(KERN_ERR, &phba->pcidev->dev, +			   "FATAL - unsupported SLI4 interface type - %d\n", +			   if_type); +		break; +	}  }  /** @@ -5293,16 +6544,14 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)  static void  lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)  { - -	phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p + -				    LPFC_HST_STATE; +	phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + +		LPFC_SLIPORT_IF0_SMPHR;  	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + -				    LPFC_HST_ISR0; +		LPFC_HST_ISR0;  	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + -				    LPFC_HST_IMR0; +		LPFC_HST_IMR0;  	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + -				     LPFC_HST_ISCR0; -	return; +		LPFC_HST_ISCR0;  }  /** @@ -5322,9 +6571,11 @@ lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)  		return -ENODEV;  	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + -				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL); +				vf * LPFC_VFR_PAGE_SIZE + +					LPFC_ULP0_RQ_DOORBELL);  	phba->sli4_hba.WQDBregaddr 
= (phba->sli4_hba.drbl_regs_memmap_p + -				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL); +				vf * LPFC_VFR_PAGE_SIZE + +					LPFC_ULP0_WQ_DOORBELL);  	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +  				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);  	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + @@ -5446,15 +6697,21 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)   *   * Return codes   * 	0 - successful - * 	-ENOMEM - No availble memory + * 	-ENOMEM - No available memory   *      -EIO - The mailbox failed to complete successfully.   **/ -static int +int  lpfc_sli4_read_config(struct lpfc_hba *phba)  {  	LPFC_MBOXQ_t *pmb;  	struct lpfc_mbx_read_config *rd_config; -	uint32_t rc = 0; +	union  lpfc_sli4_cfg_shdr *shdr; +	uint32_t shdr_status, shdr_add_status; +	struct lpfc_mbx_get_func_cfg *get_func_cfg; +	struct lpfc_rsrc_desc_fcfcoe *desc; +	char *pdesc_0; +	uint32_t desc_count; +	int length, i, rc = 0, rc2;  	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);  	if (!pmb) { @@ -5476,6 +6733,22 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)  		rc = -EIO;  	} else {  		rd_config = &pmb->u.mqe.un.rd_config; +		if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { +			phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; +			phba->sli4_hba.lnk_info.lnk_tp = +				bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); +			phba->sli4_hba.lnk_info.lnk_no = +				bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); +			lpfc_printf_log(phba, KERN_INFO, LOG_SLI, +					"3081 lnk_type:%d, lnk_numb:%d\n", +					phba->sli4_hba.lnk_info.lnk_tp, +					phba->sli4_hba.lnk_info.lnk_no); +		} else +			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, +					"3082 Mailbox (x%x) returned ldv:x0\n", +					bf_get(lpfc_mqe_command, &pmb->u.mqe)); +		phba->sli4_hba.extents_in_use = +			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);  		phba->sli4_hba.max_cfg_param.max_xri =  			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);  		phba->sli4_hba.max_cfg_param.xri_base = @@ -5494,8 +6767,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)  			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);  		phba->sli4_hba.max_cfg_param.max_fcfi =  			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); -		phba->sli4_hba.max_cfg_param.fcfi_base = -			bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);  		phba->sli4_hba.max_cfg_param.max_eq =  			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);  		phba->sli4_hba.max_cfg_param.max_rq = @@ -5508,16 +6779,17 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)  		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;  		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;  		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; -		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;  		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?  				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;  		phba->max_vports = phba->max_vpi;  		lpfc_printf_log(phba, KERN_INFO, LOG_SLI, -				"2003 cfg params XRI(B:%d M:%d), " +				"2003 cfg params Extents? 
%d " +				"XRI(B:%d M:%d), "  				"VPI(B:%d M:%d) "  				"VFI(B:%d M:%d) "  				"RPI(B:%d M:%d) " -				"FCFI(B:%d M:%d)\n", +				"FCFI(Count:%d)\n", +				phba->sli4_hba.extents_in_use,  				phba->sli4_hba.max_cfg_param.xri_base,  				phba->sli4_hba.max_cfg_param.max_xri,  				phba->sli4_hba.max_cfg_param.vpi_base, @@ -5526,212 +6798,354 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)  				phba->sli4_hba.max_cfg_param.max_vfi,  				phba->sli4_hba.max_cfg_param.rpi_base,  				phba->sli4_hba.max_cfg_param.max_rpi, -				phba->sli4_hba.max_cfg_param.fcfi_base,  				phba->sli4_hba.max_cfg_param.max_fcfi);  	} -	mempool_free(pmb, phba->mbox_mem_pool); + +	if (rc) +		goto read_cfg_out;  	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */ -	if (phba->cfg_hba_queue_depth > -		(phba->sli4_hba.max_cfg_param.max_xri - -			lpfc_sli4_get_els_iocb_cnt(phba))) -		phba->cfg_hba_queue_depth = -			phba->sli4_hba.max_cfg_param.max_xri - -				lpfc_sli4_get_els_iocb_cnt(phba); +	length = phba->sli4_hba.max_cfg_param.max_xri - +			lpfc_sli4_get_els_iocb_cnt(phba); +	if (phba->cfg_hba_queue_depth > length) { +		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, +				"3361 HBA queue depth changed from %d to %d\n", +				phba->cfg_hba_queue_depth, length); +		phba->cfg_hba_queue_depth = length; +	} + +	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != +	    LPFC_SLI_INTF_IF_TYPE_2) +		goto read_cfg_out; + +	/* get the pf# and vf# for SLI4 if_type 2 port */ +	length = (sizeof(struct lpfc_mbx_get_func_cfg) - +		  sizeof(struct lpfc_sli4_cfg_mhdr)); +	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, +			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, +			 length, LPFC_SLI4_MBX_EMBED); + +	rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); +	shdr = (union lpfc_sli4_cfg_shdr *) +				&pmb->u.mqe.un.sli4_config.header.cfg_shdr; +	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); +	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); +	if (rc2 || shdr_status || shdr_add_status) { +		lpfc_printf_log(phba, KERN_ERR, LOG_SLI, +				"3026 Mailbox failed, mbxCmd x%x " +				"GET_FUNCTION_CONFIG, mbxStatus x%x\n", +				bf_get(lpfc_mqe_command, &pmb->u.mqe), +				bf_get(lpfc_mqe_status, &pmb->u.mqe)); +		goto read_cfg_out; +	} + +	/* search for fc_fcoe resource descriptor */ +	get_func_cfg = &pmb->u.mqe.un.get_func_cfg; +	desc_count = get_func_cfg->func_cfg.rsrc_desc_count; + +	pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; +	desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; +	length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); +	if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) +		length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; +	else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) +		goto read_cfg_out; + +	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { +		desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); +		if (LPFC_RSRC_DESC_TYPE_FCFCOE == +		    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { +			phba->sli4_hba.iov.pf_number = +				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); +			phba->sli4_hba.iov.vf_number = +				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); +			break; +		} +	} + +	if (i < LPFC_RSRC_DESC_MAX_NUM) +		lpfc_printf_log(phba, KERN_INFO, LOG_SLI, +				"3027 GET_FUNCTION_CONFIG: pf_number:%d, " +				"vf_number:%d\n", phba->sli4_hba.iov.pf_number, +				phba->sli4_hba.iov.vf_number); +	else +		lpfc_printf_log(phba, KERN_ERR, LOG_SLI, +				"3028 GET_FUNCTION_CONFIG: failed to find " +				"Resource Descriptor:x%x\n", +				LPFC_RSRC_DESC_TYPE_FCFCOE); + +read_cfg_out: +	
mempool_free(pmb, phba->mbox_mem_pool);  	return rc;  }  /** - * lpfc_dev_endian_order_setup - Notify the port of the host's endian order. + * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.   * @phba: pointer to lpfc hba data structure.   * - * This routine is invoked to setup the host-side endian order to the - * HBA consistent with the SLI-4 interface spec. + * This routine is invoked to set up the port-side endian order when + * the port if_type is 0.  This routine has no function for other + * if_types.   *   * Return codes   * 	0 - successful   * 	-ENOMEM - No available memory   *      -EIO - The mailbox failed to complete successfully.   **/  static int  lpfc_setup_endian_order(struct lpfc_hba *phba)  {  	LPFC_MBOXQ_t *mboxq; -	uint32_t rc = 0; +	uint32_t if_type, rc = 0;  	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,  				      HOST_ENDIAN_HIGH_WORD1}; -	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); -	if (!mboxq) { -		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -				"0492 Unable to allocate memory for issuing " -				"SLI_CONFIG_SPECIAL mailbox command\n"); -		return -ENOMEM; -	} +	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); +	switch (if_type) { +	case LPFC_SLI_INTF_IF_TYPE_0: +		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, +						       GFP_KERNEL); +		if (!mboxq) { +			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +					"0492 Unable to allocate memory for " +					"issuing SLI_CONFIG_SPECIAL mailbox " +					"command\n"); +			return -ENOMEM; +		} -	/* -	 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two -	 * words to contain special data values and no other data. -	 */ -	memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); -	memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); -	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); -	if (rc != MBX_SUCCESS) { -		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -				"0493 SLI_CONFIG_SPECIAL mailbox failed with " -				"status x%x\n", -				rc); -		rc = -EIO; +		/* +		 * The SLI4_CONFIG_SPECIAL mailbox command requires the first +		 * two words to contain special data values and no other data. +		 */ +		memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); +		memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); +		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); +		if (rc != MBX_SUCCESS) { +			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +					"0493 SLI_CONFIG_SPECIAL mailbox " +					"failed with status x%x\n", +					rc); +			rc = -EIO; +		} +		mempool_free(mboxq, phba->mbox_mem_pool); +		break; +	case LPFC_SLI_INTF_IF_TYPE_2: +	case LPFC_SLI_INTF_IF_TYPE_1: +	default: +		break;  	} - -	mempool_free(mboxq, phba->mbox_mem_pool);  	return rc;  }  /** - * lpfc_sli4_queue_create - Create all the SLI4 queues + * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts   * @phba: pointer to lpfc hba data structure.   * - * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA - * operation. For each SLI4 queue type, the parameters such as queue entry - * count (queue depth) shall be taken from the module parameter. For now, - * we just use some constant number as place holder. + * This routine is invoked to check the user-settable queue counts for EQs and + * CQs.  After this routine is called, the counts will be set to valid values + * that adhere to the constraints of the system's interrupt vectors and the + * port's queue resources.   
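 *
 * Editor's note: an illustrative clamp with hypothetical values: a request
 * for 16 io channels on a system with 8 online CPUs and a port reporting
 * max_eq = 4 is first cut to 8 (one channel per online CPU) and then,
 * assuming max_eq is not below LPFC_FCP_IO_CHAN_MIN, to max_eq minus any
 * flash optimized fabric (fof) vector, i.e. 3 channels when cfg_fof is set.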
*   * Return codes   *      0 - successful - *      -ENOMEM - No availble memory - *      -EIO - The mailbox failed to complete successfully. + *      -ENOMEM - No available memory   **/  static int -lpfc_sli4_queue_create(struct lpfc_hba *phba) +lpfc_sli4_queue_verify(struct lpfc_hba *phba)  { -	struct lpfc_queue *qdesc; -	int fcp_eqidx, fcp_cqidx, fcp_wqidx; -	int cfg_fcp_wq_count; -	int cfg_fcp_eq_count; +	int cfg_fcp_io_channel; +	uint32_t cpu; +	uint32_t i = 0; +	int fof_vectors = phba->cfg_fof ? 1 : 0;  	/* -	 * Sanity check for confiugred queue parameters against the run-time +	 * Sanity check for configured queue parameters against the run-time  	 * device parameters  	 */ -	/* Sanity check on FCP fast-path WQ parameters */ -	cfg_fcp_wq_count = phba->cfg_fcp_wq_count; -	if (cfg_fcp_wq_count > -	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) { -		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq - -				   LPFC_SP_WQN_DEF; -		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) { -			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -					"2581 Not enough WQs (%d) from " -					"the pci function for supporting " -					"FCP WQs (%d)\n", -					phba->sli4_hba.max_cfg_param.max_wq, -					phba->cfg_fcp_wq_count); -			goto out_error; -		} -		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, -				"2582 Not enough WQs (%d) from the pci " -				"function for supporting the requested " -				"FCP WQs (%d), the actual FCP WQs can " -				"be supported: %d\n", -				phba->sli4_hba.max_cfg_param.max_wq, -				phba->cfg_fcp_wq_count, cfg_fcp_wq_count); -	} -	/* The actual number of FCP work queues adopted */ -	phba->cfg_fcp_wq_count = cfg_fcp_wq_count; - -	/* Sanity check on FCP fast-path EQ parameters */ -	cfg_fcp_eq_count = phba->cfg_fcp_eq_count; -	if (cfg_fcp_eq_count > -	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) { -		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq - -				   LPFC_SP_EQN_DEF; -		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) { +	/* Sanity check on HBA EQ parameters */ +	cfg_fcp_io_channel = phba->cfg_fcp_io_channel; + +	/* It doesn't make sense to have more io channels than online CPUs */ +	for_each_present_cpu(cpu) { +		if (cpu_online(cpu)) +			i++; +	} +	phba->sli4_hba.num_online_cpu = i; +	phba->sli4_hba.num_present_cpu = lpfc_present_cpu; +	phba->sli4_hba.curr_disp_cpu = 0; + +	if (i < cfg_fcp_io_channel) { +		lpfc_printf_log(phba, +				KERN_ERR, LOG_INIT, +				"3188 Reducing IO channels to match number of " +				"online CPUs: from %d to %d\n", +				cfg_fcp_io_channel, i); +		cfg_fcp_io_channel = i; +	} + +	if (cfg_fcp_io_channel + fof_vectors > +	    phba->sli4_hba.max_cfg_param.max_eq) { +		if (phba->sli4_hba.max_cfg_param.max_eq < +		    LPFC_FCP_IO_CHAN_MIN) {  			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  					"2574 Not enough EQs (%d) from the "  					"pci function for supporting FCP "  					"EQs (%d)\n",  					phba->sli4_hba.max_cfg_param.max_eq, -					phba->cfg_fcp_eq_count); +					phba->cfg_fcp_io_channel);  			goto out_error;  		} -		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, -				"2575 Not enough EQs (%d) from the pci " -				"function for supporting the requested " -				"FCP EQs (%d), the actual FCP EQs can " -				"be supported: %d\n", -				phba->sli4_hba.max_cfg_param.max_eq, -				phba->cfg_fcp_eq_count, cfg_fcp_eq_count); -	} -	/* It does not make sense to have more EQs than WQs */ -	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) { -		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, -				"2593 The FCP EQ count(%d) cannot be greater " -				"than the FCP 
WQ count(%d), limiting the " -				"FCP EQ count to %d\n", cfg_fcp_eq_count, -				phba->cfg_fcp_wq_count, -				phba->cfg_fcp_wq_count); -		cfg_fcp_eq_count = phba->cfg_fcp_wq_count; +		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +				"2575 Reducing IO channels to match number of " +				"available EQs: from %d to %d\n", +				cfg_fcp_io_channel, +				phba->sli4_hba.max_cfg_param.max_eq); +		cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq - +			fof_vectors;  	} -	/* The actual number of FCP event queues adopted */ -	phba->cfg_fcp_eq_count = cfg_fcp_eq_count; -	/* The overall number of event queues used */ -	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF; -	/* -	 * Create Event Queues (EQs) -	 */ +	/* The actual number of FCP event queues adopted */ +	phba->cfg_fcp_io_channel = cfg_fcp_io_channel;  	/* Get EQ depth from module parameter, fake the default for now */  	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;  	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; -	/* Create slow path event queue */ -	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, -				      phba->sli4_hba.eq_ecount); -	if (!qdesc) { +	/* Get CQ depth from module parameter, fake the default for now */ +	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; +	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; + +	return 0; +out_error: +	return -ENOMEM; +} + +/** + * lpfc_sli4_queue_create - Create all the SLI4 queues + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA + * operation. For each SLI4 queue type, the parameters such as queue entry + * count (queue depth) shall be taken from the module parameter. For now, + * we just use some constant number as place holder. + * + * Return codes + *      0 - successful + *      -ENOMEM - No available memory + *      -EIO - The mailbox failed to complete successfully. + **/ +int +lpfc_sli4_queue_create(struct lpfc_hba *phba) +{ +	struct lpfc_queue *qdesc; +	int idx; + +	/* +	 * Create HBA Record arrays.
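	 * (Editor's note: hba_eq, fcp_cq and fcp_wq allocated below are
	 * parallel arrays with one entry per configured io channel, so a
	 * single index selects the EQ/CQ/WQ triple that services a channel.)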
+	 */ +	if (!phba->cfg_fcp_io_channel) +		return -ERANGE; + +	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; +	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; +	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; +	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; +	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; +	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; + +	phba->sli4_hba.hba_eq =  kzalloc((sizeof(struct lpfc_queue *) * +				phba->cfg_fcp_io_channel), GFP_KERNEL); +	if (!phba->sli4_hba.hba_eq) {  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -				"0496 Failed allocate slow-path EQ\n"); +			"2576 Failed allocate memory for " +			"fast-path EQ record array\n");  		goto out_error;  	} -	phba->sli4_hba.sp_eq = qdesc; -	/* Create fast-path FCP Event Queue(s) */ -	phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) * -			       phba->cfg_fcp_eq_count), GFP_KERNEL); -	if (!phba->sli4_hba.fp_eq) { +	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * +				phba->cfg_fcp_io_channel), GFP_KERNEL); +	if (!phba->sli4_hba.fcp_cq) {  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -				"2576 Failed allocate memory for fast-path " -				"EQ record array\n"); -		goto out_free_sp_eq; +				"2577 Failed allocate memory for fast-path " +				"CQ record array\n"); +		goto out_error;  	} -	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { + +	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * +				phba->cfg_fcp_io_channel), GFP_KERNEL); +	if (!phba->sli4_hba.fcp_wq) { +		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +				"2578 Failed allocate memory for fast-path " +				"WQ record array\n"); +		goto out_error; +	} + +	/* +	 * Since the first EQ can have multiple CQs associated with it, +	 * this array is used to quickly see if we have a FCP fast-path +	 * CQ match. +	 */ +	phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) * +					 phba->cfg_fcp_io_channel), GFP_KERNEL); +	if (!phba->sli4_hba.fcp_cq_map) { +		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +				"2545 Failed allocate memory for fast-path " +				"CQ map\n"); +		goto out_error; +	} + +	/* +	 * Create HBA Event Queues (EQs).  The cfg_fcp_io_channel specifies +	 * how many EQs to create. 
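	 *
	 * Editor's note: any allocation failure in the loop below drops
	 * through to out_error, which now delegates cleanup to
	 * lpfc_sli4_queue_destroy(); that routine NULL-checks every queue,
	 * so a partially built set is torn down safely.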
+	 */ +	for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { + +		/* Create EQs */  		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,  					      phba->sli4_hba.eq_ecount);  		if (!qdesc) {  			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -					"0497 Failed allocate fast-path EQ\n"); -			goto out_free_fp_eq; +					"0497 Failed allocate EQ (%d)\n", idx); +			goto out_error;  		} -		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc; +		phba->sli4_hba.hba_eq[idx] = qdesc; + +		/* Create Fast Path FCP CQs */ +		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, +					      phba->sli4_hba.cq_ecount); +		if (!qdesc) { +			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +					"0499 Failed allocate fast-path FCP " +					"CQ (%d)\n", idx); +			goto out_error; +		} +		phba->sli4_hba.fcp_cq[idx] = qdesc; + +		/* Create Fast Path FCP WQs */ +		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, +					      phba->sli4_hba.wq_ecount); +		if (!qdesc) { +			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +					"0503 Failed allocate fast-path FCP " +					"WQ (%d)\n", idx); +			goto out_error; +		} +		phba->sli4_hba.fcp_wq[idx] = qdesc;  	} +  	/* -	 * Create Complete Queues (CQs) +	 * Create Slow Path Completion Queues (CQs)  	 */ -	/* Get CQ depth from module parameter, fake the default for now */ -	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; -	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; -  	/* Create slow-path Mailbox Command Complete Queue */  	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,  				      phba->sli4_hba.cq_ecount);  	if (!qdesc) {  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  				"0500 Failed allocate slow-path mailbox CQ\n"); -		goto out_free_fp_eq; +		goto out_error;  	}  	phba->sli4_hba.mbx_cq = qdesc; @@ -5741,50 +7155,29 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)  	if (!qdesc) {  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  				"0501 Failed allocate slow-path ELS CQ\n"); -		goto out_free_mbx_cq; +		goto out_error;  	}  	phba->sli4_hba.els_cq = qdesc; -	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ -	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * -				phba->cfg_fcp_eq_count), GFP_KERNEL); -	if (!phba->sli4_hba.fcp_cq) { -		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -				"2577 Failed allocate memory for fast-path " -				"CQ record array\n"); -		goto out_free_els_cq; -	} -	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { -		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, -					      phba->sli4_hba.cq_ecount); -		if (!qdesc) { -			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -					"0499 Failed allocate fast-path FCP " -					"CQ (%d)\n", fcp_cqidx); -			goto out_free_fcp_cq; -		} -		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc; -	} +	/* +	 * Create Slow Path Work Queues (WQs) +	 */  	/* Create Mailbox Command Queue */ -	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; -	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;  	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,  				      phba->sli4_hba.mq_ecount);  	if (!qdesc) {  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  				"0505 Failed allocate slow-path MQ\n"); -		goto out_free_fcp_cq; +		goto out_error;  	}  	phba->sli4_hba.mbx_wq = qdesc;  	/* -	 * Create all the Work Queues (WQs) +	 * Create ELS Work Queues  	 */ -	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; -	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;  	/* Create slow-path ELS Work Queue */  	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, @@ -5792,36 +7185,13 @@ 
lpfc_sli4_queue_create(struct lpfc_hba *phba)  	if (!qdesc) {  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  				"0504 Failed allocate slow-path ELS WQ\n"); -		goto out_free_mbx_wq; +		goto out_error;  	}  	phba->sli4_hba.els_wq = qdesc; -	/* Create fast-path FCP Work Queue(s) */ -	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * -				phba->cfg_fcp_wq_count), GFP_KERNEL); -	if (!phba->sli4_hba.fcp_wq) { -		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -				"2578 Failed allocate memory for fast-path " -				"WQ record array\n"); -		goto out_free_els_wq; -	} -	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { -		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, -					      phba->sli4_hba.wq_ecount); -		if (!qdesc) { -			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -					"0503 Failed allocate fast-path FCP " -					"WQ (%d)\n", fcp_wqidx); -			goto out_free_fcp_wq; -		} -		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc; -	} -  	/*  	 * Create Receive Queue (RQ)  	 */ -	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; -	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;  	/* Create Receive Queue for header */  	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, @@ -5829,7 +7199,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)  	if (!qdesc) {  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  				"0506 Failed allocate receive HRQ\n"); -		goto out_free_fcp_wq; +		goto out_error;  	}  	phba->sli4_hba.hdr_rq = qdesc; @@ -5839,49 +7209,17 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)  	if (!qdesc) {  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  				"0507 Failed allocate receive DRQ\n"); -		goto out_free_hdr_rq; +		goto out_error;  	}  	phba->sli4_hba.dat_rq = qdesc; +	/* Create the Queues needed for Flash Optimized Fabric operations */ +	if (phba->cfg_fof) +		lpfc_fof_queue_create(phba);  	return 0; -out_free_hdr_rq: -	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); -	phba->sli4_hba.hdr_rq = NULL; -out_free_fcp_wq: -	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) { -		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]); -		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL; -	} -	kfree(phba->sli4_hba.fcp_wq); -out_free_els_wq: -	lpfc_sli4_queue_free(phba->sli4_hba.els_wq); -	phba->sli4_hba.els_wq = NULL; -out_free_mbx_wq: -	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); -	phba->sli4_hba.mbx_wq = NULL; -out_free_fcp_cq: -	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) { -		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]); -		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; -	} -	kfree(phba->sli4_hba.fcp_cq); -out_free_els_cq: -	lpfc_sli4_queue_free(phba->sli4_hba.els_cq); -	phba->sli4_hba.els_cq = NULL; -out_free_mbx_cq: -	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); -	phba->sli4_hba.mbx_cq = NULL; -out_free_fp_eq: -	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) { -		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]); -		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL; -	} -	kfree(phba->sli4_hba.fp_eq); -out_free_sp_eq: -	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); -	phba->sli4_hba.sp_eq = NULL;  out_error: +	lpfc_sli4_queue_destroy(phba);  	return -ENOMEM;  } @@ -5894,57 +7232,95 @@ out_error:   *   * Return codes   *      0 - successful - *      -ENOMEM - No availble memory + *      -ENOMEM - No available memory   *      -EIO - The mailbox failed to complete successfully.   
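 *
 * Editor's sketch (hypothetical unwind, not part of the patch): because
 * every queue pointer is NULL-checked and cleared below, the routine is
 * safe to call on a partially created set, e.g.:
 *
 *	if (lpfc_sli4_queue_create(phba))
 *		return -ENOMEM;
 *	...
 *	lpfc_sli4_queue_destroy(phba);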
**/ -static void +void  lpfc_sli4_queue_destroy(struct lpfc_hba *phba)  { -	int fcp_qidx; +	int idx; + +	if (phba->cfg_fof) +		lpfc_fof_queue_destroy(phba); + +	if (phba->sli4_hba.hba_eq != NULL) { +		/* Release HBA event queue */ +		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { +			if (phba->sli4_hba.hba_eq[idx] != NULL) { +				lpfc_sli4_queue_free( +					phba->sli4_hba.hba_eq[idx]); +				phba->sli4_hba.hba_eq[idx] = NULL; +			} +		} +		kfree(phba->sli4_hba.hba_eq); +		phba->sli4_hba.hba_eq = NULL; +	} + +	if (phba->sli4_hba.fcp_cq != NULL) { +		/* Release FCP completion queue */ +		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { +			if (phba->sli4_hba.fcp_cq[idx] != NULL) { +				lpfc_sli4_queue_free( +					phba->sli4_hba.fcp_cq[idx]); +				phba->sli4_hba.fcp_cq[idx] = NULL; +			} +		} +		kfree(phba->sli4_hba.fcp_cq); +		phba->sli4_hba.fcp_cq = NULL; +	} + +	if (phba->sli4_hba.fcp_wq != NULL) { +		/* Release FCP work queue */ +		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { +			if (phba->sli4_hba.fcp_wq[idx] != NULL) { +				lpfc_sli4_queue_free( +					phba->sli4_hba.fcp_wq[idx]); +				phba->sli4_hba.fcp_wq[idx] = NULL; +			} +		} +		kfree(phba->sli4_hba.fcp_wq); +		phba->sli4_hba.fcp_wq = NULL; +	} + +	/* Release FCP CQ mapping array */ +	if (phba->sli4_hba.fcp_cq_map != NULL) { +		kfree(phba->sli4_hba.fcp_cq_map); +		phba->sli4_hba.fcp_cq_map = NULL; +	}  	/* Release mailbox command work queue */ -	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); -	phba->sli4_hba.mbx_wq = NULL; +	if (phba->sli4_hba.mbx_wq != NULL) { +		lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); +		phba->sli4_hba.mbx_wq = NULL; +	}  	/* Release ELS work queue */ -	lpfc_sli4_queue_free(phba->sli4_hba.els_wq); -	phba->sli4_hba.els_wq = NULL; - -	/* Release FCP work queue */ -	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) -		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]); -	kfree(phba->sli4_hba.fcp_wq); -	phba->sli4_hba.fcp_wq = NULL; +	if (phba->sli4_hba.els_wq != NULL) { +		lpfc_sli4_queue_free(phba->sli4_hba.els_wq); +		phba->sli4_hba.els_wq = NULL; +	}  	/* Release unsolicited receive queue */ -	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); -	phba->sli4_hba.hdr_rq = NULL; -	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); -	phba->sli4_hba.dat_rq = NULL; +	if (phba->sli4_hba.hdr_rq != NULL) { +		lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); +		phba->sli4_hba.hdr_rq = NULL; +	} +	if (phba->sli4_hba.dat_rq != NULL) { +		lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); +		phba->sli4_hba.dat_rq = NULL; +	}  	/* Release ELS complete queue */ -	lpfc_sli4_queue_free(phba->sli4_hba.els_cq); -	phba->sli4_hba.els_cq = NULL; +	if (phba->sli4_hba.els_cq != NULL) { +		lpfc_sli4_queue_free(phba->sli4_hba.els_cq); +		phba->sli4_hba.els_cq = NULL; +	}  	/* Release mailbox command complete queue */ -	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); -	phba->sli4_hba.mbx_cq = NULL; - -	/* Release FCP response complete queue */ -	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) -		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]); -	kfree(phba->sli4_hba.fcp_cq); -	phba->sli4_hba.fcp_cq = NULL; - -	/* Release fast-path event queue */ -	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) -		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]); -	kfree(phba->sli4_hba.fp_eq); -	phba->sli4_hba.fp_eq = NULL; - -	/* Release slow-path event queue */ -	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); -	phba->sli4_hba.sp_eq = NULL; +	if (phba->sli4_hba.mbx_cq != NULL) { +		
lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); +		phba->sli4_hba.mbx_cq = NULL; +	}  	return;  } @@ -5958,60 +7334,177 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)   *   * Return codes   *      0 - successful - *      -ENOMEM - No availble memory + *      -ENOMEM - No available memory   *      -EIO - The mailbox failed to complete successfully.   **/  int  lpfc_sli4_queue_setup(struct lpfc_hba *phba)  { +	struct lpfc_sli *psli = &phba->sli; +	struct lpfc_sli_ring *pring;  	int rc = -ENOMEM;  	int fcp_eqidx, fcp_cqidx, fcp_wqidx;  	int fcp_cq_index = 0; +	uint32_t shdr_status, shdr_add_status; +	union lpfc_sli4_cfg_shdr *shdr; +	LPFC_MBOXQ_t *mboxq; +	uint32_t length; -	/* -	 * Set up Event Queues (EQs) -	 */ - -	/* Set up slow-path event queue */ -	if (!phba->sli4_hba.sp_eq) { +	/* Check for dual-ULP support */ +	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); +	if (!mboxq) {  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -				"0520 Slow-path EQ not allocated\n"); -		goto out_error; +				"3249 Unable to allocate memory for " +				"QUERY_FW_CFG mailbox command\n"); +		return -ENOMEM;  	} -	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq, -			    LPFC_SP_DEF_IMAX); -	if (rc) { +	length = (sizeof(struct lpfc_mbx_query_fw_config) - +		  sizeof(struct lpfc_sli4_cfg_mhdr)); +	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, +			 LPFC_MBOX_OPCODE_QUERY_FW_CFG, +			 length, LPFC_SLI4_MBX_EMBED); + +	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + +	shdr = (union lpfc_sli4_cfg_shdr *) +			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr; +	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); +	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); +	if (shdr_status || shdr_add_status || rc) {  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -				"0521 Failed setup of slow-path EQ: " -				"rc = 0x%x\n", rc); +				"3250 QUERY_FW_CFG mailbox failed with status " +				"x%x add_status x%x, mbx status x%x\n", +				shdr_status, shdr_add_status, rc); +		if (rc != MBX_TIMEOUT) +			mempool_free(mboxq, phba->mbox_mem_pool); +		rc = -ENXIO;  		goto out_error;  	} + +	phba->sli4_hba.fw_func_mode = +			mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; +	phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; +	phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;  	lpfc_printf_log(phba, KERN_INFO, LOG_INIT, -			"2583 Slow-path EQ setup: queue-id=%d\n", -			phba->sli4_hba.sp_eq->queue_id); +			"3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " +			"ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, +			phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); + +	if (rc != MBX_TIMEOUT) +		mempool_free(mboxq, phba->mbox_mem_pool); -	/* Set up fast-path event queue */ -	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { -		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) { +	/* +	 * Set up HBA Event Queues (EQs) +	 */ + +	/* Set up HBA event queue */ +	if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) { +		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +				"3147 Fast-path EQs not allocated\n"); +		rc = -ENOMEM; +		goto out_error; +	} +	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) { +		if (!phba->sli4_hba.hba_eq[fcp_eqidx]) {  			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  					"0522 Fast-path EQ (%d) not "  					"allocated\n", fcp_eqidx); -			goto out_destroy_fp_eq; +			rc = -ENOMEM; +			goto out_destroy_hba_eq;  		} -		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx], -				    phba->cfg_fcp_imax); +	
	rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx], +			 (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel));  		if (rc) {  			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  					"0523 Failed setup of fast-path EQ "  					"(%d), rc = 0x%x\n", fcp_eqidx, rc); -			goto out_destroy_fp_eq; +			goto out_destroy_hba_eq;  		}  		lpfc_printf_log(phba, KERN_INFO, LOG_INIT, -				"2584 Fast-path EQ setup: " +				"2584 HBA EQ setup: "  				"queue[%d]-id=%d\n", fcp_eqidx, -				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id); +				phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id); +	} + +	/* Set up fast-path FCP Response Complete Queue */ +	if (!phba->sli4_hba.fcp_cq) { +		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +				"3148 Fast-path FCP CQ array not " +				"allocated\n"); +		rc = -ENOMEM; +		goto out_destroy_hba_eq; +	} + +	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) { +		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { +			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +					"0526 Fast-path FCP CQ (%d) not " +					"allocated\n", fcp_cqidx); +			rc = -ENOMEM; +			goto out_destroy_fcp_cq; +		} +		rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], +			phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP); +		if (rc) { +			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +					"0527 Failed setup of fast-path FCP " +					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc); +			goto out_destroy_fcp_cq; +		} + +		/* Setup fcp_cq_map for fast lookup */ +		phba->sli4_hba.fcp_cq_map[fcp_cqidx] = +				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id; + +		lpfc_printf_log(phba, KERN_INFO, LOG_INIT, +				"2588 FCP CQ setup: cq[%d]-id=%d, " +				"parent seq[%d]-id=%d\n", +				fcp_cqidx, +				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, +				fcp_cqidx, +				phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id); +	} + +	/* Set up fast-path FCP Work Queue */ +	if (!phba->sli4_hba.fcp_wq) { +		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +				"3149 Fast-path FCP WQ array not " +				"allocated\n"); +		rc = -ENOMEM; +		goto out_destroy_fcp_cq;  	} +	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) { +		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { +			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +					"0534 Fast-path FCP WQ (%d) not " +					"allocated\n", fcp_wqidx); +			rc = -ENOMEM; +			goto out_destroy_fcp_wq; +		} +		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], +				    phba->sli4_hba.fcp_cq[fcp_wqidx], +				    LPFC_FCP); +		if (rc) { +			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +					"0535 Failed setup of fast-path FCP " +					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); +			goto out_destroy_fcp_wq; +		} + +		/* Bind this WQ to the next FCP ring */ +		pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx]; +		pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx]; +		phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring; + +		lpfc_printf_log(phba, KERN_INFO, LOG_INIT, +				"2591 FCP WQ setup: wq[%d]-id=%d, " +				"parent cq[%d]-id=%d\n", +				fcp_wqidx, +				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, +				fcp_cq_index, +				phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id); +	}  	/*  	 * Set up Complete Queues (CQs)  	 */ @@ -6020,29 +7513,31 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)  	if (!phba->sli4_hba.mbx_cq) {  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  				"0528 Mailbox CQ not allocated\n"); -		goto out_destroy_fp_eq; +		rc = -ENOMEM; +		goto out_destroy_fcp_wq;  	} -	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq, -			    LPFC_MCQ, LPFC_MBOX); +	rc = lpfc_cq_create(phba, 
phba->sli4_hba.mbx_cq, +			phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX);  	if (rc) {  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  				"0529 Failed setup of slow-path mailbox CQ: "  				"rc = 0x%x\n", rc); -		goto out_destroy_fp_eq; +		goto out_destroy_fcp_wq;  	}  	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,  			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",  			phba->sli4_hba.mbx_cq->queue_id, -			phba->sli4_hba.sp_eq->queue_id); +			phba->sli4_hba.hba_eq[0]->queue_id);  	/* Set up slow-path ELS Complete Queue */  	if (!phba->sli4_hba.els_cq) {  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  				"0530 ELS CQ not allocated\n"); +		rc = -ENOMEM;  		goto out_destroy_mbx_cq;  	} -	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq, -			    LPFC_WCQ, LPFC_ELS); +	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, +			phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS);  	if (rc) {  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  				"0531 Failed setup of slow-path ELS CQ: " @@ -6052,33 +7547,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)  	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,  			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",  			phba->sli4_hba.els_cq->queue_id, -			phba->sli4_hba.sp_eq->queue_id); - -	/* Set up fast-path FCP Response Complete Queue */ -	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { -		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { -			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -					"0526 Fast-path FCP CQ (%d) not " -					"allocated\n", fcp_cqidx); -			goto out_destroy_fcp_cq; -		} -		rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], -				    phba->sli4_hba.fp_eq[fcp_cqidx], -				    LPFC_WCQ, LPFC_FCP); -		if (rc) { -			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -					"0527 Failed setup of fast-path FCP " -					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc); -			goto out_destroy_fcp_cq; -		} -		lpfc_printf_log(phba, KERN_INFO, LOG_INIT, -				"2588 FCP CQ setup: cq[%d]-id=%d, " -				"parent eq[%d]-id=%d\n", -				fcp_cqidx, -				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, -				fcp_cqidx, -				phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id); -	} +			phba->sli4_hba.hba_eq[0]->queue_id);  	/*  	 * Set up all the Work Queues (WQs) @@ -6088,7 +7557,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)  	if (!phba->sli4_hba.mbx_wq) {  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  				"0538 Slow-path MQ not allocated\n"); -		goto out_destroy_fcp_cq; +		rc = -ENOMEM; +		goto out_destroy_els_cq;  	}  	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,  			    phba->sli4_hba.mbx_cq, LPFC_MBOX); @@ -6096,7 +7566,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  				"0539 Failed setup of slow-path MQ: "  				"rc = 0x%x\n", rc); -		goto out_destroy_fcp_cq; +		goto out_destroy_els_cq;  	}  	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,  			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", @@ -6107,6 +7577,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)  	if (!phba->sli4_hba.els_wq) {  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  				"0536 Slow-path ELS WQ not allocated\n"); +		rc = -ENOMEM;  		goto out_destroy_mbx_wq;  	}  	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, @@ -6117,47 +7588,30 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)  				"rc = 0x%x\n", rc);  		goto out_destroy_mbx_wq;  	} + +	/* Bind this WQ to the ELS ring */ +	pring = &psli->ring[LPFC_ELS_RING]; +	pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq; +	phba->sli4_hba.els_cq->pring = pring; +  	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,  			
"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",  			phba->sli4_hba.els_wq->queue_id,  			phba->sli4_hba.els_cq->queue_id); -	/* Set up fast-path FCP Work Queue */ -	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { -		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { -			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -					"0534 Fast-path FCP WQ (%d) not " -					"allocated\n", fcp_wqidx); -			goto out_destroy_fcp_wq; -		} -		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], -				    phba->sli4_hba.fcp_cq[fcp_cq_index], -				    LPFC_FCP); -		if (rc) { -			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -					"0535 Failed setup of fast-path FCP " -					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); -			goto out_destroy_fcp_wq; -		} -		lpfc_printf_log(phba, KERN_INFO, LOG_INIT, -				"2591 FCP WQ setup: wq[%d]-id=%d, " -				"parent cq[%d]-id=%d\n", -				fcp_wqidx, -				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, -				fcp_cq_index, -				phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); -		/* Round robin FCP Work Queue's Completion Queue assignment */ -		fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count); -	} -  	/*  	 * Create Receive Queue (RQ)  	 */  	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  				"0540 Receive Queue not allocated\n"); -		goto out_destroy_fcp_wq; +		rc = -ENOMEM; +		goto out_destroy_els_wq;  	} + +	lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ); +	lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ); +  	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,  			    phba->sli4_hba.els_cq, LPFC_USOL);  	if (rc) { @@ -6166,30 +7620,44 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)  				"rc = 0x%x\n", rc);  		goto out_destroy_fcp_wq;  	} +  	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,  			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "  			"parent cq-id=%d\n",  			phba->sli4_hba.hdr_rq->queue_id,  			phba->sli4_hba.dat_rq->queue_id,  			phba->sli4_hba.els_cq->queue_id); + +	if (phba->cfg_fof) { +		rc = lpfc_fof_queue_setup(phba); +		if (rc) { +			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +					"0549 Failed setup of FOF Queues: " +					"rc = 0x%x\n", rc); +			goto out_destroy_els_rq; +		} +	}  	return 0; -out_destroy_fcp_wq: -	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) -		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); +out_destroy_els_rq: +	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); +out_destroy_els_wq:  	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);  out_destroy_mbx_wq:  	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); -out_destroy_fcp_cq: -	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) -		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); +out_destroy_els_cq:  	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);  out_destroy_mbx_cq:  	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); -out_destroy_fp_eq: +out_destroy_fcp_wq: +	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) +		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); +out_destroy_fcp_cq: +	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) +		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); +out_destroy_hba_eq:  	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) -		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]); -	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); +		lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]);  out_error:  	return rc;  } @@ -6203,7 +7671,7 @@ out_error:   *   * Return codes   *      0 - successful - *      -ENOMEM - No availble 
memory + *      -ENOMEM - No available memory   *      -EIO - The mailbox failed to complete successfully.   **/  void @@ -6211,6 +7679,9 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)  {  	int fcp_qidx; +	/* Unset the queues created for Flash Optimized Fabric operations */ +	if (phba->cfg_fof) +		lpfc_fof_queue_destroy(phba);  	/* Unset mailbox command work queue */  	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);  	/* Unset ELS work queue */ @@ -6218,20 +7689,27 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)  	/* Unset unsolicited receive queue */  	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);  	/* Unset FCP work queue */ -	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) -		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); +	if (phba->sli4_hba.fcp_wq) { +		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; +		     fcp_qidx++) +			lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); +	}  	/* Unset mailbox command complete queue */  	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);  	/* Unset ELS complete queue */  	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);  	/* Unset FCP response complete queue */ -	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) -		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); +	if (phba->sli4_hba.fcp_cq) { +		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; +		     fcp_qidx++) +			lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); +	}  	/* Unset fast-path event queue */ -	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) -		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); -	/* Unset slow-path event queue */ -	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); +	if (phba->sli4_hba.hba_eq) { +		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; +		     fcp_qidx++) +			lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]); +	}  }  /** @@ -6248,7 +7726,7 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)   *   * Return codes   *      0 - successful - *      -ENOMEM - No availble memory + *      -ENOMEM - No available memory   **/  static int  lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) @@ -6409,114 +7887,142 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)   *   * Return codes   *      0 - successful - *      -ENOMEM - No availble memory + *      -ENOMEM - No available memory   *      -EIO - The mailbox failed to complete successfully.   
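 *
 * Editor's note: the reset method is selected by if_type below. For
 * if_type 0 a FUNCTION_RESET mailbox command is issued; for if_type 2 the
 * driver writes INIT_PORT to the port control register and polls the port
 * status register for RDY for up to 10 seconds, restarting the request
 * whenever RN (reset needed) is reported.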
**/  int  lpfc_pci_function_reset(struct lpfc_hba *phba)  {  	LPFC_MBOXQ_t *mboxq; -	uint32_t rc = 0; -	uint32_t shdr_status, shdr_add_status; -	union lpfc_sli4_cfg_shdr *shdr; - -	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); -	if (!mboxq) { -		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -				"0494 Unable to allocate memory for issuing " -				"SLI_FUNCTION_RESET mailbox command\n"); -		return -ENOMEM; -	} - -	/* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */ -	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, -			 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, -			 LPFC_SLI4_MBX_EMBED); -	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); -	shdr = (union lpfc_sli4_cfg_shdr *) -		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr; -	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); -	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); -	if (rc != MBX_TIMEOUT) -		mempool_free(mboxq, phba->mbox_mem_pool); -	if (shdr_status || shdr_add_status || rc) { -		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -				"0495 SLI_FUNCTION_RESET mailbox failed with " -				"status x%x add_status x%x, mbx status x%x\n", -				shdr_status, shdr_add_status, rc); -		rc = -ENXIO; -	} -	return rc; -} - -/** - * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands - * @phba: pointer to lpfc hba data structure. - * @cnt: number of nop mailbox commands to send. - * - * This routine is invoked to send a number @cnt of NOP mailbox command and - * wait for each command to complete. - * - * Return: the number of NOP mailbox command completed. - **/ -static int -lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt) -{ -	LPFC_MBOXQ_t *mboxq; -	int length, cmdsent; -	uint32_t mbox_tmo; -	uint32_t rc = 0; +	uint32_t rc = 0, if_type;  	uint32_t shdr_status, shdr_add_status; +	uint32_t rdy_chk, num_resets = 0, reset_again = 0;  	union lpfc_sli4_cfg_shdr *shdr; +	struct lpfc_register reg_data; +	uint16_t devid; -	if (cnt == 0) { -		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, -				"2518 Requested to send 0 NOP mailbox cmd\n"); -		return cnt; -	} - -	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); -	if (!mboxq) { -		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -				"2519 Unable to allocate memory for issuing " -				"NOP mailbox command\n"); -		return 0; -	} - -	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */ -	length = (sizeof(struct lpfc_mbx_nop) - -		  sizeof(struct lpfc_sli4_cfg_mhdr)); -	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, -			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED); +	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); +	switch (if_type) { +	case LPFC_SLI_INTF_IF_TYPE_0: +		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, +						       GFP_KERNEL); +		if (!mboxq) { +			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +					"0494 Unable to allocate memory for " +					"issuing SLI_FUNCTION_RESET mailbox " +					"command\n"); +			return -ENOMEM; +		} -	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); -	for (cmdsent = 0; cmdsent < cnt; cmdsent++) { -		if (!phba->sli4_hba.intr_enable) -			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); -		else -			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); -		if (rc == MBX_TIMEOUT) -			break; -		/* Check return status */ +		/* Setup PCI function reset mailbox-ioctl command */ +		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, +				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, +				 LPFC_SLI4_MBX_EMBED); +		rc = 
lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);  		shdr = (union lpfc_sli4_cfg_shdr *)  			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;  		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);  		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,  					 &shdr->response); +		if (rc != MBX_TIMEOUT) +			mempool_free(mboxq, phba->mbox_mem_pool);  		if (shdr_status || shdr_add_status || rc) { -			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, -					"2520 NOP mailbox command failed " -					"status x%x add_status x%x mbx " -					"status x%x\n", shdr_status, -					shdr_add_status, rc); -			break; +			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +					"0495 SLI_FUNCTION_RESET mailbox " +					"failed with status x%x add_status x%x," +					" mbx status x%x\n", +					shdr_status, shdr_add_status, rc); +			rc = -ENXIO;  		} +		break; +	case LPFC_SLI_INTF_IF_TYPE_2: +		for (num_resets = 0; +		     num_resets < MAX_IF_TYPE_2_RESETS; +		     num_resets++) { +			reg_data.word0 = 0; +			bf_set(lpfc_sliport_ctrl_end, ®_data, +			       LPFC_SLIPORT_LITTLE_ENDIAN); +			bf_set(lpfc_sliport_ctrl_ip, ®_data, +			       LPFC_SLIPORT_INIT_PORT); +			writel(reg_data.word0, phba->sli4_hba.u.if_type2. +			       CTRLregaddr); +			/* flush */ +			pci_read_config_word(phba->pcidev, +					     PCI_DEVICE_ID, &devid); +			/* +			 * Poll the Port Status Register and wait for RDY for +			 * up to 10 seconds.  If the port doesn't respond, treat +			 * it as an error.  If the port responds with RN, start +			 * the loop again. +			 */ +			for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) { +				msleep(10); +				if (lpfc_readl(phba->sli4_hba.u.if_type2. +					      STATUSregaddr, ®_data.word0)) { +					rc = -ENODEV; +					goto out; +				} +				if (bf_get(lpfc_sliport_status_rn, ®_data)) +					reset_again++; +				if (bf_get(lpfc_sliport_status_rdy, ®_data)) +					break; +			} + +			/* +			 * If the port responds to the init request with +			 * reset needed, delay for a bit and restart the loop. +			 */ +			if (reset_again && (rdy_chk < 1000)) { +				msleep(10); +				reset_again = 0; +				continue; +			} + +			/* Detect any port errors. */ +			if ((bf_get(lpfc_sliport_status_err, ®_data)) || +			    (rdy_chk >= 1000)) { +				phba->work_status[0] = readl( +					phba->sli4_hba.u.if_type2.ERR1regaddr); +				phba->work_status[1] = readl( +					phba->sli4_hba.u.if_type2.ERR2regaddr); +				lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +					"2890 Port error detected during port " +					"reset(%d): wait_tmo:%d ms, " +					"port status reg 0x%x, " +					"error 1=0x%x, error 2=0x%x\n", +					num_resets, rdy_chk*10, +					reg_data.word0, +					phba->work_status[0], +					phba->work_status[1]); +				rc = -ENODEV; +			} + +			/* +			 * Terminate the outer loop provided the Port indicated +			 * ready within 10 seconds. +			 */ +			if (rdy_chk < 1000) +				break; +		} +		/* delay driver action following IF_TYPE_2 function reset */ +		msleep(100); +		break; +	case LPFC_SLI_INTF_IF_TYPE_1: +	default: +		break;  	} -	if (rc != MBX_TIMEOUT) -		mempool_free(mboxq, phba->mbox_mem_pool); +out: +	/* Catch the not-ready port failure after a port reset. 
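The if_type 2 arm above issues INIT_PORT, polls the port status register every 10 ms for up to 10 seconds (1000 reads), restarts the whole reset when the port raises RN (reset needed), and gives up after MAX_IF_TYPE_2_RESETS attempts. A self-contained model of that control flow; the status bit encoding and the read_status() stub are invented stand-ins for the real STATUSregaddr access:

#include <stdio.h>
#include <unistd.h>

#define ST_RDY 0x1  /* port ready          (stand-in for lpfc_sliport_status_rdy) */
#define ST_RN  0x2  /* port wants re-reset (stand-in for lpfc_sliport_status_rn)  */
#define ST_ERR 0x4  /* port error          (stand-in for lpfc_sliport_status_err) */

#define MAX_RESETS 3     /* mirrors MAX_IF_TYPE_2_RESETS */
#define POLL_LIMIT 1000  /* 1000 * 10 ms = 10 seconds    */

static unsigned int read_status(int reset_no, int poll)
{
	/* Fake hardware: first reset asks for a re-reset, second comes ready. */
	if (reset_no == 0 && poll == 2)
		return ST_RN | ST_RDY;
	if (reset_no == 1 && poll == 5)
		return ST_RDY;
	return 0;
}

int main(void)
{
	int reset, poll;
	unsigned int st = 0;

	for (reset = 0; reset < MAX_RESETS; reset++) {
		int reset_again = 0;

		/* the real driver writes INIT_PORT to CTRLregaddr here */
		for (poll = 0; poll < POLL_LIMIT; poll++) {
			usleep(10000);            /* msleep(10) analogue */
			st = read_status(reset, poll);
			if (st & ST_RN)
				reset_again = 1;
			if (st & ST_RDY)
				break;
		}
		if (reset_again && poll < POLL_LIMIT)
			continue;                 /* port asked for another reset */
		if ((st & ST_ERR) || poll >= POLL_LIMIT) {
			fprintf(stderr, "port error or timeout on reset %d\n", reset);
			return 1;
		}
		if (poll < POLL_LIMIT)
			break;                    /* ready: stop resetting */
	}
	if (reset >= MAX_RESETS) {
		fprintf(stderr, "port never came ready\n");
		return 1;
	}
	printf("port ready after %d reset(s)\n", reset + 1);
	return 0;
}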
*/
+	if (num_resets >= MAX_IF_TYPE_2_RESETS) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3317 HBA not functional: IP Reset Failed "
+				"after (%d) retries, try: "
+				"echo fw_reset > board_mode\n", num_resets);
+		rc = -ENODEV;
+	}
-	return cmdsent;
+	return rc;
 }
 
 /**
@@ -6536,6 +8042,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 	struct pci_dev *pdev;
 	unsigned long bar0map_len, bar1map_len, bar2map_len;
 	int error = -ENODEV;
+	uint32_t if_type;
 
 	/* Obtain PCI device reference */
 	if (!phba->pcidev)
@@ -6552,61 +8059,110 @@
 		}
 	}
 
-	/* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
-	 * number of bytes required by each mapping. They are actually
-	 * mapping to the PCI BAR regions 0 or 1, 2, and 4 by the SLI4 device.
+	/*
+	 * The BARs and register set definitions and offset locations are
+	 * dependent on the if_type.
 	 */
-	if (pci_resource_start(pdev, 0)) {
-		phba->pci_bar0_map = pci_resource_start(pdev, 0);
-		bar0map_len = pci_resource_len(pdev, 0);
-	} else {
-		phba->pci_bar0_map = pci_resource_start(pdev, 1);
-		bar0map_len = pci_resource_len(pdev, 1);
+	if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
+				  &phba->sli4_hba.sli_intf.word0)) {
+		return error;
+	}
+
+	/* There is no SLI3 fallback for SLI4 devices. */
+	if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
+	    LPFC_SLI_INTF_VALID) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2894 SLI_INTF reg contents invalid "
+				"sli_intf reg 0x%x\n",
+				phba->sli4_hba.sli_intf.word0);
+		return error;
 	}
-	phba->pci_bar1_map = pci_resource_start(pdev, 2);
-	bar1map_len = pci_resource_len(pdev, 2);
-	phba->pci_bar2_map = pci_resource_start(pdev, 4);
-	bar2map_len = pci_resource_len(pdev, 4);
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	/*
+	 * Get the bus address of SLI4 device BAR regions and the
+	 * number of bytes required by each mapping. The mapping of the
+	 * particular PCI BAR regions is dependent on the type of
+	 * SLI4 device.
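Everything in the new mapping code keys off the SLI_INTF word read from PCI config space: it must carry the valid signature (there is no SLI-3 fallback), and its if_type field decides which BARs exist. if_type 0 maps separate config, control, and doorbell BARs, while if_type 2 reaches all registers through the config BAR alone. A compact sketch of that dispatch; the field positions here are illustrative only, the real accessors are the driver's bf_get() macros:

#include <stdio.h>

/* Illustrative layout only; the real fields live in the lpfc headers. */
#define SLI_INTF_VALID(w)   (((w) >> 29) & 0x7)  /* signature field */
#define SLI_INTF_IF_TYPE(w) (((w) >> 12) & 0xf)  /* interface type  */
#define VALID_SIG 0x6

static int plan_bar_mapping(unsigned int sli_intf)
{
	if (SLI_INTF_VALID(sli_intf) != VALID_SIG) {
		fprintf(stderr, "SLI_INTF invalid: 0x%08x\n", sli_intf);
		return -1;                /* no SLI3 fallback for SLI4 parts */
	}
	switch (SLI_INTF_IF_TYPE(sli_intf)) {
	case 0:  /* if_type 0: three separate regions */
		puts("map BAR0 (config) + BAR2 (control) + BAR4 (doorbell)");
		break;
	case 2:  /* if_type 2: the config region covers all registers */
		puts("map BAR0 (config) only");
		break;
	default:
		fprintf(stderr, "unsupported if_type\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	plan_bar_mapping((VALID_SIG << 29) | (0 << 12)); /* if_type 0 */
	plan_bar_mapping((VALID_SIG << 29) | (2 << 12)); /* if_type 2 */
	plan_bar_mapping(0);                             /* invalid   */
	return 0;
}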
+	 */ +	if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { +		phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); +		bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); -	/* Map SLI4 PCI Config Space Register base to a kernel virtual addr */ -	phba->sli4_hba.conf_regs_memmap_p = +		/* +		 * Map SLI4 PCI Config Space Register base to a kernel virtual +		 * addr +		 */ +		phba->sli4_hba.conf_regs_memmap_p = +			ioremap(phba->pci_bar0_map, bar0map_len); +		if (!phba->sli4_hba.conf_regs_memmap_p) { +			dev_printk(KERN_ERR, &pdev->dev, +				   "ioremap failed for SLI4 PCI config " +				   "registers.\n"); +			goto out; +		} +		phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; +		/* Set up BAR0 PCI config space register memory map */ +		lpfc_sli4_bar0_register_memmap(phba, if_type); +	} else { +		phba->pci_bar0_map = pci_resource_start(pdev, 1); +		bar0map_len = pci_resource_len(pdev, 1); +		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { +			dev_printk(KERN_ERR, &pdev->dev, +			   "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); +			goto out; +		} +		phba->sli4_hba.conf_regs_memmap_p =  				ioremap(phba->pci_bar0_map, bar0map_len); -	if (!phba->sli4_hba.conf_regs_memmap_p) { -		dev_printk(KERN_ERR, &pdev->dev, -			   "ioremap failed for SLI4 PCI config registers.\n"); -		goto out; +		if (!phba->sli4_hba.conf_regs_memmap_p) { +			dev_printk(KERN_ERR, &pdev->dev, +				"ioremap failed for SLI4 PCI config " +				"registers.\n"); +				goto out; +		} +		lpfc_sli4_bar0_register_memmap(phba, if_type);  	} -	/* Map SLI4 HBA Control Register base to a kernel virtual address. */ -	phba->sli4_hba.ctrl_regs_memmap_p = +	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && +	    (pci_resource_start(pdev, PCI_64BIT_BAR2))) { +		/* +		 * Map SLI4 if type 0 HBA Control Register base to a kernel +		 * virtual address and setup the registers. +		 */ +		phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); +		bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); +		phba->sli4_hba.ctrl_regs_memmap_p =  				ioremap(phba->pci_bar1_map, bar1map_len); -	if (!phba->sli4_hba.ctrl_regs_memmap_p) { -		dev_printk(KERN_ERR, &pdev->dev, +		if (!phba->sli4_hba.ctrl_regs_memmap_p) { +			dev_printk(KERN_ERR, &pdev->dev,  			   "ioremap failed for SLI4 HBA control registers.\n"); -		goto out_iounmap_conf; +			goto out_iounmap_conf; +		} +		phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p; +		lpfc_sli4_bar1_register_memmap(phba);  	} -	/* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */ -	phba->sli4_hba.drbl_regs_memmap_p = +	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && +	    (pci_resource_start(pdev, PCI_64BIT_BAR4))) { +		/* +		 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel +		 * virtual address and setup the registers. 
+		 */ +		phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); +		bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); +		phba->sli4_hba.drbl_regs_memmap_p =  				ioremap(phba->pci_bar2_map, bar2map_len); -	if (!phba->sli4_hba.drbl_regs_memmap_p) { -		dev_printk(KERN_ERR, &pdev->dev, +		if (!phba->sli4_hba.drbl_regs_memmap_p) { +			dev_printk(KERN_ERR, &pdev->dev,  			   "ioremap failed for SLI4 HBA doorbell registers.\n"); -		goto out_iounmap_ctrl; +			goto out_iounmap_ctrl; +		} +		phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; +		error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); +		if (error) +			goto out_iounmap_all;  	} -	/* Set up BAR0 PCI config space register memory map */ -	lpfc_sli4_bar0_register_memmap(phba); - -	/* Set up BAR1 register memory map */ -	lpfc_sli4_bar1_register_memmap(phba); - -	/* Set up BAR2 register memory map */ -	error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); -	if (error) -		goto out_iounmap_all; -  	return 0;  out_iounmap_all: @@ -6629,22 +8185,25 @@ out:  static void  lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)  { -	struct pci_dev *pdev; - -	/* Obtain PCI device reference */ -	if (!phba->pcidev) -		return; -	else -		pdev = phba->pcidev; - -	/* Free coherent DMA memory allocated */ +	uint32_t if_type; +	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); -	/* Unmap I/O memory space */ -	iounmap(phba->sli4_hba.drbl_regs_memmap_p); -	iounmap(phba->sli4_hba.ctrl_regs_memmap_p); -	iounmap(phba->sli4_hba.conf_regs_memmap_p); - -	return; +	switch (if_type) { +	case LPFC_SLI_INTF_IF_TYPE_0: +		iounmap(phba->sli4_hba.drbl_regs_memmap_p); +		iounmap(phba->sli4_hba.ctrl_regs_memmap_p); +		iounmap(phba->sli4_hba.conf_regs_memmap_p); +		break; +	case LPFC_SLI_INTF_IF_TYPE_2: +		iounmap(phba->sli4_hba.conf_regs_memmap_p); +		break; +	case LPFC_SLI_INTF_IF_TYPE_1: +	default: +		dev_printk(KERN_ERR, &phba->pcidev->dev, +			   "FATAL - unsupported SLI4 interface type - %d\n", +			   if_type); +		break; +	}  }  /** @@ -6929,6 +8488,287 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba)  }  /** + * lpfc_find_next_cpu - Find next available CPU that matches the phys_id + * @phba: pointer to lpfc hba data structure. + * + * Find next available CPU to use for IRQ to CPU affinity. + */ +static int +lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id) +{ +	struct lpfc_vector_map_info *cpup; +	int cpu; + +	cpup = phba->sli4_hba.cpu_map; +	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { +		/* CPU must be online */ +		if (cpu_online(cpu)) { +			if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && +			    (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) && +			    (cpup->phys_id == phys_id)) { +				return cpu; +			} +		} +		cpup++; +	} + +	/* +	 * If we get here, we have used ALL CPUs for the specific +	 * phys_id. Now we need to clear out lpfc_used_cpu and start +	 * reusing CPUs. +	 */ + +	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { +		if (lpfc_used_cpu[cpu] == phys_id) +			lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY; +	} + +	cpup = phba->sli4_hba.cpu_map; +	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { +		/* CPU must be online */ +		if (cpu_online(cpu)) { +			if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && +			    (cpup->phys_id == phys_id)) { +				return cpu; +			} +		} +		cpup++; +	} +	return LPFC_VECTOR_MAP_EMPTY; +} + +/** + * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors + * @phba:	pointer to lpfc hba data structure. 
+ * @vectors:	number of HBA vectors
+ *
+ * Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector
+ * affinitization across multiple physical CPUs (NUMA nodes).
+ * In addition, this routine will assign an IO channel for each CPU
+ * to use when issuing I/Os.
+ */
+static int
+lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
+{
+	int i, idx, saved_chann, used_chann, cpu, phys_id;
+	int max_phys_id, min_phys_id;
+	int num_io_channel, first_cpu, chan;
+	struct lpfc_vector_map_info *cpup;
+#ifdef CONFIG_X86
+	struct cpuinfo_x86 *cpuinfo;
+#endif
+	struct cpumask *mask;
+	uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1];
+
+	/* If there is no mapping, just return */
+	if (!phba->cfg_fcp_cpu_map)
+		return 1;
+
+	/* Init cpu_map array */
+	memset(phba->sli4_hba.cpu_map, 0xff,
+	       (sizeof(struct lpfc_vector_map_info) *
+		phba->sli4_hba.num_present_cpu));
+
+	max_phys_id = 0;
+	min_phys_id = 0xff;
+	phys_id = 0;
+	num_io_channel = 0;
+	first_cpu = LPFC_VECTOR_MAP_EMPTY;
+
+	/* Update CPU map with physical id and core id of each CPU */
+	cpup = phba->sli4_hba.cpu_map;
+	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+#ifdef CONFIG_X86
+		cpuinfo = &cpu_data(cpu);
+		cpup->phys_id = cpuinfo->phys_proc_id;
+		cpup->core_id = cpuinfo->cpu_core_id;
+#else
+		/* No distinction between CPUs for other platforms */
+		cpup->phys_id = 0;
+		cpup->core_id = 0;
+#endif
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"3328 CPU physid %d coreid %d\n",
+				cpup->phys_id, cpup->core_id);
+
+		if (cpup->phys_id > max_phys_id)
+			max_phys_id = cpup->phys_id;
+		if (cpup->phys_id < min_phys_id)
+			min_phys_id = cpup->phys_id;
+		cpup++;
+	}
+
+	phys_id = min_phys_id;
+	/* Now associate the HBA vectors with specific CPUs */
+	for (idx = 0; idx < vectors; idx++) {
+		cpup = phba->sli4_hba.cpu_map;
+		cpu = lpfc_find_next_cpu(phba, phys_id);
+		if (cpu == LPFC_VECTOR_MAP_EMPTY) {
+
+			/* Try for all phys_id's */
+			for (i = 1; i < max_phys_id; i++) {
+				phys_id++;
+				if (phys_id > max_phys_id)
+					phys_id = min_phys_id;
+				cpu = lpfc_find_next_cpu(phba, phys_id);
+				if (cpu == LPFC_VECTOR_MAP_EMPTY)
+					continue;
+				goto found;
+			}
+
+			/* Use round robin for scheduling */
+			phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN;
+			chan = 0;
+			cpup = phba->sli4_hba.cpu_map;
+			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+				cpup->channel_id = chan;
+				cpup++;
+				chan++;
+				if (chan >= phba->cfg_fcp_io_channel)
+					chan = 0;
+			}
+
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3329 Cannot set affinity: "
+					"Error mapping vector %d (%d)\n",
+					idx, vectors);
+			return 0;
+		}
+found:
+		cpup += cpu;
+		if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP)
+			lpfc_used_cpu[cpu] = phys_id;
+
+		/* Associate vector with selected CPU */
+		cpup->irq = phba->sli4_hba.msix_entries[idx].vector;
+
+		/* Associate IO channel with selected CPU */
+		cpup->channel_id = idx;
+		num_io_channel++;
+
+		if (first_cpu == LPFC_VECTOR_MAP_EMPTY)
+			first_cpu = cpu;
+
+		/* Now affinitize to the selected CPU */
+		mask = &cpup->maskbits;
+		cpumask_clear(mask);
+		cpumask_set_cpu(cpu, mask);
+		i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx].
+					  vector, mask);
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"3330 Set Affinity: CPU %d channel %d "
+				"irq %d (%x)\n",
+				cpu, cpup->channel_id,
+				phba->sli4_hba.msix_entries[idx].vector, i);
+
+		/* Spread vector mapping across multiple physical CPU nodes */
+		phys_id++;
+		if (phys_id > max_phys_id)
+			phys_id = min_phys_id;
+	}
+
+	/*
+	 * Finally fill in the IO channel for any remaining CPUs.
+	 * At this point, all IO channels have been assigned to a specific
+	 * MSIx vector, mapped to a specific CPU.
+	 * Base the remaining IO channel assignments on the IO channels
+	 * already assigned to other CPUs on the same phys_id.
+	 */
+	for (i = min_phys_id; i <= max_phys_id; i++) {
+		/*
+		 * If there are no io channels already mapped to
+		 * this phys_id, just round robin through the io_channels.
+		 * Set up chann[] for round robin.
+		 */
+		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
+			chann[idx] = idx;
+
+		saved_chann = 0;
+		used_chann = 0;
+
+		/*
+		 * First build a list of IO channels already assigned
+		 * to this phys_id before reassigning the same IO
+		 * channels to the remaining CPUs.
+		 */
+		cpup = phba->sli4_hba.cpu_map;
+		cpu = first_cpu;
+		cpup += cpu;
+		for (idx = 0; idx < phba->sli4_hba.num_present_cpu;
+		     idx++) {
+			if (cpup->phys_id == i) {
+				/*
+				 * Save any IO channels that are
+				 * already mapped to this phys_id.
+				 */
+				if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
+					chann[saved_chann] =
+						cpup->channel_id;
+					saved_chann++;
+					goto out;
+				}
+
+				/* See if we are using round-robin */
+				if (saved_chann == 0)
+					saved_chann =
+						phba->cfg_fcp_io_channel;
+
+				/* Associate next IO channel with CPU */
+				cpup->channel_id = chann[used_chann];
+				num_io_channel++;
+				used_chann++;
+				if (used_chann == saved_chann)
+					used_chann = 0;
+
+				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+						"3331 Set IO_CHANN "
+						"CPU %d channel %d\n",
+						idx, cpup->channel_id);
+			}
+out:
+			cpu++;
+			if (cpu >= phba->sli4_hba.num_present_cpu) {
+				cpup = phba->sli4_hba.cpu_map;
+				cpu = 0;
+			} else {
+				cpup++;
+			}
+		}
+	}
+
+	if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) {
+		cpup = phba->sli4_hba.cpu_map;
+		for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
+			if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) {
+				cpup->channel_id = 0;
+				num_io_channel++;
+
+				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+						"3332 Assign IO_CHANN "
+						"CPU %d channel %d\n",
+						idx, cpup->channel_id);
+			}
+			cpup++;
+		}
+	}
+
+	/* Sanity check */
+	if (num_io_channel != phba->sli4_hba.num_present_cpu)
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3333 Set affinity mismatch: "
+				"%d chann != %d cpus: %d vectors\n",
+				num_io_channel, phba->sli4_hba.num_present_cpu,
+				vectors);
+
+	/* Enable using cpu affinity for scheduling */
+	phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
+	return 1;
+}
+
+
+/**
 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
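In outline, the affinity routine records each CPU's socket (phys_id), walks the vectors handing each one a free CPU while stepping round the sockets, pins the vector with irq_set_affinity_hint(), and finally gives every leftover CPU an IO channel. A much-reduced stand-alone sketch of the two phases; the topology is invented, and phase 2 plainly round-robins where the driver prefers channels already bound to the CPU's own socket:

#include <stdio.h>

#define NCPU  8
#define NCHAN 4

int main(void)
{
	/* Fake topology: CPUs 0-3 on socket 0, CPUs 4-7 on socket 1. */
	int phys_id[NCPU] = { 0, 0, 0, 0, 1, 1, 1, 1 };
	int channel[NCPU];
	int cpu, vec, chan = 0;

	for (cpu = 0; cpu < NCPU; cpu++)
		channel[cpu] = -1;          /* LPFC_VECTOR_MAP_EMPTY analogue */

	/*
	 * Phase 1: hand each vector its own CPU, alternating sockets so
	 * the interrupt load spreads across NUMA nodes, as the driver
	 * does by stepping phys_id from min to max.
	 */
	for (vec = 0; vec < NCHAN; vec++) {
		int want = vec % 2;         /* alternate sockets */
		for (cpu = 0; cpu < NCPU; cpu++) {
			if (phys_id[cpu] == want && channel[cpu] < 0) {
				channel[cpu] = vec;
				printf("vector %d -> cpu %d (socket %d)\n",
				       vec, cpu, want);
				break;
			}
		}
	}

	/*
	 * Phase 2: every CPU still without a channel round-robins over
	 * the channel numbers, so submissions from any CPU land somewhere.
	 */
	for (cpu = 0; cpu < NCPU; cpu++) {
		if (channel[cpu] < 0) {
			channel[cpu] = chan;
			chan = (chan + 1) % NCHAN;
		}
		printf("cpu %d uses io channel %d\n", cpu, channel[cpu]);
	}
	return 0;
}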
* @@ -6954,11 +8794,15 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)  	int vectors, rc, index;  	/* Set up MSI-X multi-message vectors */ -	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) +	for (index = 0; index < phba->cfg_fcp_io_channel; index++)  		phba->sli4_hba.msix_entries[index].entry = index;  	/* Configure MSI-X capability structure */ -	vectors = phba->sli4_hba.cfg_eqn; +	vectors = phba->cfg_fcp_io_channel; +	if (phba->cfg_fof) { +		phba->sli4_hba.msix_entries[index].entry = index; +		vectors++; +	}  enable_msix_vectors:  	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,  			     vectors); @@ -6978,29 +8822,28 @@ enable_msix_vectors:  				"message=%d\n", index,  				phba->sli4_hba.msix_entries[index].vector,  				phba->sli4_hba.msix_entries[index].entry); -	/* -	 * Assign MSI-X vectors to interrupt handlers -	 */ -	/* The first vector must associated to slow-path handler for MQ */ -	rc = request_irq(phba->sli4_hba.msix_entries[0].vector, -			 &lpfc_sli4_sp_intr_handler, IRQF_SHARED, -			 LPFC_SP_DRIVER_HANDLER_NAME, phba); -	if (rc) { -		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, -				"0485 MSI-X slow-path request_irq failed " -				"(%d)\n", rc); -		goto msi_fail_out; -	} +	/* Assign MSI-X vectors to interrupt handlers */ +	for (index = 0; index < vectors; index++) { +		memset(&phba->sli4_hba.handler_name[index], 0, 16); +		sprintf((char *)&phba->sli4_hba.handler_name[index], +			 LPFC_DRIVER_HANDLER_NAME"%d", index); -	/* The rest of the vector(s) are associated to fast-path handler(s) */ -	for (index = 1; index < vectors; index++) { -		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1; -		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba; -		rc = request_irq(phba->sli4_hba.msix_entries[index].vector, -				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED, -				 LPFC_FP_DRIVER_HANDLER_NAME, -				 &phba->sli4_hba.fcp_eq_hdl[index - 1]); +		phba->sli4_hba.fcp_eq_hdl[index].idx = index; +		phba->sli4_hba.fcp_eq_hdl[index].phba = phba; +		atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1); +		if (phba->cfg_fof && (index == (vectors - 1))) +			rc = request_irq( +				phba->sli4_hba.msix_entries[index].vector, +				 &lpfc_sli4_fof_intr_handler, IRQF_SHARED, +				 (char *)&phba->sli4_hba.handler_name[index], +				 &phba->sli4_hba.fcp_eq_hdl[index]); +		else +			rc = request_irq( +				phba->sli4_hba.msix_entries[index].vector, +				 &lpfc_sli4_hba_intr_handler, IRQF_SHARED, +				 (char *)&phba->sli4_hba.handler_name[index], +				 &phba->sli4_hba.fcp_eq_hdl[index]);  		if (rc) {  			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,  					"0486 MSI-X fast-path (%d) " @@ -7008,18 +8851,29 @@ enable_msix_vectors:  			goto cfg_fail_out;  		}  	} -	phba->sli4_hba.msix_vec_nr = vectors; +	if (phba->cfg_fof) +		vectors--; + +	if (vectors != phba->cfg_fcp_io_channel) { +		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +				"3238 Reducing IO channels to match number of " +				"MSI-X vectors, requested %d got %d\n", +				phba->cfg_fcp_io_channel, vectors); +		phba->cfg_fcp_io_channel = vectors; +	} + +	lpfc_sli4_set_affinity(phba, vectors);  	return rc;  cfg_fail_out:  	/* free the irq already requested */ -	for (--index; index >= 1; index--) -		free_irq(phba->sli4_hba.msix_entries[index - 1].vector, -			 &phba->sli4_hba.fcp_eq_hdl[index - 1]); - -	/* free the irq already requested */ -	free_irq(phba->sli4_hba.msix_entries[0].vector, phba); +	for (--index; index >= 0; index--) { +		irq_set_affinity_hint(phba->sli4_hba.msix_entries[index]. 
+					  vector, NULL); +		free_irq(phba->sli4_hba.msix_entries[index].vector, +			 &phba->sli4_hba.fcp_eq_hdl[index]); +	}  msi_fail_out:  	/* Unconfigure MSI-X capability structure */ @@ -7040,12 +8894,16 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)  	int index;  	/* Free up MSI-X multi-message vectors */ -	free_irq(phba->sli4_hba.msix_entries[0].vector, phba); - -	for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++) +	for (index = 0; index < phba->cfg_fcp_io_channel; index++) { +		irq_set_affinity_hint(phba->sli4_hba.msix_entries[index]. +					  vector, NULL);  		free_irq(phba->sli4_hba.msix_entries[index].vector, -			 &phba->sli4_hba.fcp_eq_hdl[index - 1]); - +			 &phba->sli4_hba.fcp_eq_hdl[index]); +	} +	if (phba->cfg_fof) { +		free_irq(phba->sli4_hba.msix_entries[index].vector, +			 &phba->sli4_hba.fcp_eq_hdl[index]); +	}  	/* Disable MSI-X */  	pci_disable_msix(phba->pcidev); @@ -7090,11 +8948,15 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)  		return rc;  	} -	for (index = 0; index < phba->cfg_fcp_eq_count; index++) { +	for (index = 0; index < phba->cfg_fcp_io_channel; index++) {  		phba->sli4_hba.fcp_eq_hdl[index].idx = index;  		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;  	} +	if (phba->cfg_fof) { +		phba->sli4_hba.fcp_eq_hdl[index].idx = index; +		phba->sli4_hba.fcp_eq_hdl[index].phba = phba; +	}  	return 0;  } @@ -7170,10 +9032,18 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)  			/* Indicate initialization to INTx mode */  			phba->intr_type = INTx;  			intr_mode = 0; -			for (index = 0; index < phba->cfg_fcp_eq_count; +			for (index = 0; index < phba->cfg_fcp_io_channel;  			     index++) {  				phba->sli4_hba.fcp_eq_hdl[index].idx = index;  				phba->sli4_hba.fcp_eq_hdl[index].phba = phba; +				atomic_set(&phba->sli4_hba.fcp_eq_hdl[index]. +					fcp_eq_in_use, 1); +			} +			if (phba->cfg_fof) { +				phba->sli4_hba.fcp_eq_hdl[index].idx = index; +				phba->sli4_hba.fcp_eq_hdl[index].phba = phba; +				atomic_set(&phba->sli4_hba.fcp_eq_hdl[index]. +					fcp_eq_in_use, 1);  			}  		}  	} @@ -7224,6 +9094,9 @@ lpfc_unset_hba(struct lpfc_hba *phba)  	vport->load_flag |= FC_UNLOADING;  	spin_unlock_irq(shost->host_lock); +	kfree(phba->vpi_bmask); +	kfree(phba->vpi_ids); +  	lpfc_stop_hba_timers(phba);  	phba->pport->work_port_events = 0; @@ -7238,36 +9111,6 @@ lpfc_unset_hba(struct lpfc_hba *phba)  }  /** - * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to unset the HBA device initialization steps to - * a device with SLI-4 interface spec. - **/ -static void -lpfc_sli4_unset_hba(struct lpfc_hba *phba) -{ -	struct lpfc_vport *vport = phba->pport; -	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport); - -	spin_lock_irq(shost->host_lock); -	vport->load_flag |= FC_UNLOADING; -	spin_unlock_irq(shost->host_lock); - -	phba->pport->work_port_events = 0; - -	/* Stop the SLI4 device port */ -	lpfc_stop_port(phba); - -	lpfc_sli4_disable_intr(phba); - -	/* Reset SLI4 HBA FCoE function */ -	lpfc_pci_function_reset(phba); - -	return; -} - -/**   * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy   * @phba: Pointer to HBA context object.   
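With the MSI-X rework above, all vectors share one fast-path handler, and when the PCI core offers fewer vectors than cfg_fcp_io_channel the driver retries with the offered count and then shrinks the channel count to match (the 3238 message). The negotiate-and-clamp pattern in isolation, with a stub standing in for the old pci_enable_msix() contract (0 on success, a positive count when fewer vectors are available; errors omitted for brevity):

#include <stdio.h>

#define HW_VECTORS 3   /* pretend the platform grants at most 3 */

static int fake_pci_enable_msix(int requested)
{
	return requested <= HW_VECTORS ? 0 : HW_VECTORS;
}

int main(void)
{
	int io_channels = 8;
	int vectors = io_channels;     /* one vector per IO channel */
	int rc;

	/* mirrors the enable_msix_vectors retry label above */
	while ((rc = fake_pci_enable_msix(vectors)) > 0)
		vectors = rc;          /* retry with what the bus offers */

	if (vectors != io_channels) {
		printf("reducing IO channels to match MSI-X vectors: "
		       "requested %d got %d\n", io_channels, vectors);
		io_channels = vectors; /* the 3238 message path */
	}
	printf("%d channels on %d vectors\n", io_channels, vectors);
	return 0;
}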
*
@@ -7327,6 +9170,7 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
 {
 	int wait_cnt = 0;
 	LPFC_MBOXQ_t *mboxq;
+	struct pci_dev *pdev = phba->pcidev;
 
 	lpfc_stop_hba_timers(phba);
 	phba->sli4_hba.intr_enable = 0;
@@ -7366,11 +9210,16 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
 	/* Disable PCI subsystem interrupt */
 	lpfc_sli4_disable_intr(phba);
+	/* Disable SR-IOV if enabled */
+	if (phba->cfg_sriov_nr_virtfn)
+		pci_disable_sriov(pdev);
+
 	/* Stop kthread signal shall trigger work_done one more time */
 	kthread_stop(phba->worker_thread);
 
 	/* Reset SLI4 HBA FCoE function */
 	lpfc_pci_function_reset(phba);
+	lpfc_sli4_queue_destroy(phba);
 
 	/* Stop the SLI4 device port */
 	phba->pport->work_port_events = 0;
@@ -7400,11 +9249,11 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	mqe = &mboxq->u.mqe;
 	/* Read the port's SLI4 Parameters port capabilities */
-	lpfc_sli4_params(mboxq);
+	lpfc_pc_sli4_params(mboxq);
 	if (!phba->sli4_hba.intr_enable)
 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 	else {
-		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
+		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
 		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
 	}
@@ -7440,10 +9289,93 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
 	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
 	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
+
+	/* Make sure that sge_supp_len can be handled by the driver */
+	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
+		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+
 	return rc;
 }
 
 /**
+ * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
+ *
+ * This function is called in the SLI4 code path to read the port's
+ * sli4 capabilities.
+ *
+ * This function may be called from any context that can block-wait
+ * for the completion.  The expectation is that this routine is called
+ * typically from probe_one or from the online routine.
+ **/
+int
+lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	int rc;
+	struct lpfc_mqe *mqe = &mboxq->u.mqe;
+	struct lpfc_pc_sli4_params *sli4_params;
+	uint32_t mbox_tmo;
+	int length;
+	struct lpfc_sli4_parameters *mbx_sli4_parameters;
+
+	/*
+	 * By default, the driver assumes the SLI4 port requires RPI
+	 * header postings.  The SLI4_PARAM response will correct this
+	 * assumption.
+	 */ +	phba->sli4_hba.rpi_hdrs_in_use = 1; + +	/* Read the port's SLI4 Config Parameters */ +	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - +		  sizeof(struct lpfc_sli4_cfg_mhdr)); +	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, +			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, +			 length, LPFC_SLI4_MBX_EMBED); +	if (!phba->sli4_hba.intr_enable) +		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); +	else { +		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); +		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); +	} +	if (unlikely(rc)) +		return rc; +	sli4_params = &phba->sli4_hba.pc_sli4_params; +	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; +	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); +	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); +	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); +	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, +					     mbx_sli4_parameters); +	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, +					     mbx_sli4_parameters); +	if (bf_get(cfg_phwq, mbx_sli4_parameters)) +		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; +	else +		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; +	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; +	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters); +	sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); +	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); +	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); +	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); +	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); +	sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); +	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, +					    mbx_sli4_parameters); +	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, +					   mbx_sli4_parameters); +	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); +	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); + +	/* Make sure that sge_supp_len can be handled by the driver */ +	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) +		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; + +	return 0; +} + +/**   * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.   
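lpfc_get_sli4_parameters is essentially a long run of bf_get() field extractions from the GET_SLI4_PARAMETERS response, closed out by the same defensive clamp of sge_supp_len that both parameter readers share. A stand-in for the shift-and-mask extraction and the clamp; the field position and the size limit below are invented values, not the driver's:

#include <stdio.h>
#include <stdint.h>

/* Shift-and-mask stand-in for the driver's bf_get(); CQV_SHIFT/MASK
 * are invented for illustration, the real ones come from lpfc_hw4.h. */
#define FIELD_GET(w, shift, mask) (((w) >> (shift)) & (mask))

#define CQV_SHIFT 14
#define CQV_MASK  0x3
#define MAX_SGE_SIZE 0x80000000U  /* stands in for LPFC_MAX_SGE_SIZE */

int main(void)
{
	uint32_t word = 1u << CQV_SHIFT;      /* fake mailbox response word */
	uint32_t sge_supp_len = 0xFFFFFFFFu;  /* port-advertised SGE length */

	printf("cqv=%u\n", (unsigned)FIELD_GET(word, CQV_SHIFT, CQV_MASK));

	/* The defensive clamp both parameter readers end with. */
	if (sge_supp_len > MAX_SGE_SIZE)
		sge_supp_len = MAX_SGE_SIZE;
	printf("sge_supp_len=0x%x\n", (unsigned)sge_supp_len);
	return 0;
}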
* @pdev: pointer to PCI device   * @pid: pointer to PCI device identifier @@ -7460,7 +9392,7 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)   * 	0 - driver can claim the device   * 	negative value - driver can not claim the device   **/ -static int __devinit +static int  lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)  {  	struct lpfc_hba   *phba; @@ -7476,11 +9408,8 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)  	/* Perform generic PCI device enabling operation */  	error = lpfc_enable_pci_dev(phba); -	if (error) { -		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -				"1401 Failed to enable pci device.\n"); +	if (error)  		goto out_free_phba; -	}  	/* Set up SLI API function jump table for PCI-device group-0 HBAs */  	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); @@ -7527,6 +9456,9 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)  		goto out_free_iocb_list;  	} +	/* Get the default values for Model Name and Description */ +	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); +  	/* Create SCSI host to the physical port */  	error = lpfc_create_shost(phba);  	if (error) { @@ -7627,7 +9559,7 @@ out_free_phba:   * removed from PCI bus, it performs all the necessary cleanup for the HBA   * device to be removed from the PCI subsystem properly.   **/ -static void __devexit +static void  lpfc_pci_remove_one_s3(struct pci_dev *pdev)  {  	struct Scsi_Host  *shost = pci_get_drvdata(pdev); @@ -7646,8 +9578,11 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)  	/* Release all the vports against this physical port */  	vports = lpfc_create_vport_work_array(phba);  	if (vports != NULL) -		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) +		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { +			if (vports[i]->port_type == LPFC_PHYSICAL_PORT) +				continue;  			fc_vport_terminate(vports[i]->fc_vport); +		}  	lpfc_destroy_vport_work_array(phba, vports);  	/* Remove FC host and then SCSI host with the physical port */ @@ -7661,13 +9596,16 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)  	 * the HBA.  	 
*/ -	/* HBA interrupt will be diabled after this call */ +	/* HBA interrupt will be disabled after this call */  	lpfc_sli_hba_down(phba);  	/* Stop kthread signal shall trigger work_done one more time */  	kthread_stop(phba->worker_thread);  	/* Final cleanup of txcmplq and reset the HBA */  	lpfc_sli_brdrestart(phba); +	kfree(phba->vpi_bmask); +	kfree(phba->vpi_ids); +  	lpfc_stop_hba_timers(phba);  	spin_lock_irq(&phba->hbalock);  	list_del_init(&vport->listentry); @@ -7675,10 +9613,13 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)  	lpfc_debugfs_terminate(vport); +	/* Disable SR-IOV if enabled */ +	if (phba->cfg_sriov_nr_virtfn) +		pci_disable_sriov(pdev); +  	/* Disable interrupt */  	lpfc_sli_disable_intr(phba); -	pci_set_drvdata(pdev, NULL);  	scsi_host_put(shost);  	/* @@ -7736,7 +9677,7 @@ lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)  			"0473 PCI device Power Management suspend.\n");  	/* Bring down the device */ -	lpfc_offline_prep(phba); +	lpfc_offline_prep(phba, LPFC_MBX_WAIT);  	lpfc_offline(phba);  	kthread_stop(phba->worker_thread); @@ -7833,9 +9774,6 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)  static void  lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)  { -	struct lpfc_sli *psli = &phba->sli; -	struct lpfc_sli_ring  *pring; -  	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  			"2723 PCI channel I/O abort preparing for recovery\n"); @@ -7843,8 +9781,7 @@ lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)  	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq  	 * and let the SCSI mid-layer to retry them to recover.  	 */ -	pring = &psli->ring[psli->fcp_ring]; -	lpfc_sli_abort_iocb_ring(phba, pring); +	lpfc_sli_abort_fcp_rings(phba);  }  /** @@ -7862,20 +9799,20 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)  			"2710 PCI channel disable preparing for reset\n");  	/* Block any management I/Os to the device */ -	lpfc_block_mgmt_io(phba); +	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);  	/* Block all SCSI devices' I/Os on the host */  	lpfc_scsi_dev_block(phba); +	/* Flush all driver's outstanding SCSI I/Os as we are to reset */ +	lpfc_sli_flush_fcp_rings(phba); +  	/* stop all timers */  	lpfc_stop_hba_timers(phba);  	/* Disable interrupt and pci device */  	lpfc_sli_disable_intr(phba);  	pci_disable_device(phba->pcidev); - -	/* Flush all driver's outstanding SCSI I/Os as we are to reset */ -	lpfc_sli_flush_fcp_rings(phba);  }  /** @@ -8006,7 +9943,7 @@ lpfc_io_slot_reset_s3(struct pci_dev *pdev)  		phba->intr_mode = intr_mode;  	/* Take device offline, it will perform cleanup */ -	lpfc_offline_prep(phba); +	lpfc_offline_prep(phba, LPFC_MBX_WAIT);  	lpfc_offline(phba);  	lpfc_sli_brdrestart(phba); @@ -8060,13 +9997,153 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)  			return 50;  		else if (max_xri <= 1024)  			return 100; -		else +		else if (max_xri <= 1536)  			return 150; +		else if (max_xri <= 2048) +			return 200; +		else +			return 250;  	} else  		return 0;  }  /** + * lpfc_write_firmware - attempt to write a firmware image to the port + * @fw: pointer to firmware image returned from request_firmware. + * @phba: pointer to lpfc hba data structure. 
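The ELS iocb budget above now keeps scaling in 50-entry steps past 1024 XRIs instead of flattening at 150. The visible tail of that ladder as a pure function (tiers below 1024 exist in the driver but fall outside this hunk):

#include <stdio.h>

/* Upper tiers of lpfc_sli4_get_els_iocb_cnt() as changed above. */
static int els_iocb_cnt(int max_xri)
{
	if (max_xri <= 1024)
		return 100;
	if (max_xri <= 1536)
		return 150;
	if (max_xri <= 2048)
		return 200;
	return 250;
}

int main(void)
{
	int xri;

	for (xri = 1024; xri <= 4096; xri += 512)
		printf("max_xri=%d -> %d ELS iocbs\n", xri, els_iocb_cnt(xri));
	return 0;
}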
+ *
+ **/
+static void
+lpfc_write_firmware(const struct firmware *fw, void *context)
+{
+	struct lpfc_hba *phba = (struct lpfc_hba *)context;
+	char fwrev[FW_REV_STR_SIZE];
+	struct lpfc_grp_hdr *image;
+	struct list_head dma_buffer_list;
+	int i, rc = 0;
+	struct lpfc_dmabuf *dmabuf, *next;
+	uint32_t offset = 0, temp_offset = 0;
+
+	/* It can be NULL in no-wait mode; sanity check */
+	if (!fw) {
+		rc = -ENXIO;
+		goto out;
+	}
+	image = (struct lpfc_grp_hdr *)fw->data;
+
+	INIT_LIST_HEAD(&dma_buffer_list);
+	if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
+	    (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
+	     LPFC_FILE_TYPE_GROUP) ||
+	    (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
+	    (be32_to_cpu(image->size) != fw->size)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3022 Invalid FW image found. "
+				"Magic:%x Type:%x ID:%x\n",
+				be32_to_cpu(image->magic_number),
+				bf_get_be32(lpfc_grp_hdr_file_type, image),
+				bf_get_be32(lpfc_grp_hdr_id, image));
+		rc = -EINVAL;
+		goto release_out;
+	}
+	lpfc_decode_firmware_rev(phba, fwrev, 1);
+	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3023 Updating Firmware, Current Version:%s "
+				"New Version:%s\n",
+				fwrev, image->revision);
+		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
+			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
+					 GFP_KERNEL);
+			if (!dmabuf) {
+				rc = -ENOMEM;
+				goto release_out;
+			}
+			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+							  SLI4_PAGE_SIZE,
+							  &dmabuf->phys,
+							  GFP_KERNEL);
+			if (!dmabuf->virt) {
+				kfree(dmabuf);
+				rc = -ENOMEM;
+				goto release_out;
+			}
+			list_add_tail(&dmabuf->list, &dma_buffer_list);
+		}
+		while (offset < fw->size) {
+			temp_offset = offset;
+			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
+				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
+					memcpy(dmabuf->virt,
+					       fw->data + temp_offset,
+					       fw->size - temp_offset);
+					temp_offset = fw->size;
+					break;
+				}
+				memcpy(dmabuf->virt, fw->data + temp_offset,
+				       SLI4_PAGE_SIZE);
+				temp_offset += SLI4_PAGE_SIZE;
+			}
+			rc = lpfc_wr_object(phba, &dma_buffer_list,
+				    (fw->size - offset), &offset);
+			if (rc)
+				goto release_out;
+		}
+		rc = offset;
+	}
+
+release_out:
+	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
+		list_del(&dmabuf->list);
+		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
+				  dmabuf->virt, dmabuf->phys);
+		kfree(dmabuf);
+	}
+	release_firmware(fw);
+out:
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"3024 Firmware update done: %d.\n", rc);
+	return;
+}
+
+/**
+ * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to perform Linux generic firmware upgrade on devices
+ * that support this feature.
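After validating the group header (magic, type, id, and the declared size against fw->size), lpfc_write_firmware streams the image through a short list of SLI4_PAGE_SIZE DMA buffers, copying a short tail chunk when the image does not divide evenly and letting lpfc_wr_object() advance the offset by whatever the port accepted. The chunking arithmetic alone, over ordinary buffers; the buffer count and the all-accepted offset update are simplifications:

#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096  /* stands in for SLI4_PAGE_SIZE */
#define NBUF    2     /* stands in for LPFC_MBX_WR_CONFIG_MAX_BDE */

int main(void)
{
	char fw[10000];           /* pretend firmware image */
	char buf[NBUF][PAGE_SZ];  /* the DMA buffer ring */
	size_t off = 0, tmp, n;
	int i;

	memset(fw, 0xab, sizeof(fw));

	while (off < sizeof(fw)) {
		tmp = off;
		for (i = 0; i < NBUF && tmp < sizeof(fw); i++) {
			n = sizeof(fw) - tmp;
			if (n > PAGE_SZ)
				n = PAGE_SZ;       /* partial tail copies short */
			memcpy(buf[i], fw + tmp, n);
			tmp += n;
		}
		/* here the driver hands the filled buffers to lpfc_wr_object(),
		 * which advances 'off' by however much the port accepted */
		printf("wrote bytes [%zu, %zu)\n", off, tmp);
		off = tmp;                         /* simplified: all accepted */
	}
	return 0;
}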
+ **/ +int +lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade) +{ +	uint8_t file_name[ELX_MODEL_NAME_SIZE]; +	int ret; +	const struct firmware *fw; + +	/* Only supported on SLI4 interface type 2 for now */ +	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != +	    LPFC_SLI_INTF_IF_TYPE_2) +		return -EPERM; + +	snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName); + +	if (fw_upgrade == INT_FW_UPGRADE) { +		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, +					file_name, &phba->pcidev->dev, +					GFP_KERNEL, (void *)phba, +					lpfc_write_firmware); +	} else if (fw_upgrade == RUN_FW_UPGRADE) { +		ret = request_firmware(&fw, file_name, &phba->pcidev->dev); +		if (!ret) +			lpfc_write_firmware(fw, (void *)phba); +	} else { +		ret = -EINVAL; +	} + +	return ret; +} + +/**   * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys   * @pdev: pointer to PCI device   * @pid: pointer to PCI device identifier @@ -8084,15 +10161,15 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)   * 	0 - driver can claim the device   * 	negative value - driver can not claim the device   **/ -static int __devinit +static int  lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)  {  	struct lpfc_hba   *phba;  	struct lpfc_vport *vport = NULL;  	struct Scsi_Host  *shost = NULL; -	int error; +	int error, ret;  	uint32_t cfg_mode, intr_mode; -	int mcnt; +	int adjusted_fcp_io_channel;  	/* Allocate memory for HBA structure */  	phba = lpfc_hba_alloc(pdev); @@ -8101,11 +10178,8 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)  	/* Perform generic PCI device enabling operation */  	error = lpfc_enable_pci_dev(phba); -	if (error) { -		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -				"1409 Failed to enable pci device.\n"); +	if (error)  		goto out_free_phba; -	}  	/* Set up SLI API function jump table for PCI-device group-1 HBAs */  	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); @@ -8149,6 +10223,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)  		goto out_unset_driver_resource_s4;  	} +	INIT_LIST_HEAD(&phba->active_rrq_list); +	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); +  	/* Set up common device driver resources */  	error = lpfc_setup_driver_resource_phase2(phba);  	if (error) { @@ -8157,6 +10234,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)  		goto out_free_iocb_list;  	} +	/* Get the default values for Model Name and Description */ +	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); +  	/* Create SCSI host to the physical port */  	error = lpfc_create_shost(phba);  	if (error) { @@ -8177,59 +10257,44 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)  	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */  	/* Now, trying to enable interrupt and bring up the device */  	cfg_mode = phba->cfg_use_msi; -	while (true) { -		/* Put device to a known state before enabling interrupt */ -		lpfc_stop_port(phba); -		/* Configure and enable interrupt */ -		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); -		if (intr_mode == LPFC_INTR_ERROR) { -			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -					"0426 Failed to enable interrupt.\n"); -			error = -ENODEV; -			goto out_free_sysfs_attr; -		} -		/* Default to single FCP EQ for non-MSI-X */ -		if (phba->intr_type != MSIX) -			phba->cfg_fcp_eq_count = 1; -		else if (phba->sli4_hba.msix_vec_nr < 
phba->cfg_fcp_eq_count) -			phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1; -		/* Set up SLI-4 HBA */ -		if (lpfc_sli4_hba_setup(phba)) { -			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -					"1421 Failed to set up hba\n"); -			error = -ENODEV; -			goto out_disable_intr; -		} -		/* Send NOP mbx cmds for non-INTx mode active interrupt test */ -		if (intr_mode != 0) -			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba, -							    LPFC_ACT_INTR_CNT); - -		/* Check active interrupts received only for MSI/MSI-X */ -		if (intr_mode == 0 || -		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) { -			/* Log the current active interrupt mode */ -			phba->intr_mode = intr_mode; -			lpfc_log_intr_mode(phba, intr_mode); -			break; -		} -		lpfc_printf_log(phba, KERN_INFO, LOG_INIT, -				"0451 Configure interrupt mode (%d) " -				"failed active interrupt test.\n", -				intr_mode); -		/* Unset the preivous SLI-4 HBA setup */ -		lpfc_sli4_unset_hba(phba); -		/* Try next level of interrupt mode */ -		cfg_mode = --intr_mode; +	/* Put device to a known state before enabling interrupt */ +	lpfc_stop_port(phba); +	/* Configure and enable interrupt */ +	intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); +	if (intr_mode == LPFC_INTR_ERROR) { +		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +				"0426 Failed to enable interrupt.\n"); +		error = -ENODEV; +		goto out_free_sysfs_attr; +	} +	/* Default to single EQ for non-MSI-X */ +	if (phba->intr_type != MSIX) +		adjusted_fcp_io_channel = 1; +	else +		adjusted_fcp_io_channel = phba->cfg_fcp_io_channel; +	phba->cfg_fcp_io_channel = adjusted_fcp_io_channel; +	/* Set up SLI-4 HBA */ +	if (lpfc_sli4_hba_setup(phba)) { +		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +				"1421 Failed to set up hba\n"); +		error = -ENODEV; +		goto out_disable_intr;  	} +	/* Log the current active interrupt mode */ +	phba->intr_mode = intr_mode; +	lpfc_log_intr_mode(phba, intr_mode); +  	/* Perform post initialization setup */  	lpfc_post_init_setup(phba); +	/* check for firmware upgrade or downgrade */ +	if (phba->cfg_request_firmware_upgrade) +		ret = lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); +  	/* Check if there are static vports to be created. */  	lpfc_create_static_vport(phba); -  	return 0;  out_disable_intr: @@ -8264,7 +10329,7 @@ out_free_phba:   * removed from PCI bus, it performs all the necessary cleanup for the HBA   * device to be removed from the PCI subsystem properly.   **/ -static void __devexit +static void  lpfc_pci_remove_one_s4(struct pci_dev *pdev)  {  	struct Scsi_Host *shost = pci_get_drvdata(pdev); @@ -8284,8 +10349,11 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)  	/* Release all the vports against this physical port */  	vports = lpfc_create_vport_work_array(phba);  	if (vports != NULL) -		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) +		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { +			if (vports[i]->port_type == LPFC_PHYSICAL_PORT) +				continue;  			fc_vport_terminate(vports[i]->fc_vport); +		}  	lpfc_destroy_vport_work_array(phba, vports);  	/* Remove FC host and then SCSI host with the physical port */ @@ -8311,6 +10379,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)  	 * buffers are released to their corresponding pools here.  	 
*/  	lpfc_scsi_free(phba); +  	lpfc_sli4_driver_resource_unset(phba);  	/* Unmap adapter Control and Doorbell registers */ @@ -8357,12 +10426,13 @@ lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)  			"2843 PCI device Power Management suspend.\n");  	/* Bring down the device */ -	lpfc_offline_prep(phba); +	lpfc_offline_prep(phba, LPFC_MBX_WAIT);  	lpfc_offline(phba);  	kthread_stop(phba->worker_thread);  	/* Disable interrupt from device */  	lpfc_sli4_disable_intr(phba); +	lpfc_sli4_queue_destroy(phba);  	/* Save device state to PCI config space */  	pci_save_state(pdev); @@ -8454,17 +10524,13 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev)  static void  lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)  { -	struct lpfc_sli *psli = &phba->sli; -	struct lpfc_sli_ring  *pring; -  	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  			"2828 PCI channel I/O abort preparing for recovery\n");  	/*  	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq  	 * and let the SCSI mid-layer to retry them to recover.  	 */ -	pring = &psli->ring[psli->fcp_ring]; -	lpfc_sli_abort_iocb_ring(phba, pring); +	lpfc_sli_abort_fcp_rings(phba);  }  /** @@ -8482,20 +10548,21 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)  			"2826 PCI channel disable preparing for reset\n");  	/* Block any management I/Os to the device */ -	lpfc_block_mgmt_io(phba); +	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);  	/* Block all SCSI devices' I/Os on the host */  	lpfc_scsi_dev_block(phba); +	/* Flush all driver's outstanding SCSI I/Os as we are to reset */ +	lpfc_sli_flush_fcp_rings(phba); +  	/* stop all timers */  	lpfc_stop_hba_timers(phba);  	/* Disable interrupt and pci device */  	lpfc_sli4_disable_intr(phba); +	lpfc_sli4_queue_destroy(phba);  	pci_disable_device(phba->pcidev); - -	/* Flush all driver's outstanding SCSI I/Os as we are to reset */ -	lpfc_sli_flush_fcp_rings(phba);  }  /** @@ -8600,6 +10667,13 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)  	}  	pci_restore_state(pdev); + +	/* +	 * As the new kernel behavior of pci_restore_state() API call clears +	 * device saved_state flag, need to save the restored state again. +	 */ +	pci_save_state(pdev); +  	if (pdev->is_busmaster)  		pci_set_master(pdev); @@ -8647,7 +10721,7 @@ lpfc_io_resume_s4(struct pci_dev *pdev)  	 */  	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {  		/* Perform device reset */ -		lpfc_offline_prep(phba); +		lpfc_offline_prep(phba, LPFC_MBX_WAIT);  		lpfc_offline(phba);  		lpfc_sli_brdrestart(phba);  		/* Bring the device back online */ @@ -8677,7 +10751,7 @@ lpfc_io_resume_s4(struct pci_dev *pdev)   * 	0 - driver can claim the device   * 	negative value - driver can not claim the device   **/ -static int __devinit +static int  lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)  {  	int rc; @@ -8705,7 +10779,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)   * remove routine, which will perform all the necessary cleanup for the   * device to be removed from the PCI subsystem properly.   **/ -static void __devexit +static void  lpfc_pci_remove_one(struct pci_dev *pdev)  {  	struct Scsi_Host *shost = pci_get_drvdata(pdev); @@ -8907,6 +10981,168 @@ lpfc_io_resume(struct pci_dev *pdev)  	return;  } +/** + * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter + * @phba: pointer to lpfc hba data structure. + * + * This routine checks to see if OAS is supported for this adapter. If + * supported, the configure Flash Optimized Fabric flag is set.  
Otherwise,
+ * the enable oas flag is cleared and the pool created for OAS device data
+ * is destroyed.
+ *
+ **/
+void
+lpfc_sli4_oas_verify(struct lpfc_hba *phba)
+{
+
+	if (!phba->cfg_EnableXLane)
+		return;
+
+	if (phba->sli4_hba.pc_sli4_params.oas_supported) {
+		phba->cfg_fof = 1;
+	} else {
+		phba->cfg_fof = 0;
+		if (phba->device_data_mem_pool)
+			mempool_destroy(phba->device_data_mem_pool);
+		phba->device_data_mem_pool = NULL;
+	}
+
+	return;
+}
+
+/**
+ * lpfc_fof_queue_setup - Set up all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up all the fof queues for the FC HBA
+ * operation.
+ *
+ * Return codes
+ *      0 - successful
+ *      -ENOMEM - No available memory
+ **/
+int
+lpfc_fof_queue_setup(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	int rc;
+
+	rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
+	if (rc)
+		return -ENOMEM;
+
+	if (phba->cfg_fof) {
+
+		rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
+				    phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
+		if (rc)
+			goto out_oas_cq;
+
+		rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
+				    phba->sli4_hba.oas_cq, LPFC_FCP);
+		if (rc)
+			goto out_oas_wq;
+
+		phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING];
+		phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING];
+	}
+
+	return 0;
+
+out_oas_wq:
+	lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
+out_oas_cq:
+	lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
+	return rc;
+
+}
+
+/**
+ * lpfc_fof_queue_create - Create all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate all the fof queues for the FC HBA
+ * operation. For each SLI4 queue type, the parameters such as queue entry
+ * count (queue depth) shall be taken from the module parameter. For now,
+ * we just use some constant number as a placeholder.
+ *
+ * Return codes
+ *      0 - successful
+ *      -ENOMEM - No available memory
+ *      -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_fof_queue_create(struct lpfc_hba *phba)
+{
+	struct lpfc_queue *qdesc;
+
+	/* Create FOF EQ */
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+				      phba->sli4_hba.eq_ecount);
+	if (!qdesc)
+		goto out_error;
+
+	phba->sli4_hba.fof_eq = qdesc;
+
+	if (phba->cfg_fof) {
+
+		/* Create OAS CQ */
+		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+						      phba->sli4_hba.cq_ecount);
+		if (!qdesc)
+			goto out_error;
+
+		phba->sli4_hba.oas_cq = qdesc;
+
+		/* Create OAS WQ */
+		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+					      phba->sli4_hba.wq_ecount);
+		if (!qdesc)
+			goto out_error;
+
+		phba->sli4_hba.oas_wq = qdesc;
+
+	}
+	return 0;
+
+out_error:
+	lpfc_fof_queue_destroy(phba);
+	return -ENOMEM;
+}
+
+/**
+ * lpfc_fof_queue_destroy - Destroy all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release all the SLI4 queues associated with
+ * the FC HBA operation.
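lpfc_fof_queue_setup uses the driver's usual goto-unwind idiom: each successful create gains an error label that later failures jump past, so teardown happens in exact reverse order. One wrinkle worth noticing above: the labels are named for the step that failed (out_oas_wq destroys the CQ, out_oas_cq destroys the EQ), not for what they free. A detached sketch of the idiom with placeholder names, here labeling each exit by what it releases:

#include <stdio.h>
#include <stdlib.h>

struct q { const char *name; };

static struct q *q_create(const char *name, int fail)
{
	struct q *p;

	if (fail)
		return NULL;
	p = malloc(sizeof(*p));
	if (p)
		p->name = name;
	return p;
}

static void q_destroy(struct q *p)
{
	if (p) {
		printf("destroy %s\n", p->name);
		free(p);
	}
}

/* Create EQ, then CQ, then WQ; unwind in reverse on any failure. */
static int fof_setup(int fail_at)
{
	struct q *eq, *cq, *wq;

	eq = q_create("fof_eq", fail_at == 0);
	if (!eq)
		return -1;
	cq = q_create("oas_cq", fail_at == 1);
	if (!cq)
		goto out_eq;
	wq = q_create("oas_wq", fail_at == 2);
	if (!wq)
		goto out_cq;
	printf("setup ok\n");
	q_destroy(wq); q_destroy(cq); q_destroy(eq);  /* normal teardown */
	return 0;

out_cq:
	q_destroy(cq);
out_eq:
	q_destroy(eq);
	return -1;
}

int main(void)
{
	fof_setup(2);   /* WQ creation fails: CQ then EQ get unwound */
	fof_setup(-1);  /* everything succeeds */
	return 0;
}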
+ * + * Return codes + *      0 - successful + **/ +int +lpfc_fof_queue_destroy(struct lpfc_hba *phba) +{ +	/* Release FOF Event queue */ +	if (phba->sli4_hba.fof_eq != NULL) { +		lpfc_sli4_queue_free(phba->sli4_hba.fof_eq); +		phba->sli4_hba.fof_eq = NULL; +	} + +	/* Release OAS Completion queue */ +	if (phba->sli4_hba.oas_cq != NULL) { +		lpfc_sli4_queue_free(phba->sli4_hba.oas_cq); +		phba->sli4_hba.oas_cq = NULL; +	} + +	/* Release OAS Work queue */ +	if (phba->sli4_hba.oas_wq != NULL) { +		lpfc_sli4_queue_free(phba->sli4_hba.oas_wq); +		phba->sli4_hba.oas_wq = NULL; +	} +	return 0; +} +  static struct pci_device_id lpfc_id_table[] = {  	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,  		PCI_ANY_ID, PCI_ANY_ID, }, @@ -8990,12 +11226,24 @@ static struct pci_device_id lpfc_id_table[] = {  		PCI_ANY_ID, PCI_ANY_ID, },  	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,  		PCI_ANY_ID, PCI_ANY_ID, }, +	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC, +		PCI_ANY_ID, PCI_ANY_ID, }, +	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE, +		PCI_ANY_ID, PCI_ANY_ID, }, +	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF, +		PCI_ANY_ID, PCI_ANY_ID, }, +	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF, +		PCI_ANY_ID, PCI_ANY_ID, }, +	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK, +		PCI_ANY_ID, PCI_ANY_ID, }, +	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF, +		PCI_ANY_ID, PCI_ANY_ID, },  	{ 0 }  };  MODULE_DEVICE_TABLE(pci, lpfc_id_table); -static struct pci_error_handlers lpfc_err_handler = { +static const struct pci_error_handlers lpfc_err_handler = {  	.error_detected = lpfc_io_error_detected,  	.slot_reset = lpfc_io_slot_reset,  	.resume = lpfc_io_resume, @@ -9005,12 +11253,22 @@ static struct pci_driver lpfc_driver = {  	.name		= LPFC_DRIVER_NAME,  	.id_table	= lpfc_id_table,  	.probe		= lpfc_pci_probe_one, -	.remove		= __devexit_p(lpfc_pci_remove_one), +	.remove		= lpfc_pci_remove_one,  	.suspend        = lpfc_pci_suspend_one,  	.resume		= lpfc_pci_resume_one,  	.err_handler    = &lpfc_err_handler,  }; +static const struct file_operations lpfc_mgmt_fop = { +	.owner = THIS_MODULE, +}; + +static struct miscdevice lpfc_mgmt_dev = { +	.minor = MISC_DYNAMIC_MINOR, +	.name = "lpfcmgmt", +	.fops = &lpfc_mgmt_fop, +}; +  /**   * lpfc_init - lpfc module initialization routine   * @@ -9026,11 +11284,17 @@ static struct pci_driver lpfc_driver = {  static int __init  lpfc_init(void)  { +	int cpu;  	int error = 0;  	printk(LPFC_MODULE_DESC "\n");  	printk(LPFC_COPYRIGHT "\n"); +	error = misc_register(&lpfc_mgmt_dev); +	if (error) +		printk(KERN_ERR "Could not register lpfcmgmt device, " +			"misc_register returned with status %d", error); +  	if (lpfc_enable_npiv) {  		lpfc_transport_functions.vport_create = lpfc_vport_create;  		lpfc_transport_functions.vport_delete = lpfc_vport_delete; @@ -9047,6 +11311,13 @@ lpfc_init(void)  			return -ENOMEM;  		}  	} + +	/* Initialize in case vector mapping is needed */ +	lpfc_used_cpu = NULL; +	lpfc_present_cpu = 0; +	for_each_present_cpu(cpu) +		lpfc_present_cpu++; +  	error = pci_register_driver(&lpfc_driver);  	if (error) {  		fc_release_transport(lpfc_transport_template); @@ -9067,6 +11338,7 @@ lpfc_init(void)  static void __exit  lpfc_exit(void)  { +	misc_deregister(&lpfc_mgmt_dev);  	pci_unregister_driver(&lpfc_driver);  	fc_release_transport(lpfc_transport_template);  	if (lpfc_enable_npiv) @@ -9084,6 +11356,7 @@ lpfc_exit(void)  				(1L << _dump_buf_dif_order), _dump_buf_dif);  		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);  	} +	
kfree(lpfc_used_cpu);
 }
 
 module_init(lpfc_init);
