path: root/drivers/scsi/lpfc/lpfc_sli.c
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
 drivers/scsi/lpfc/lpfc_sli.c | 1174 ++++++++++++++++++++++++++++++----------
 1 file changed, 946 insertions(+), 228 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 74b67d98e95..32ada050557 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2012 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -71,6 +71,8 @@ static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
uint32_t);
+static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
+static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -263,6 +265,16 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
return NULL;
q->hba_index = idx;
+
+ /*
+ * insert barrier for instruction interlock : data from the hardware
+ * must have the valid bit checked before it can be copied and acted
+ * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
+ * instructions allowing action on content before valid bit checked,
+ * add barrier here as well. May not be needed as "content" is a
+ * single 32-bit entity here (vs multi word structure for cq's).
+ */
+ mb();
return eqe;
}
@@ -368,6 +380,17 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
cqe = q->qe[q->hba_index].cqe;
q->hba_index = idx;
+
+ /*
+ * insert barrier for instruction interlock : data from the hardware
+ * must have the valid bit checked before it can be copied and acted
+ * upon. Speculative instructions were allowing a bcopy at the start
+ * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
+ * after our return, to copy data before the valid bit check above
+ * was done. As such, some of the copied data was stale. The barrier
+ * ensures the check is before any data is copied.
+ */
+ mb();
return cqe;
}
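
The barrier rationale in the two comments above can be illustrated outside the driver. Below is a minimal user-space sketch of the same check-then-fence-then-copy ordering; the names (sw_cqe, sw_cq_get) are invented for illustration, and a C11 acquire fence stands in for the heavier mb() the patch adds.

	#include <stdatomic.h>
	#include <string.h>

	struct sw_cqe {
		_Atomic unsigned int valid;	/* set last by the producer ("hardware") */
		unsigned int payload[15];	/* multi-word completion data */
	};

	/* Copy the entry only after the valid bit is observed; the acquire
	 * fence keeps the payload reads from being hoisted above the
	 * valid-bit check, which is the speculative reordering the driver
	 * comment describes. */
	static int sw_cq_get(struct sw_cqe *slot, unsigned int out[15])
	{
		if (!atomic_load_explicit(&slot->valid, memory_order_relaxed))
			return 0;
		atomic_thread_fence(memory_order_acquire);	/* analogue of the mb() */
		memcpy(out, slot->payload, sizeof(slot->payload));
		return 1;
	}

Without the fence, a sufficiently aggressive compiler or CPU may begin the copy before the valid check resolves, which is exactly the stale-data symptom the patch reports for lpfc_sli4_fp_handle_wcqe().
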
@@ -438,11 +461,12 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
struct lpfc_rqe *temp_hrqe;
struct lpfc_rqe *temp_drqe;
struct lpfc_register doorbell;
- int put_index = hq->host_index;
+ int put_index;
/* sanity check on queue memory */
if (unlikely(!hq) || unlikely(!dq))
return -ENOMEM;
+ put_index = hq->host_index;
temp_hrqe = hq->qe[hq->host_index].rqe;
temp_drqe = dq->qe[dq->host_index].rqe;
@@ -632,7 +656,7 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
if (!ndlp)
goto out;
- if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
+ if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
rrq->send_rrq = 0;
rrq->xritag = 0;
rrq->rrq_stop_time = 0;
@@ -666,7 +690,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
- next_time = jiffies + HZ * (phba->fc_ratov + 1);
+ next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
list_for_each_entry_safe(rrq, nextrrq,
&phba->active_rrq_list, list) {
if (time_after(jiffies, rrq->rrq_stop_time))
@@ -675,7 +699,8 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
next_time = rrq->rrq_stop_time;
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
- if (!list_empty(&phba->active_rrq_list))
+ if ((!list_empty(&phba->active_rrq_list)) &&
+ (!(phba->pport->load_flag & FC_UNLOADING)))
mod_timer(&phba->rrq_tmr, next_time);
list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
list_del(&rrq->list);
@@ -781,7 +806,7 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
return;
spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
- next_time = jiffies + HZ * (phba->fc_ratov * 2);
+ next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2));
list_splice_init(&phba->active_rrq_list, &rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -789,7 +814,9 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
list_del(&rrq->list);
lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
}
- if (!list_empty(&phba->active_rrq_list))
+ if ((!list_empty(&phba->active_rrq_list)) &&
+ (!(phba->pport->load_flag & FC_UNLOADING)))
+
mod_timer(&phba->rrq_tmr, next_time);
}
@@ -810,7 +837,9 @@ lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
{
if (!ndlp)
return 0;
- if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
+ if (!ndlp->active_rrqs_xri_bitmap)
+ return 0;
+ if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
return 1;
else
return 0;
@@ -860,7 +889,10 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
goto out;
- if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
+ if (!ndlp->active_rrqs_xri_bitmap)
+ goto out;
+
+ if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
goto out;
spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -872,14 +904,17 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
xritag, rxid, ndlp->nlp_DID, send_rrq);
return -EINVAL;
}
- rrq->send_rrq = send_rrq;
+ if (phba->cfg_enable_rrq == 1)
+ rrq->send_rrq = send_rrq;
+ else
+ rrq->send_rrq = 0;
rrq->xritag = xritag;
- rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
+ rrq->rrq_stop_time = jiffies +
+ msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
rrq->ndlp = ndlp;
rrq->nlp_DID = ndlp->nlp_DID;
rrq->vport = ndlp->vport;
rrq->rxid = rxid;
- rrq->send_rrq = send_rrq;
spin_lock_irqsave(&phba->hbalock, iflags);
empty = list_empty(&phba->active_rrq_list);
list_add_tail(&rrq->list, &phba->active_rrq_list);
@@ -923,8 +958,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
!(piocbq->iocb_flag & LPFC_IO_LIBDFC))
ndlp = piocbq->context_un.ndlp;
- else if ((piocbq->iocb.ulpCommand == CMD_ELS_REQUEST64_CR) &&
- (piocbq->iocb_flag & LPFC_IO_LIBDFC))
+ else if (piocbq->iocb_flag & LPFC_IO_LIBDFC)
ndlp = piocbq->context_un.ndlp;
else
ndlp = piocbq->context1;
@@ -1008,6 +1042,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
else
sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
+
if (sglq) {
if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
(sglq->state != SGL_XRI_ABORTED)) {
@@ -1024,7 +1059,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
&phba->sli4_hba.lpfc_sgl_list);
/* Check if TXQ queue needs to be serviced */
- if (pring->txq_cnt)
+ if (!list_empty(&pring->txq))
lpfc_worker_wake_up(phba);
}
}
@@ -1055,6 +1090,7 @@ __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
+
/*
* Clean all volatile data fields, preserve iotag and node struct.
*/
@@ -1121,7 +1157,6 @@ lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
while (!list_empty(iocblist)) {
list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
-
if (!piocb->iocb_cmpl)
lpfc_sli_release_iocbq(phba, piocb);
else {
@@ -1309,18 +1344,17 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
{
list_add_tail(&piocb->list, &pring->txcmplq);
piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
- pring->txcmplq_cnt++;
- if (pring->txcmplq_cnt > pring->txcmplq_max)
- pring->txcmplq_max = pring->txcmplq_cnt;
if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
(piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
- (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
+ (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) &&
+ (!(piocb->vport->load_flag & FC_UNLOADING))) {
if (!piocb->vport)
BUG();
else
mod_timer(&piocb->vport->els_tmofunc,
- jiffies + HZ * (phba->fc_ratov << 1));
+ jiffies +
+ msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
}
@@ -1343,8 +1377,6 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
struct lpfc_iocbq *cmd_iocb;
list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
- if (cmd_iocb != NULL)
- pring->txq_cnt--;
return cmd_iocb;
}
@@ -1613,8 +1645,9 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
* (c) link attention events can be processed (fcp ring only)
* (d) IOCB processing is not blocked by the outstanding mbox command.
*/
- if (pring->txq_cnt &&
- lpfc_is_link_up(phba) &&
+
+ if (lpfc_is_link_up(phba) &&
+ (!list_empty(&pring->txq)) &&
(pring->ringno != phba->sli.fcp_ring ||
phba->sli.sli_flag & LPFC_PROCESS_LA)) {
@@ -2322,7 +2355,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
/* Mailbox cmd <cmd> Cmpl <cmpl> */
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
- "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+ "x%x x%x x%x\n",
pmb->vport ? pmb->vport->vpi : 0,
pmbox->mbxCommand,
lpfc_sli_config_mbox_subsys_get(phba, pmb),
@@ -2336,7 +2370,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
pmbox->un.varWords[4],
pmbox->un.varWords[5],
pmbox->un.varWords[6],
- pmbox->un.varWords[7]);
+ pmbox->un.varWords[7],
+ pmbox->un.varWords[8],
+ pmbox->un.varWords[9],
+ pmbox->un.varWords[10]);
if (pmb->mbox_cmpl)
pmb->mbox_cmpl(phba,pmb);
@@ -2611,7 +2648,6 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
cmd_iocb = phba->sli.iocbq_lookup[iotag];
list_del_init(&cmd_iocb->list);
if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
- pring->txcmplq_cnt--;
cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
}
return cmd_iocb;
@@ -2649,7 +2685,6 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
/* remove from txcmpl queue list */
list_del_init(&cmd_iocb->list);
cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
- pring->txcmplq_cnt--;
return cmd_iocb;
}
}
@@ -2892,8 +2927,9 @@ void lpfc_poll_eratt(unsigned long ptr)
lpfc_worker_wake_up(phba);
else
/* Restart the timer for next eratt poll */
- mod_timer(&phba->eratt_poll, jiffies +
- HZ * LPFC_ERATT_POLL_INTERVAL);
+ mod_timer(&phba->eratt_poll,
+ jiffies +
+ msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
return;
}
@@ -3257,7 +3293,7 @@ lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (free_saveq) {
list_for_each_entry_safe(rspiocbp, next_iocb,
&saveq->list, list) {
- list_del(&rspiocbp->list);
+ list_del_init(&rspiocbp->list);
__lpfc_sli_release_iocbq(phba, rspiocbp);
}
__lpfc_sli_release_iocbq(phba, saveq);
@@ -3496,15 +3532,27 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
/* Error everything on txq and txcmplq
* First do the txq.
*/
- spin_lock_irq(&phba->hbalock);
- list_splice_init(&pring->txq, &completions);
- pring->txq_cnt = 0;
+ if (phba->sli_rev >= LPFC_SLI_REV4) {
+ spin_lock_irq(&pring->ring_lock);
+ list_splice_init(&pring->txq, &completions);
+ pring->txq_cnt = 0;
+ spin_unlock_irq(&pring->ring_lock);
- /* Next issue ABTS for everything on the txcmplq */
- list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ spin_lock_irq(&phba->hbalock);
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ spin_unlock_irq(&phba->hbalock);
+ } else {
+ spin_lock_irq(&phba->hbalock);
+ list_splice_init(&pring->txq, &completions);
+ pring->txq_cnt = 0;
- spin_unlock_irq(&phba->hbalock);
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ spin_unlock_irq(&phba->hbalock);
+ }
/* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
@@ -3512,6 +3560,36 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
}
/**
+ * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
+ * @phba: Pointer to HBA context object.
+ *
+ * This function aborts all iocbs in FCP rings and frees all the iocb
+ * objects in txq. This function issues an abort iocb for all the iocb commands
+ * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
+ * the return of this function. The caller is not required to hold any locks.
+ **/
+void
+lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+ uint32_t i;
+
+ /* Look on all the FCP Rings for the iotag */
+ if (phba->sli_rev >= LPFC_SLI_REV4) {
+ for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
+ pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
+ lpfc_sli_abort_iocb_ring(phba, pring);
+ }
+ } else {
+ pring = &psli->ring[psli->fcp_ring];
+ lpfc_sli_abort_iocb_ring(phba, pring);
+ }
+}
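
The loop in lpfc_sli_abort_fcp_rings() relies on the FCP rings being laid out after the fixed SLI3 rings in one flat array. A rough sketch of that indexing only; every name here (NUM_SLI3_RINGS, io_ring, abort_one) is invented for illustration and is not a driver API.

	#define NUM_SLI3_RINGS	4	/* hypothetical stand-in for MAX_SLI3_CONFIGURED_RINGS */

	struct io_ring { int id; };

	/* Walk every FCP ring: one fixed ring on SLI3, one ring per I/O
	 * channel on SLI4, with the SLI4 rings placed after the fixed
	 * SLI3 ring slots. */
	static void abort_all_fcp_rings(struct io_ring *rings, int sli_rev,
					int fcp_ring_idx, int io_channels,
					void (*abort_one)(struct io_ring *))
	{
		int i;

		if (sli_rev >= 4) {
			for (i = 0; i < io_channels; i++)
				abort_one(&rings[NUM_SLI3_RINGS + i]);
		} else {
			abort_one(&rings[fcp_ring_idx]);
		}
	}
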
+
+
+/**
* lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
* @phba: Pointer to HBA context object.
*
@@ -3528,30 +3606,55 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
LIST_HEAD(txcmplq);
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
-
- /* Currently, only one fcp ring */
- pring = &psli->ring[psli->fcp_ring];
+ uint32_t i;
spin_lock_irq(&phba->hbalock);
- /* Retrieve everything on txq */
- list_splice_init(&pring->txq, &txq);
- pring->txq_cnt = 0;
-
- /* Retrieve everything on the txcmplq */
- list_splice_init(&pring->txcmplq, &txcmplq);
- pring->txcmplq_cnt = 0;
-
/* Indicate the I/O queues are flushed */
phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
spin_unlock_irq(&phba->hbalock);
- /* Flush the txq */
- lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
- IOERR_SLI_DOWN);
+ /* Look on all the FCP Rings for the iotag */
+ if (phba->sli_rev >= LPFC_SLI_REV4) {
+ for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
+ pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
+
+ spin_lock_irq(&pring->ring_lock);
+ /* Retrieve everything on txq */
+ list_splice_init(&pring->txq, &txq);
+ /* Retrieve everything on the txcmplq */
+ list_splice_init(&pring->txcmplq, &txcmplq);
+ pring->txq_cnt = 0;
+ pring->txcmplq_cnt = 0;
+ spin_unlock_irq(&pring->ring_lock);
+
+ /* Flush the txq */
+ lpfc_sli_cancel_iocbs(phba, &txq,
+ IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_DOWN);
+ /* Flush the txcmpq */
+ lpfc_sli_cancel_iocbs(phba, &txcmplq,
+ IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_DOWN);
+ }
+ } else {
+ pring = &psli->ring[psli->fcp_ring];
- /* Flush the txcmpq */
- lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
- IOERR_SLI_DOWN);
+ spin_lock_irq(&phba->hbalock);
+ /* Retrieve everything on txq */
+ list_splice_init(&pring->txq, &txq);
+ /* Retrieve everything on the txcmplq */
+ list_splice_init(&pring->txcmplq, &txcmplq);
+ pring->txq_cnt = 0;
+ pring->txcmplq_cnt = 0;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Flush the txq */
+ lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_DOWN);
+ /* Flush the txcmpq */
+ lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_DOWN);
+ }
}
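
The flush path above follows a common splice-then-cancel shape: entries are moved off the txq/txcmplq while the appropriate lock is held, and the cancellations run with the lock released. A minimal user-space analogue, with invented names and a pthread mutex standing in for the driver's spinlocks:

	#include <pthread.h>
	#include <stddef.h>

	struct io_node {
		struct io_node *next;
	};

	struct io_queue {
		pthread_mutex_t lock;
		struct io_node *head;
	};

	/* Detach the whole list under the lock, then fail the entries with
	 * the lock dropped, so completion callbacks never run while the
	 * queue lock is held. */
	static void flush_queue(struct io_queue *q, void (*fail)(struct io_node *))
	{
		struct io_node *list, *n;

		pthread_mutex_lock(&q->lock);
		list = q->head;			/* splice everything off the queue */
		q->head = NULL;
		pthread_mutex_unlock(&q->lock);

		while ((n = list) != NULL) {	/* cancel outside the lock */
			list = n->next;
			fail(n);
		}
	}
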
/**
@@ -3954,12 +4057,13 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
uint16_t cfg_value;
- int rc;
+ int rc = 0;
/* Reset HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "0295 Reset HBA Data: x%x x%x\n",
- phba->pport->port_state, psli->sli_flag);
+ "0295 Reset HBA Data: x%x x%x x%x\n",
+ phba->pport->port_state, psli->sli_flag,
+ phba->hba_flag);
/* perform board reset */
phba->fc_eventTag = 0;
@@ -3972,6 +4076,12 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
phba->fcf.fcf_flag = 0;
spin_unlock_irq(&phba->hbalock);
+ /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
+ if (phba->hba_flag & HBA_FW_DUMP_OP) {
+ phba->hba_flag &= ~HBA_FW_DUMP_OP;
+ return rc;
+ }
+
/* Now physically reset the device */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0389 Performing PCI function reset!\n");
@@ -4565,7 +4675,8 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2708 This device does not support "
- "Advanced Error Reporting (AER)\n");
+ "Advanced Error Reporting (AER): %d\n",
+ rc);
phba->cfg_aer_support = 0;
}
}
@@ -4967,12 +5078,19 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
LPFC_QUEUE_REARM);
} while (++fcp_eqidx < phba->cfg_fcp_io_channel);
}
+
+ if (phba->cfg_fof)
+ lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
+
if (phba->sli4_hba.hba_eq) {
for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
fcp_eqidx++)
lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
LPFC_QUEUE_REARM);
}
+
+ if (phba->cfg_fof)
+ lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM);
}
/**
@@ -5498,6 +5616,7 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
list_del_init(&rsrc_blk->list);
kfree(rsrc_blk);
}
+ phba->sli4_hba.max_cfg_param.vpi_used = 0;
break;
case LPFC_RSC_TYPE_FCOE_XRI:
kfree(phba->sli4_hba.xri_bmask);
@@ -5798,6 +5917,7 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
} else {
kfree(phba->vpi_bmask);
+ phba->sli4_hba.max_cfg_param.vpi_used = 0;
kfree(phba->vpi_ids);
bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
kfree(phba->sli4_hba.xri_bmask);
@@ -5979,7 +6099,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
struct lpfc_sglq *sglq_entry = NULL;
struct lpfc_sglq *sglq_entry_next = NULL;
struct lpfc_sglq *sglq_entry_first = NULL;
- int status, post_cnt = 0, num_posted = 0, block_cnt = 0;
+ int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0;
int last_xritag = NO_XRI;
LIST_HEAD(prep_sgl_list);
LIST_HEAD(blck_sgl_list);
@@ -5987,10 +6107,11 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
LIST_HEAD(post_sgl_list);
LIST_HEAD(free_sgl_list);
- spin_lock(&phba->hbalock);
+ spin_lock_irq(&phba->hbalock);
list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
- spin_unlock(&phba->hbalock);
+ spin_unlock_irq(&phba->hbalock);
+ total_cnt = phba->sli4_hba.els_xri_cnt;
list_for_each_entry_safe(sglq_entry, sglq_entry_next,
&allc_sgl_list, list) {
list_del_init(&sglq_entry->list);
@@ -6042,9 +6163,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
sglq_entry->sli4_xritag);
list_add_tail(&sglq_entry->list,
&free_sgl_list);
- spin_lock_irq(&phba->hbalock);
- phba->sli4_hba.els_xri_cnt--;
- spin_unlock_irq(&phba->hbalock);
+ total_cnt--;
}
}
}
@@ -6072,9 +6191,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
(sglq_entry_first->sli4_xritag +
post_cnt - 1));
list_splice_init(&blck_sgl_list, &free_sgl_list);
- spin_lock_irq(&phba->hbalock);
- phba->sli4_hba.els_xri_cnt -= post_cnt;
- spin_unlock_irq(&phba->hbalock);
+ total_cnt -= post_cnt;
}
/* don't reset xirtag due to hole in xri block */
@@ -6084,16 +6201,18 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
/* reset els sgl post count for next round of posting */
post_cnt = 0;
}
+ /* update the number of XRIs posted for ELS */
+ phba->sli4_hba.els_xri_cnt = total_cnt;
/* free the els sgls failed to post */
lpfc_free_sgl_list(phba, &free_sgl_list);
/* push els sgls posted to the availble list */
if (!list_empty(&post_sgl_list)) {
- spin_lock(&phba->hbalock);
+ spin_lock_irq(&phba->hbalock);
list_splice_init(&post_sgl_list,
&phba->sli4_hba.lpfc_sgl_list);
- spin_unlock(&phba->hbalock);
+ spin_unlock_irq(&phba->hbalock);
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3161 Failure to post els sgl to port.\n");
@@ -6160,6 +6279,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
kfree(vpd);
goto out_free_mbox;
}
+
mqe = &mboxq->u.mqe;
phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
@@ -6246,6 +6366,16 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
+ /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
+ rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
+ if (phba->pport->cfg_lun_queue_depth > rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3362 LUN queue depth changed from %d to %d\n",
+ phba->pport->cfg_lun_queue_depth, rc);
+ phba->pport->cfg_lun_queue_depth = rc;
+ }
+
+
/*
* Discover the port's supported feature set and match it against the
* hosts requests.
@@ -6433,16 +6563,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Start the ELS watchdog timer */
mod_timer(&vport->els_tmofunc,
- jiffies + HZ * (phba->fc_ratov * 2));
+ jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
/* Start heart beat timer */
mod_timer(&phba->hb_tmofunc,
- jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+ jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
phba->hb_outstanding = 0;
phba->last_completion_time = jiffies;
/* Start error attention (ERATT) polling timer */
- mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
+ mod_timer(&phba->eratt_poll,
+ jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
/* Enable PCIe device Advanced Error Reporting (AER) if configured */
if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
@@ -6551,6 +6682,108 @@ lpfc_mbox_timeout(unsigned long ptr)
return;
}
+/**
+ * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
+ * are pending
+ * @phba: Pointer to HBA context object.
+ *
+ * This function checks if any mailbox completions are present on the mailbox
+ * completion queue.
+ **/
+bool
+lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
+{
+
+ uint32_t idx;
+ struct lpfc_queue *mcq;
+ struct lpfc_mcqe *mcqe;
+ bool pending_completions = false;
+
+ if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
+ return false;
+
+ /* Check for completions on mailbox completion queue */
+
+ mcq = phba->sli4_hba.mbx_cq;
+ idx = mcq->hba_index;
+ while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) {
+ mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
+ if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
+ (!bf_get_le32(lpfc_trailer_async, mcqe))) {
+ pending_completions = true;
+ break;
+ }
+ idx = (idx + 1) % mcq->entry_count;
+ if (mcq->hba_index == idx)
+ break;
+ }
+ return pending_completions;
+
+}
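
The function above walks the mailbox CQ as a circular buffer without consuming entries. A condensed sketch of that traversal follows, with the bit-field accessors replaced by plain booleans purely for readability; this is an assumed simplification, not the driver's data layout.

	#include <stdbool.h>

	struct mbox_cqe {
		bool valid;		/* entry not yet consumed by software */
		bool completed;		/* trailer "completed" bit */
		bool async;		/* trailer "async event" bit */
	};

	/* Scan forward from the current hardware index; a synchronous,
	 * completed entry means a mailbox completion is waiting. Stop at
	 * the first invalid entry or when the scan wraps back to where it
	 * started. */
	static bool mbox_completion_pending(const struct mbox_cqe *q,
					    unsigned int hba_index,
					    unsigned int entry_count)
	{
		unsigned int idx = hba_index;

		while (q[idx].valid) {
			if (q[idx].completed && !q[idx].async)
				return true;
			idx = (idx + 1) % entry_count;
			if (idx == hba_index)
				break;
		}
		return false;
	}
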
+
+/**
+ * lpfc_sli4_process_missed_mbox_completions - process mbox completions
+ * that were missed.
+ * @phba: Pointer to HBA context object.
+ *
+ * For sli4, it is possible to miss an interrupt. As such, mailbox completions
+ * may be missed, causing erroneous mailbox timeouts to occur. This function
+ * checks to see if mbox completions are on the mailbox completion queue
+ * and will process all the completions associated with the eq for the
+ * mailbox completion queue.
+ **/
+bool
+lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
+{
+
+ uint32_t eqidx;
+ struct lpfc_queue *fpeq = NULL;
+ struct lpfc_eqe *eqe;
+ bool mbox_pending;
+
+ if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
+ return false;
+
+ /* Find the eq associated with the mcq */
+
+ if (phba->sli4_hba.hba_eq)
+ for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++)
+ if (phba->sli4_hba.hba_eq[eqidx]->queue_id ==
+ phba->sli4_hba.mbx_cq->assoc_qid) {
+ fpeq = phba->sli4_hba.hba_eq[eqidx];
+ break;
+ }
+ if (!fpeq)
+ return false;
+
+ /* Turn off interrupts from this EQ */
+
+ lpfc_sli4_eq_clr_intr(fpeq);
+
+ /* Check to see if a mbox completion is pending */
+
+ mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
+
+ /*
+ * If a mbox completion is pending, process all the events on EQ
+ * associated with the mbox completion queue (this could include
+ * mailbox commands, async events, els commands, receive queue data
+ * and fcp commands)
+ */
+
+ if (mbox_pending)
+ while ((eqe = lpfc_sli4_eq_get(fpeq))) {
+ lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
+ fpeq->EQ_processed++;
+ }
+
+ /* Always clear and re-arm the EQ */
+
+ lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
+
+ return mbox_pending;
+
+}
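
Condensed to its control flow, the missed-completion recovery above is a mask/check/drain/re-arm sequence. The sketch below only labels those steps; all helper names are hypothetical stand-ins for the lpfc_sli4_* calls in the function, not real APIs.

	#include <stdbool.h>

	struct eq;			/* opaque for this sketch */
	struct cq;

	/* Invented step labels, not driver APIs. */
	void eq_disable_intr(struct eq *eq);
	void eq_drain(struct eq *eq);
	void eq_rearm(struct eq *eq);
	bool cq_has_mbox_completion(struct cq *cq);

	/* Mask the EQ so nothing races the check, look for a parked mailbox
	 * completion, drain every event on that EQ if one is found, and
	 * always finish by clearing and re-arming the EQ. */
	static bool recover_missed_mbox(struct eq *eq, struct cq *mbox_cq)
	{
		bool pending;

		eq_disable_intr(eq);
		pending = cq_has_mbox_completion(mbox_cq);
		if (pending)
			eq_drain(eq);
		eq_rearm(eq);
		return pending;
	}

Draining the whole EQ, rather than only the mailbox CQ, is what lets this path also pick up any ELS, receive-queue, or FCP completions that were parked behind the missed interrupt.
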
/**
* lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
@@ -6566,7 +6799,10 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
MAILBOX_t *mb = &pmbox->u.mb;
struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring;
+
+ /* If the mailbox completed, process the completion and return */
+ if (lpfc_sli4_process_missed_mbox_completions(phba))
+ return;
/* Check the pmbox pointer first. There is a race condition
* between the mbox timeout handler getting executed in the
@@ -6604,8 +6840,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
psli->sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
- pring = &psli->ring[psli->fcp_ring];
- lpfc_sli_abort_iocb_ring(phba, pring);
+ lpfc_sli_abort_fcp_rings(phba);
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0345 Resetting board due to mailbox timeout\n");
@@ -6809,8 +7044,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
goto out_not_finished;
}
/* timeout active mbox command */
- mod_timer(&psli->mbox_tmo, (jiffies +
- (HZ * lpfc_mbox_tmo_val(phba, pmbox))));
+ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
+ 1000);
+ mod_timer(&psli->mbox_tmo, jiffies + timeout);
}
/* Mailbox cmd <cmd> issue */
@@ -7061,6 +7297,10 @@ lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
1000) + jiffies;
spin_unlock_irq(&phba->hbalock);
+ /* Make sure the mailbox is really active */
+ if (timeout)
+ lpfc_sli4_process_missed_mbox_completions(phba);
+
/* Wait for the outstanding mailbox command to complete */
while (phba->sli.mbox_active) {
/* Check active mailbox complete status every 2ms */
@@ -7483,7 +7723,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
/* Start timer for the mbox_tmo and log some mailbox post messages */
mod_timer(&psli->mbox_tmo, (jiffies +
- (HZ * lpfc_mbox_tmo_val(phba, mboxq))));
+ msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
@@ -7614,7 +7854,6 @@ __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
{
/* Insert the caller's iocb in the txq tail for later processing. */
list_add_tail(&piocb->list, &pring->txq);
- pring->txq_cnt++;
}
/**
@@ -7902,15 +8141,22 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
static inline uint32_t
lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
{
- int i;
-
- if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU)
- i = smp_processor_id();
- else
- i = atomic_add_return(1, &phba->fcp_qidx);
-
- i = (i % phba->cfg_fcp_io_channel);
- return i;
+ struct lpfc_vector_map_info *cpup;
+ int chann, cpu;
+
+ if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
+ && phba->cfg_fcp_io_channel > 1) {
+ cpu = smp_processor_id();
+ if (cpu < phba->sli4_hba.num_present_cpu) {
+ cpup = phba->sli4_hba.cpu_map;
+ cpup += cpu;
+ return cpup->channel_id;
+ }
+ chann = cpu;
+ }
+ chann = atomic_add_return(1, &phba->fcp_qidx);
+ chann = (chann % phba->cfg_fcp_io_channel);
+ return chann;
}
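
The reworked lpfc_sli4_scmd_to_wqidx_distr() chooses between a per-CPU channel map and a round-robin counter. A minimal sketch of that selection, assuming an already-built CPU-to-channel table; the parameter names are invented for the sketch.

	#include <stdatomic.h>

	/* Pick a work-queue index for an I/O: per-CPU affinity when the
	 * submitting CPU has a channel mapping, otherwise simple
	 * round-robin across the available channels. */
	static unsigned int pick_io_channel(int sched_by_cpu, unsigned int cpu,
					    const unsigned int *cpu_to_channel,
					    unsigned int mapped_cpus,
					    _Atomic unsigned int *next_qidx,
					    unsigned int nr_channels)
	{
		if (sched_by_cpu && nr_channels > 1 && cpu < mapped_cpus)
			return cpu_to_channel[cpu];
		return atomic_fetch_add(next_qidx, 1) % nr_channels;
	}

Routing by submitting CPU keeps an I/O's completion on the same CPU's queue pair, which is the locality the cpu_map added elsewhere in this driver is built for; round-robin remains the fallback when no mapping applies.
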
/**
@@ -7962,6 +8208,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
abort_tag = (uint32_t) iocbq->iotag;
xritag = iocbq->sli4_xritag;
wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
+ wqe->generic.wqe_com.word10 = 0;
/* words0-2 bpl convert bde */
if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
@@ -8055,6 +8302,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
+ wqe->els_req.max_response_payload_len = total_len - xmit_len;
break;
case CMD_XMIT_SEQUENCE64_CX:
bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
@@ -8099,8 +8347,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
command_type = FCP_COMMAND_DATA_OUT;
/* word3 iocb=iotag wqe=payload_offset_len */
/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
- wqe->fcp_iwrite.payload_offset_len =
- xmit_len + sizeof(struct fcp_rsp);
+ bf_set(payload_offset_len, &wqe->fcp_iwrite,
+ xmit_len + sizeof(struct fcp_rsp));
+ bf_set(cmd_buff_len, &wqe->fcp_iwrite,
+ 0);
/* word4 iocb=parameter wqe=total_xfer_length memcpy */
/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
@@ -8114,12 +8364,22 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
+ if (iocbq->iocb_flag & LPFC_IO_OAS) {
+ bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
+ if (phba->cfg_XLanePriority) {
+ bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
+ (phba->cfg_XLanePriority << 1));
+ }
+ }
break;
case CMD_FCP_IREAD64_CR:
/* word3 iocb=iotag wqe=payload_offset_len */
/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
- wqe->fcp_iread.payload_offset_len =
- xmit_len + sizeof(struct fcp_rsp);
+ bf_set(payload_offset_len, &wqe->fcp_iread,
+ xmit_len + sizeof(struct fcp_rsp));
+ bf_set(cmd_buff_len, &wqe->fcp_iread,
+ 0);
/* word4 iocb=parameter wqe=total_xfer_length memcpy */
/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
@@ -8133,10 +8393,23 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
+ if (iocbq->iocb_flag & LPFC_IO_OAS) {
+ bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
+ if (phba->cfg_XLanePriority) {
+ bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
+ (phba->cfg_XLanePriority << 1));
+ }
+ }
break;
case CMD_FCP_ICMND64_CR:
+ /* word3 iocb=iotag wqe=payload_offset_len */
+ /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
+ bf_set(payload_offset_len, &wqe->fcp_icmd,
+ xmit_len + sizeof(struct fcp_rsp));
+ bf_set(cmd_buff_len, &wqe->fcp_icmd,
+ 0);
/* word3 iocb=IO_TAG wqe=reserved */
- wqe->fcp_icmd.rsrvd3 = 0;
bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
/* Always open the exchange */
bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
@@ -8148,6 +8421,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
iocbq->iocb.ulpFCP2Rcvy);
+ if (iocbq->iocb_flag & LPFC_IO_OAS) {
+ bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
+ if (phba->cfg_XLanePriority) {
+ bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
+ (phba->cfg_XLanePriority << 1));
+ }
+ }
break;
case CMD_GEN_REQUEST64_CR:
/* For this command calculate the xmit length of the
@@ -8182,6 +8463,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
+ wqe->gen_req.max_response_payload_len = total_len - xmit_len;
command_type = OTHER_COMMAND;
break;
case CMD_XMIT_ELS_RSP64_CX:
@@ -8379,6 +8661,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
{
struct lpfc_sglq *sglq;
union lpfc_wqe wqe;
+ struct lpfc_queue *wq;
struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
if (piocb->sli4_xritag == NO_XRI) {
@@ -8386,7 +8669,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
sglq = NULL;
else {
- if (pring->txq_cnt) {
+ if (!list_empty(&pring->txq)) {
if (!(flag & SLI_IOCB_RET_IOCB)) {
__lpfc_sli_ringtx_put(phba,
pring, piocb);
@@ -8431,11 +8714,17 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
return IOCB_ERROR;
if ((piocb->iocb_flag & LPFC_IO_FCP) ||
- (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
- if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
- &wqe))
+ (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+ if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) {
+ wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx];
+ } else {
+ wq = phba->sli4_hba.oas_wq;
+ }
+ if (lpfc_sli4_wq_put(wq, &wqe))
return IOCB_ERROR;
} else {
+ if (unlikely(!phba->sli4_hba.els_wq))
+ return IOCB_ERROR;
if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
return IOCB_ERROR;
}
@@ -8521,12 +8810,20 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
if (phba->sli_rev == LPFC_SLI_REV4) {
if (piocb->iocb_flag & LPFC_IO_FCP) {
- if (unlikely(!phba->sli4_hba.fcp_wq))
- return IOCB_ERROR;
- idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
- piocb->fcp_wqidx = idx;
- ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
-
+ if (!phba->cfg_fof || (!(piocb->iocb_flag &
+ LPFC_IO_OAS))) {
+ if (unlikely(!phba->sli4_hba.fcp_wq))
+ return IOCB_ERROR;
+ idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
+ piocb->fcp_wqidx = idx;
+ ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
+ } else {
+ if (unlikely(!phba->sli4_hba.oas_wq))
+ return IOCB_ERROR;
+ idx = 0;
+ piocb->fcp_wqidx = 0;
+ ring_number = LPFC_FCP_OAS_RING;
+ }
pring = &phba->sli.ring[ring_number];
spin_lock_irqsave(&pring->ring_lock, iflags);
rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
@@ -8700,7 +8997,7 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"3116 Port generated FCP XRI ABORT event on "
"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
- ndlp->vport->vpi, ndlp->nlp_rpi,
+ ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
bf_get(lpfc_wcqe_xa_xri, axri),
bf_get(lpfc_wcqe_xa_status, axri),
axri->parameter);
@@ -8948,6 +9245,7 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
pring->sli.sli3.next_cmdidx = 0;
pring->sli.sli3.local_getidx = 0;
pring->sli.sli3.cmdidx = 0;
+ pring->flag = 0;
INIT_LIST_HEAD(&pring->txq);
INIT_LIST_HEAD(&pring->txcmplq);
INIT_LIST_HEAD(&pring->iocb_continueq);
@@ -9054,7 +9352,6 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
if (iocb->vport != vport)
continue;
list_move_tail(&iocb->list, &completions);
- pring->txq_cnt--;
}
/* Next issue ABTS for everything on the txcmplq */
@@ -9123,8 +9420,6 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
* given to the FW yet.
*/
list_splice_init(&pring->txq, &completions);
- pring->txq_cnt = 0;
-
}
spin_unlock_irqrestore(&phba->hbalock, flags);
@@ -9586,43 +9881,6 @@ abort_iotag_exit:
}
/**
- * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
- * @phba: Pointer to HBA context object.
- * @pring: Pointer to driver SLI ring object.
- *
- * This function aborts all iocbs in the given ring and frees all the iocb
- * objects in txq. This function issues abort iocbs unconditionally for all
- * the iocb commands in txcmplq. The iocbs in the txcmplq is not guaranteed
- * to complete before the return of this function. The caller is not required
- * to hold any locks.
- **/
-static void
-lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
-{
- LIST_HEAD(completions);
- struct lpfc_iocbq *iocb, *next_iocb;
-
- if (pring->ringno == LPFC_ELS_RING)
- lpfc_fabric_abort_hba(phba);
-
- spin_lock_irq(&phba->hbalock);
-
- /* Take off all the iocbs on txq for cancelling */
- list_splice_init(&pring->txq, &completions);
- pring->txq_cnt = 0;
-
- /* Next issue ABTS for everything on the txcmplq */
- list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
- lpfc_sli_abort_iotag_issue(phba, pring, iocb);
-
- spin_unlock_irq(&phba->hbalock);
-
- /* Cancel all the IOCBs from the completions list */
- lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
- IOERR_SLI_ABORTED);
-}
-
-/**
* lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
* @phba: pointer to lpfc HBA data structure.
*
@@ -9637,7 +9895,7 @@ lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
- lpfc_sli_iocb_ring_abort(phba, pring);
+ lpfc_sli_abort_iocb_ring(phba, pring);
}
}
@@ -9759,7 +10017,7 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "3096 ABORT_XRI_CN completing on xri x%x "
+ "3096 ABORT_XRI_CN completing on rpi x%x "
"original iotag x%x, abort cmd iotag x%x "
"status 0x%x, reason 0x%x\n",
cmdiocb->iocb.un.acxri.abortContextTag,
@@ -9809,6 +10067,13 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
abort_cmd) != 0)
continue;
+ /*
+ * If the iocbq is already being aborted, don't take a second
+ * action, but do count it.
+ */
+ if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+ continue;
+
/* issue ABTS for this IOCB based on iotag */
abtsiocb = lpfc_sli_get_iocbq(phba);
if (abtsiocb == NULL) {
@@ -9816,6 +10081,9 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
continue;
}
+ /* indicate the IO is being aborted by the driver. */
+ iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
+
cmd = &iocbq->iocb;
abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
@@ -9825,7 +10093,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
abtsiocb->iocb.ulpLe = 1;
abtsiocb->iocb.ulpClass = cmd->ulpClass;
- abtsiocb->vport = phba->pport;
+ abtsiocb->vport = vport;
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
@@ -9852,6 +10120,124 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
}
/**
+ * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
+ * @vport: Pointer to virtual port.
+ * @pring: Pointer to driver SLI ring object.
+ * @tgt_id: SCSI ID of the target.
+ * @lun_id: LUN ID of the scsi device.
+ * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
+ *
+ * This function sends an abort command for every SCSI command
+ * associated with the given virtual port pending on the ring
+ * filtered by lpfc_sli_validate_fcp_iocb function.
+ * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
+ * FCP iocbs associated with lun specified by tgt_id and lun_id
+ * parameters
+ * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
+ * FCP iocbs associated with SCSI target specified by tgt_id parameter.
+ * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
+ * FCP iocbs associated with virtual port.
+ * This function returns the number of iocbs it aborted.
+ * This function is called with no locks held right after a taskmgmt
+ * command is sent.
+ **/
+int
+lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
+ uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *abtsiocbq;
+ struct lpfc_iocbq *iocbq;
+ IOCB_t *icmd;
+ int sum, i, ret_val;
+ unsigned long iflags;
+ struct lpfc_sli_ring *pring_s4;
+ uint32_t ring_number;
+
+ spin_lock_irq(&phba->hbalock);
+
+ /* all I/Os are in process of being flushed */
+ if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
+ spin_unlock_irq(&phba->hbalock);
+ return 0;
+ }
+ sum = 0;
+
+ for (i = 1; i <= phba->sli.last_iotag; i++) {
+ iocbq = phba->sli.iocbq_lookup[i];
+
+ if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
+ cmd) != 0)
+ continue;
+
+ /*
+ * If the iocbq is already being aborted, don't take a second
+ * action, but do count it.
+ */
+ if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+ continue;
+
+ /* issue ABTS for this IOCB based on iotag */
+ abtsiocbq = __lpfc_sli_get_iocbq(phba);
+ if (abtsiocbq == NULL)
+ continue;
+
+ icmd = &iocbq->iocb;
+ abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
+ abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ abtsiocbq->iocb.un.acxri.abortIoTag =
+ iocbq->sli4_xritag;
+ else
+ abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
+ abtsiocbq->iocb.ulpLe = 1;
+ abtsiocbq->iocb.ulpClass = icmd->ulpClass;
+ abtsiocbq->vport = vport;
+
+ /* ABTS WQE must go to the same WQ as the WQE to be aborted */
+ abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx;
+ if (iocbq->iocb_flag & LPFC_IO_FCP)
+ abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
+
+ if (lpfc_is_link_up(phba))
+ abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
+ else
+ abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
+
+ /* Setup callback routine and issue the command. */
+ abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
+
+ /*
+ * Indicate the IO is being aborted by the driver and set
+ * the caller's flag into the aborted IO.
+ */
+ iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ ring_number = MAX_SLI3_CONFIGURED_RINGS +
+ iocbq->fcp_wqidx;
+ pring_s4 = &phba->sli.ring[ring_number];
+ /* Note: both hbalock and ring_lock must be set here */
+ spin_lock_irqsave(&pring_s4->ring_lock, iflags);
+ ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
+ abtsiocbq, 0);
+ spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
+ } else {
+ ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
+ abtsiocbq, 0);
+ }
+
+
+ if (ret_val == IOCB_ERROR)
+ __lpfc_sli_release_iocbq(phba, abtsiocbq);
+ else
+ sum++;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ return sum;
+}
+
+/**
* lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
* @phba: Pointer to HBA context object.
* @cmdiocbq: Pointer to command iocb.
@@ -9878,6 +10264,24 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
struct lpfc_scsi_buf *lpfc_cmd;
spin_lock_irqsave(&phba->hbalock, iflags);
+ if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
+
+ /*
+ * A time out has occurred for the iocb. If a time out
+ * completion handler has been supplied, call it. Otherwise,
+ * just free the iocbq.
+ */
+
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
+ cmdiocbq->wait_iocb_cmpl = NULL;
+ if (cmdiocbq->iocb_cmpl)
+ (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
+ else
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+ return;
+ }
+
cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
if (cmdiocbq->context2 && rspiocbq)
memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
@@ -9933,10 +10337,16 @@ lpfc_chk_iocb_flg(struct lpfc_hba *phba,
* @timeout: Timeout in number of seconds.
*
* This function issues the iocb to firmware and waits for the
- * iocb to complete. If the iocb command is not
- * completed within timeout seconds, it returns IOCB_TIMEDOUT.
- * Caller should not free the iocb resources if this function
- * returns IOCB_TIMEDOUT.
+ * iocb to complete. The iocb_cmpl field of the iocb shall be used
+ * to handle iocbs which time out. If the field is NULL, the
+ * function shall free the iocbq structure. If more clean up is
+ * needed, the caller is expected to provide a completion function
+ * that will provide the needed clean up. If the iocb command is
+ * not completed within timeout seconds, the function will either
+ * free the iocbq structure (if iocb_cmpl == NULL) or execute the
+ * completion function set in the iocb_cmpl field and then return
+ * a status of IOCB_TIMEDOUT. The caller should not free the iocb
+ * resources if this function returns IOCB_TIMEDOUT.
* The function waits for the iocb completion using an
* non-interruptible wait.
* This function will sleep while waiting for iocb completion.
@@ -9965,7 +10375,13 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
long timeleft, timeout_req = 0;
int retval = IOCB_SUCCESS;
uint32_t creg_val;
+ struct lpfc_iocbq *iocb;
+ int txq_cnt = 0;
+ int txcmplq_cnt = 0;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ unsigned long iflags;
+ bool iocb_completed = true;
+
/*
* If the caller has provided a response iocbq buffer, then context2
* is NULL or its an error.
@@ -9976,9 +10392,10 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
piocb->context2 = prspiocbq;
}
+ piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
piocb->context_un.wait_queue = &done_q;
- piocb->iocb_flag &= ~LPFC_IO_WAKE;
+ piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
if (lpfc_readl(phba->HCregaddr, &creg_val))
@@ -9991,14 +10408,30 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
SLI_IOCB_RET_IOCB);
if (retval == IOCB_SUCCESS) {
- timeout_req = timeout * HZ;
+ timeout_req = msecs_to_jiffies(timeout * 1000);
timeleft = wait_event_timeout(done_q,
lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
timeout_req);
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
+
+ /*
+ * IOCB timed out. Inform the wake iocb wait
+ * completion function and set local status
+ */
- if (piocb->iocb_flag & LPFC_IO_WAKE) {
+ iocb_completed = false;
+ piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ if (iocb_completed) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"0331 IOCB wake signaled\n");
+ /* Note: we are not indicating if the IOCB has a success
+ * status or not - that's for the caller to check.
+ * IOCB_SUCCESS means just that the command was sent and
+ * completed. Not that it completed successfully.
+ */
} else if (timeleft == 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0338 IOCB wait timeout error - no "
@@ -10012,9 +10445,17 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
retval = IOCB_TIMEDOUT;
}
} else if (retval == IOCB_BUSY) {
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
- phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt);
+ if (phba->cfg_log_verbose & LOG_SLI) {
+ list_for_each_entry(iocb, &pring->txq, list) {
+ txq_cnt++;
+ }
+ list_for_each_entry(iocb, &pring->txcmplq, list) {
+ txcmplq_cnt++;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
+ phba->iocb_cnt, txq_cnt, txcmplq_cnt);
+ }
return retval;
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -10070,12 +10511,13 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
uint32_t timeout)
{
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
+ MAILBOX_t *mb = NULL;
int retval;
unsigned long flag;
- /* The caller must leave context1 empty. */
+ /* The caller might set context1 for extended buffer */
if (pmboxq->context1)
- return MBX_NOT_FINISHED;
+ mb = (MAILBOX_t *)pmboxq->context1;
pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
/* setup wake call as IOCB callback */
@@ -10088,22 +10530,25 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
wait_event_interruptible_timeout(done_q,
pmboxq->mbox_flag & LPFC_MBX_WAKE,
- timeout * HZ);
+ msecs_to_jiffies(timeout * 1000));
spin_lock_irqsave(&phba->hbalock, flag);
- pmboxq->context1 = NULL;
+ /* restore the possible extended buffer for free resource */
+ pmboxq->context1 = (uint8_t *)mb;
/*
* if LPFC_MBX_WAKE flag is set the mailbox is completed
* else do not free the resources.
*/
if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
retval = MBX_SUCCESS;
- lpfc_sli4_swap_str(phba, pmboxq);
} else {
retval = MBX_TIMEOUT;
pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
}
spin_unlock_irqrestore(&phba->hbalock, flag);
+ } else {
+ /* restore the possible extended buffer for free resource */
+ pmboxq->context1 = (uint8_t *)mb;
}
return retval;
@@ -10988,8 +11433,11 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
struct lpfc_iocbq *pIocbOut,
struct lpfc_wcqe_complete *wcqe)
{
+ int numBdes, i;
unsigned long iflags;
- uint32_t status;
+ uint32_t status, max_response;
+ struct lpfc_dmabuf *dmabuf;
+ struct ulp_bde64 *bpl, bde;
size_t offset = offsetof(struct lpfc_iocbq, iocb);
memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
@@ -11006,7 +11454,36 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
else {
pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
- pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
+ switch (pIocbOut->iocb.ulpCommand) {
+ case CMD_ELS_REQUEST64_CR:
+ dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
+ bpl = (struct ulp_bde64 *)dmabuf->virt;
+ bde.tus.w = le32_to_cpu(bpl[1].tus.w);
+ max_response = bde.tus.f.bdeSize;
+ break;
+ case CMD_GEN_REQUEST64_CR:
+ max_response = 0;
+ if (!pIocbOut->context3)
+ break;
+ numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
+ sizeof(struct ulp_bde64);
+ dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
+ bpl = (struct ulp_bde64 *)dmabuf->virt;
+ for (i = 0; i < numBdes; i++) {
+ bde.tus.w = le32_to_cpu(bpl[i].tus.w);
+ if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
+ max_response += bde.tus.f.bdeSize;
+ }
+ break;
+ default:
+ max_response = wcqe->total_data_placed;
+ break;
+ }
+ if (max_response < wcqe->total_data_placed)
+ pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
+ else
+ pIocbIn->iocb.un.genreq64.bdl.bdeSize =
+ wcqe->total_data_placed;
}
/* Convert BG errors for completion status */
@@ -11297,16 +11774,25 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
struct lpfc_iocbq *irspiocbq;
unsigned long iflags;
struct lpfc_sli_ring *pring = cq->pring;
+ int txq_cnt = 0;
+ int txcmplq_cnt = 0;
+ int fcp_txcmplq_cnt = 0;
/* Get an irspiocbq for later ELS response processing use */
irspiocbq = lpfc_sli_get_iocbq(phba);
if (!irspiocbq) {
+ if (!list_empty(&pring->txq))
+ txq_cnt++;
+ if (!list_empty(&pring->txcmplq))
+ txcmplq_cnt++;
+ if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
+ fcp_txcmplq_cnt++;
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
"fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
- pring->txq_cnt, phba->iocb_cnt,
- phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt,
- phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt);
+ txq_cnt, phba->iocb_cnt,
+ fcp_txcmplq_cnt,
+ txcmplq_cnt);
return false;
}
@@ -11877,6 +12363,175 @@ lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
}
+
+/**
+ * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
+ * entry
+ * @phba: Pointer to HBA context object.
+ * @eqe: Pointer to fast-path event queue entry.
+ *
+ * This routine processes an event queue entry from the Flash Optimized Fabric
+ * event queue. It will check the MajorCode and MinorCode to determine whether
+ * this is for a completion event on a completion queue; if not, an error is
+ * logged and the routine returns. Otherwise, it will get to the corresponding
+ * completion queue and process all the entries on the completion queue, rearm
+ * the completion queue, and then return.
+ **/
+static void
+lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
+{
+ struct lpfc_queue *cq;
+ struct lpfc_cqe *cqe;
+ bool workposted = false;
+ uint16_t cqid;
+ int ecount = 0;
+
+ if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9147 Not a valid completion "
+ "event: majorcode=x%x, minorcode=x%x\n",
+ bf_get_le32(lpfc_eqe_major_code, eqe),
+ bf_get_le32(lpfc_eqe_minor_code, eqe));
+ return;
+ }
+
+ /* Get the reference to the corresponding CQ */
+ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+
+ /* Next check for OAS */
+ cq = phba->sli4_hba.oas_cq;
+ if (unlikely(!cq)) {
+ if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9148 OAS completion queue "
+ "does not exist\n");
+ return;
+ }
+
+ if (unlikely(cqid != cq->queue_id)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9149 Miss-matched fast-path compl "
+ "queue id: eqcqid=%d, fcpcqid=%d\n",
+ cqid, cq->queue_id);
+ return;
+ }
+
+ /* Process all the entries to the OAS CQ */
+ while ((cqe = lpfc_sli4_cq_get(cq))) {
+ workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
+ if (!(++ecount % cq->entry_repost))
+ lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+ }
+
+ /* Track the max number of CQEs processed in 1 EQ */
+ if (ecount > cq->CQ_max_cqe)
+ cq->CQ_max_cqe = ecount;
+
+ /* Catch the no cq entry condition */
+ if (unlikely(ecount == 0))
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9153 No entry from fast-path completion "
+ "queue fcpcqid=%d\n", cq->queue_id);
+
+ /* In any case, flush and re-arm the CQ */
+ lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+
+ /* wake up worker thread if there are works to be done */
+ if (workposted)
+ lpfc_worker_wake_up(phba);
+}
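
The OAS CQ drain above gives entries back to the hardware in batches (every entry_repost entries, without re-arming) and only re-arms on the final release. A structural sketch of that cadence, with all names invented for illustration:

	struct sketch_cq;		/* opaque for this sketch */
	struct sketch_cqe;

	/* Invented helpers that only name the steps; not driver APIs. */
	struct sketch_cqe *cq_next_entry(struct sketch_cq *cq);
	void cq_release(struct sketch_cq *cq, int rearm);
	int handle_entry(struct sketch_cq *cq, struct sketch_cqe *cqe);

	#define ENTRY_REPOST	64	/* hypothetical batch size */

	/* Release consumed entries periodically so the hardware never runs
	 * out of CQ slots during a long drain, then re-arm once at the end. */
	static int drain_cq(struct sketch_cq *cq)
	{
		struct sketch_cqe *cqe;
		int count = 0, work = 0;

		while ((cqe = cq_next_entry(cq)) != NULL) {
			work |= handle_entry(cq, cqe);
			if (!(++count % ENTRY_REPOST))
				cq_release(cq, 0);	/* give entries back, no re-arm yet */
		}
		cq_release(cq, 1);			/* final release with re-arm */
		return work;
	}
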
+
+/**
+ * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-4 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
+ * IOCB ring event in the HBA. However, when the device is enabled with either
+ * MSI or Pin-IRQ interrupt mode, this function is called as part of the
+ * device-level interrupt handler. When the PCI slot is in error recovery
+ * or the HBA is undergoing initialization, the interrupt handler will not
+ * process the interrupt. The Flash Optimized Fabric ring events are handled in
+ * the interrupt context. This function is called without any lock held.
+ * It gets the hbalock to access and update SLI data structures. Note that,
+ * the EQ to CQ are one-to-one map such that the EQ index is
+ * equal to that of CQ index.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
+{
+ struct lpfc_hba *phba;
+ struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
+ struct lpfc_queue *eq;
+ struct lpfc_eqe *eqe;
+ unsigned long iflag;
+ int ecount = 0;
+ uint32_t eqidx;
+
+ /* Get the driver's phba structure from the dev_id */
+ fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
+ phba = fcp_eq_hdl->phba;
+ eqidx = fcp_eq_hdl->idx;
+
+ if (unlikely(!phba))
+ return IRQ_NONE;
+
+ /* Get to the EQ struct associated with this vector */
+ eq = phba->sli4_hba.fof_eq;
+ if (unlikely(!eq))
+ return IRQ_NONE;
+
+ /* Check device state for handling interrupt */
+ if (unlikely(lpfc_intr_state_check(phba))) {
+ eq->EQ_badstate++;
+ /* Check again for link_state with lock held */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ if (phba->link_state < LPFC_LINK_DOWN)
+ /* Flush, clear interrupt, and rearm the EQ */
+ lpfc_sli4_eq_flush(phba, eq);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return IRQ_NONE;
+ }
+
+ /*
+ * Process all the event on FCP fast-path EQ
+ */
+ while ((eqe = lpfc_sli4_eq_get(eq))) {
+ lpfc_sli4_fof_handle_eqe(phba, eqe);
+ if (!(++ecount % eq->entry_repost))
+ lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM);
+ eq->EQ_processed++;
+ }
+
+ /* Track the max number of EQEs processed in 1 intr */
+ if (ecount > eq->EQ_max_eqe)
+ eq->EQ_max_eqe = ecount;
+
+
+ if (unlikely(ecount == 0)) {
+ eq->EQ_no_entry++;
+
+ if (phba->intr_type == MSIX)
+ /* MSI-X treated interrupt served as no EQ share INT */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "9145 MSI-X interrupt with no EQE\n");
+ else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9146 ISR interrupt with no EQE\n");
+ /* Non MSI-X treated on interrupt as EQ share INT */
+ return IRQ_NONE;
+ }
+ }
+ /* Always clear and re-arm the fast-path EQ */
+ lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
+ return IRQ_HANDLED;
+}
+
/**
* lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
* @irq: Interrupt number.
@@ -12032,6 +12687,13 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
hba_handled |= true;
}
+ if (phba->cfg_fof) {
+ hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
+ &phba->sli4_hba.fcp_eq_hdl[0]);
+ if (hba_irq_rc == IRQ_HANDLED)
+ hba_handled |= true;
+ }
+
return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */
@@ -12148,7 +12810,6 @@ static void __iomem *
lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
{
struct pci_dev *pdev;
- unsigned long bar_map, bar_map_len;
if (!phba->pcidev)
return NULL;
@@ -12157,25 +12818,10 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
switch (pci_barset) {
case WQ_PCI_BAR_0_AND_1:
- if (!phba->pci_bar0_memmap_p) {
- bar_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
- bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
- phba->pci_bar0_memmap_p = ioremap(bar_map, bar_map_len);
- }
return phba->pci_bar0_memmap_p;
case WQ_PCI_BAR_2_AND_3:
- if (!phba->pci_bar2_memmap_p) {
- bar_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
- bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
- phba->pci_bar2_memmap_p = ioremap(bar_map, bar_map_len);
- }
return phba->pci_bar2_memmap_p;
case WQ_PCI_BAR_4_AND_5:
- if (!phba->pci_bar4_memmap_p) {
- bar_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
- bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
- phba->pci_bar4_memmap_p = ioremap(bar_map, bar_map_len);
- }
return phba->pci_bar4_memmap_p;
default:
break;
@@ -12784,10 +13430,44 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
wq->page_count);
bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
cq->queue_id);
+
+ /* wqv is the earliest version supported, NOT the latest */
bf_set(lpfc_mbox_hdr_version, &shdr->request,
phba->sli4_hba.pc_sli4_params.wqv);
- if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
+ switch (phba->sli4_hba.pc_sli4_params.wqv) {
+ case LPFC_Q_CREATE_VERSION_0:
+ switch (wq->entry_size) {
+ default:
+ case 64:
+ /* Nothing to do, version 0 ONLY supports 64 byte WQEs */
+ page = wq_create->u.request.page;
+ break;
+ case 128:
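+ /* 128 byte WQEs require the port to advertise support for them */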
+ if (!(phba->sli4_hba.pc_sli4_params.wqsize &
+ LPFC_WQ_SZ128_SUPPORT)) {
+ status = -ERANGE;
+ goto out;
+ }
+ /* If we get here the HBA MUST also support V1 and
+ * we MUST use it
+ */
+ bf_set(lpfc_mbox_hdr_version, &shdr->request,
+ LPFC_Q_CREATE_VERSION_1);
+
+ bf_set(lpfc_mbx_wq_create_wqe_count,
+ &wq_create->u.request_1, wq->entry_count);
+ bf_set(lpfc_mbx_wq_create_wqe_size,
+ &wq_create->u.request_1,
+ LPFC_WQ_WQE_SIZE_128);
+ bf_set(lpfc_mbx_wq_create_page_size,
+ &wq_create->u.request_1,
+ (PAGE_SIZE/SLI4_PAGE_SIZE));
+ page = wq_create->u.request_1.page;
+ break;
+ }
+ break;
+ case LPFC_Q_CREATE_VERSION_1:
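+ /* Version 1 lets the driver specify both the WQE count and the WQE size */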
bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
wq->entry_count);
switch (wq->entry_size) {
@@ -12798,6 +13478,11 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
LPFC_WQ_WQE_SIZE_64);
break;
case 128:
+ if (!(phba->sli4_hba.pc_sli4_params.wqsize &
+ LPFC_WQ_SZ128_SUPPORT)) {
+ status = -ERANGE;
+ goto out;
+ }
bf_set(lpfc_mbx_wq_create_wqe_size,
&wq_create->u.request_1,
LPFC_WQ_WQE_SIZE_128);
@@ -12806,9 +13491,12 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
(PAGE_SIZE/SLI4_PAGE_SIZE));
page = wq_create->u.request_1.page;
- } else {
- page = wq_create->u.request.page;
+ break;
+ default:
+ status = -ERANGE;
+ goto out;
}
+
list_for_each_entry(dmabuf, &wq->page_list, list) {
memset(dmabuf->virt, 0, hw_page_size);
page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
@@ -12870,8 +13558,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
}
wq->db_regaddr = bar_memmap_p + db_offset;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3264 WQ[%d]: barset:x%x, offset:x%x\n",
- wq->queue_id, pci_barset, db_offset);
+ "3264 WQ[%d]: barset:x%x, offset:x%x, "
+ "format:x%x\n", wq->queue_id, pci_barset,
+ db_offset, wq->db_format);
} else {
wq->db_format = LPFC_DB_LIST_FORMAT;
wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
@@ -13091,8 +13780,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
}
hrq->db_regaddr = bar_memmap_p + db_offset;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3266 RQ[qid:%d]: barset:x%x, offset:x%x\n",
- hrq->queue_id, pci_barset, db_offset);
+ "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
+ "format:x%x\n", hrq->queue_id, pci_barset,
+ db_offset, hrq->db_format);
} else {
hrq->db_format = LPFC_DB_RING_FORMAT;
hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
@@ -13942,13 +14632,14 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
}
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "2538 Received frame rctl:%s type:%s "
- "Frame Data:%08x %08x %08x %08x %08x %08x\n",
- rctl_names[fc_hdr->fh_r_ctl],
- type_names[fc_hdr->fh_type],
+ "2538 Received frame rctl:%s (x%x), type:%s (x%x), "
+ "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
+ rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
+ type_names[fc_hdr->fh_type], fc_hdr->fh_type,
be32_to_cpu(header[0]), be32_to_cpu(header[1]),
be32_to_cpu(header[2]), be32_to_cpu(header[3]),
- be32_to_cpu(header[4]), be32_to_cpu(header[5]));
+ be32_to_cpu(header[4]), be32_to_cpu(header[5]),
+ be32_to_cpu(header[6]));
return 0;
drop:
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
@@ -14626,14 +15317,20 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
first_iocbq->iocb.unsli3.rcvsli3.vpi =
vport->phba->vpi_ids[vport->vpi];
/* put the first buffer into the first IOCBq */
+ tot_len = bf_get(lpfc_rcqe_length,
+ &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+
first_iocbq->context2 = &seq_dmabuf->dbuf;
first_iocbq->context3 = NULL;
first_iocbq->iocb.ulpBdeCount = 1;
- first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
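+ /* A BDE describes at most one receive buffer; a shorter sequence
+  * reports its actual length instead.
+  */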
+ if (tot_len > LPFC_DATA_BUF_SIZE)
+ first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
LPFC_DATA_BUF_SIZE;
+ else
+ first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
+
first_iocbq->iocb.un.rcvels.remoteID = sid;
- tot_len = bf_get(lpfc_rcqe_length,
- &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+
first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
}
iocbq = first_iocbq;
@@ -14649,14 +15346,17 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
if (!iocbq->context3) {
iocbq->context3 = d_buf;
iocbq->iocb.ulpBdeCount++;
- pbde = (struct ulp_bde64 *)
- &iocbq->iocb.unsli3.sli3Words[4];
- pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
-
/* We need to get the size out of the right CQE */
hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
len = bf_get(lpfc_rcqe_length,
&hbq_buf->cq_event.cqe.rcqe_cmpl);
+ pbde = (struct ulp_bde64 *)
+ &iocbq->iocb.unsli3.sli3Words[4];
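+ /* Likewise cap this BDE at one receive buffer */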
+ if (len > LPFC_DATA_BUF_SIZE)
+ pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
+ else
+ pbde->tus.f.bdeSize = len;
+
iocbq->iocb.unsli3.rcvsli3.acc_len += len;
tot_len += len;
} else {
@@ -14671,16 +15371,19 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
lpfc_in_buf_free(vport->phba, d_buf);
continue;
}
+ /* We need to get the size out of the right CQE */
+ hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ len = bf_get(lpfc_rcqe_length,
+ &hbq_buf->cq_event.cqe.rcqe_cmpl);
iocbq->context2 = d_buf;
iocbq->context3 = NULL;
iocbq->iocb.ulpBdeCount = 1;
- iocbq->iocb.un.cont64[0].tus.f.bdeSize =
+ if (len > LPFC_DATA_BUF_SIZE)
+ iocbq->iocb.un.cont64[0].tus.f.bdeSize =
LPFC_DATA_BUF_SIZE;
+ else
+ iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
- /* We need to get the size out of the right CQE */
- hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
- len = bf_get(lpfc_rcqe_length,
- &hbq_buf->cq_event.cqe.rcqe_cmpl);
tot_len += len;
iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
@@ -14962,6 +15665,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
uint16_t max_rpi, rpi_limit;
uint16_t rpi_remaining, lrpi = 0;
struct lpfc_rpi_hdr *rpi_hdr;
+ unsigned long iflag;
max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
rpi_limit = phba->sli4_hba.next_rpi;
@@ -14970,7 +15674,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
* Fetch the next logical rpi. Because this index is logical,
* the driver starts at 0 each time.
*/
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflag);
rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
if (rpi >= rpi_limit)
rpi = LPFC_RPI_ALLOC_ERROR;
@@ -14986,7 +15690,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
*/
if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
(phba->sli4_hba.rpi_count >= max_rpi)) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
return rpi;
}
@@ -14995,7 +15699,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
* extents.
*/
if (!phba->sli4_hba.rpi_hdrs_in_use) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
return rpi;
}
@@ -15006,7 +15710,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
* how many are supported max by the device.
*/
rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
if (!rpi_hdr) {
@@ -15481,11 +16185,18 @@ lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
LPFC_SLI4_FCF_TBL_INDX_MAX);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"3060 Last IDX %d\n", last_index);
- if (list_empty(&phba->fcf.fcf_pri_list)) {
+
+ /* Verify the priority list has 2 or more entries */
+ spin_lock_irq(&phba->hbalock);
+ if (list_empty(&phba->fcf.fcf_pri_list) ||
+ list_is_singular(&phba->fcf.fcf_pri_list)) {
+ spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
"3061 Last IDX %d\n", last_index);
return 0; /* Empty rr list */
}
+ spin_unlock_irq(&phba->hbalock);
+
next_fcf_pri = 0;
/*
* Clear the rr_bmask and set all of the bits that are at this
@@ -15659,7 +16370,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
void
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
- struct lpfc_fcf_pri *fcf_pri;
+ struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
"2762 FCF (x%x) reached driver's book "
@@ -15669,7 +16380,8 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
}
/* Clear the eligible FCF record index bmask */
spin_lock_irq(&phba->hbalock);
- list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+ list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
+ list) {
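+ /* The matching entry is removed from the list below, hence the
+  * _safe iterator.
+  */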
if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
list_del_init(&fcf_pri->list);
break;
@@ -16239,35 +16951,41 @@ lpfc_drain_txq(struct lpfc_hba *phba)
{
LIST_HEAD(completions);
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
- struct lpfc_iocbq *piocbq = 0;
+ struct lpfc_iocbq *piocbq = NULL;
unsigned long iflags = 0;
char *fail_msg = NULL;
struct lpfc_sglq *sglq;
union lpfc_wqe wqe;
+ int txq_cnt = 0;
- spin_lock_irqsave(&phba->hbalock, iflags);
- if (pring->txq_cnt > pring->txq_max)
- pring->txq_max = pring->txq_cnt;
+ spin_lock_irqsave(&pring->ring_lock, iflags);
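+ /* Snapshot the current txq depth while holding the ring lock */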
+ list_for_each_entry(piocbq, &pring->txq, list) {
+ txq_cnt++;
+ }
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ if (txq_cnt > pring->txq_max)
+ pring->txq_max = txq_cnt;
- while (pring->txq_cnt) {
- spin_lock_irqsave(&phba->hbalock, iflags);
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ while (!list_empty(&pring->txq)) {
+ spin_lock_irqsave(&pring->ring_lock, iflags);
piocbq = lpfc_sli_ringtx_get(phba, pring);
if (!piocbq) {
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2823 txq empty and txq_cnt is %d\n ",
- pring->txq_cnt);
+ txq_cnt);
break;
}
sglq = __lpfc_sli_get_sglq(phba, piocbq);
if (!sglq) {
__lpfc_sli_ringtx_put(phba, pring, piocbq);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
break;
}
+ txq_cnt--;
/* The xri and iocb resources secured,
* attempt to issue request
@@ -16292,12 +17010,12 @@ lpfc_drain_txq(struct lpfc_hba *phba)
piocbq->iotag, piocbq->sli4_xritag);
list_add_tail(&piocbq->list, &completions);
}
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
}
/* Cancel all the IOCBs that cannot be issued */
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
IOERR_SLI_ABORTED);
- return pring->txq_cnt;
+ return txq_cnt;
}