Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 1568
1 file changed, 1269 insertions(+), 299 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 624eab37039..32ada050557 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2012 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -71,6 +71,8 @@ static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
uint32_t);
+static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
+static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -124,10 +126,17 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
/* Ring Doorbell */
doorbell.word0 = 0;
- bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
- bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
- bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
- writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
+ if (q->db_format == LPFC_DB_LIST_FORMAT) {
+ bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
+ bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
+ bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
+ } else if (q->db_format == LPFC_DB_RING_FORMAT) {
+ bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
+ bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
+ } else {
+ return -EINVAL;
+ }
+ writel(doorbell.word0, q->db_regaddr);
return 0;
}
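
[Note: the hunk above teaches the WQ doorbell write to handle two hardware layouts. The sketch below models the dispatch only; the bit positions and names are invented for illustration, not the real SLI-4 register layout, which the driver packs with bf_set() before writel()ing to q->db_regaddr.]

	#include <stdint.h>

	enum db_format { DB_LIST_FORMAT, DB_RING_FORMAT };

	/* Hypothetical 32-bit doorbell packing for the two formats. */
	static int ring_wq_doorbell(enum db_format fmt, uint32_t num_posted,
				    uint32_t host_index, uint32_t queue_id,
				    uint32_t *word0)
	{
		switch (fmt) {
		case DB_LIST_FORMAT:	/* carries count, index and queue id */
			*word0 = (num_posted << 24) |
				 ((host_index & 0xffff) << 8) |
				 (queue_id & 0xff);
			return 0;
		case DB_RING_FORMAT:	/* carries count and queue id only */
			*word0 = (num_posted << 24) | (queue_id & 0xff);
			return 0;
		default:		/* mirrors the -EINVAL return above */
			return -1;
		}
	}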
@@ -256,6 +265,16 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
return NULL;
q->hba_index = idx;
+
+ /*
+ * Insert a barrier for instruction interlock: data from the hardware
+ * must have the valid bit checked before it can be copied and acted
+ * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
+ * instructions allowing action on content before the valid bit was
+ * checked, add a barrier here as well. It may not be needed, as the
+ * "content" is a single 32-bit entity here (vs. the multi-word
+ * structure for CQs).
+ */
+ mb();
return eqe;
}
@@ -361,6 +380,17 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
cqe = q->qe[q->hba_index].cqe;
q->hba_index = idx;
+
+ /*
+ * Insert a barrier for instruction interlock: data from the hardware
+ * must have the valid bit checked before it can be copied and acted
+ * upon. Speculative instructions were allowing a bcopy at the start
+ * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
+ * after our return, to copy data before the valid bit check above
+ * was done. As such, some of the copied data was stale. The barrier
+ * ensures the check happens before any data is copied.
+ */
+ mb();
return cqe;
}
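
[Note: the mb() placement matters because the CPU must not read or copy the CQE payload until the valid bit has actually been observed. A minimal userspace analogue of the same ordering rule, using C11 acquire semantics in place of the kernel's mb():]

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	struct entry {
		_Atomic uint32_t valid;	/* set last by the producer */
		uint32_t payload[15];	/* filled in before valid is set */
	};

	/* Consumer: load the valid flag with acquire ordering so the payload
	 * copy below cannot be speculated ahead of the flag check. */
	static bool consume(struct entry *e, uint32_t *out)
	{
		if (!atomic_load_explicit(&e->valid, memory_order_acquire))
			return false;
		memcpy(out, e->payload, sizeof(e->payload)); /* ordered after check */
		return true;
	}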
@@ -431,11 +461,12 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
struct lpfc_rqe *temp_hrqe;
struct lpfc_rqe *temp_drqe;
struct lpfc_register doorbell;
- int put_index = hq->host_index;
+ int put_index;
/* sanity check on queue memory */
if (unlikely(!hq) || unlikely(!dq))
return -ENOMEM;
+ put_index = hq->host_index;
temp_hrqe = hq->qe[hq->host_index].rqe;
temp_drqe = dq->qe[dq->host_index].rqe;
@@ -456,10 +487,20 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
/* Ring The Header Receive Queue Doorbell */
if (!(hq->host_index % hq->entry_repost)) {
doorbell.word0 = 0;
- bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
- hq->entry_repost);
- bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
- writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
+ if (hq->db_format == LPFC_DB_RING_FORMAT) {
+ bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
+ hq->entry_repost);
+ bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
+ } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
+ bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
+ hq->entry_repost);
+ bf_set(lpfc_rq_db_list_fm_index, &doorbell,
+ hq->host_index);
+ bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
+ } else {
+ return -EINVAL;
+ }
+ writel(doorbell.word0, hq->db_regaddr);
}
return put_index;
}
@@ -615,7 +656,7 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
if (!ndlp)
goto out;
- if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
+ if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
rrq->send_rrq = 0;
rrq->xritag = 0;
rrq->rrq_stop_time = 0;
@@ -649,7 +690,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
- next_time = jiffies + HZ * (phba->fc_ratov + 1);
+ next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
list_for_each_entry_safe(rrq, nextrrq,
&phba->active_rrq_list, list) {
if (time_after(jiffies, rrq->rrq_stop_time))
@@ -658,7 +699,8 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
next_time = rrq->rrq_stop_time;
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
- if (!list_empty(&phba->active_rrq_list))
+ if ((!list_empty(&phba->active_rrq_list)) &&
+ (!(phba->pport->load_flag & FC_UNLOADING)))
mod_timer(&phba->rrq_tmr, next_time);
list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
list_del(&rrq->list);
@@ -764,7 +806,7 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
return;
spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
- next_time = jiffies + HZ * (phba->fc_ratov * 2);
+ next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2));
list_splice_init(&phba->active_rrq_list, &rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -772,7 +814,9 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
list_del(&rrq->list);
lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
}
- if (!list_empty(&phba->active_rrq_list))
+ if ((!list_empty(&phba->active_rrq_list)) &&
+ (!(phba->pport->load_flag & FC_UNLOADING)))
 mod_timer(&phba->rrq_tmr, next_time);
}
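
[Note: the repeated HZ * seconds -> msecs_to_jiffies(1000 * seconds) substitution throughout this patch computes the same tick count; the gain is explicit units and rounding handled in one place. A small model of the conversion, assuming a fixed tick rate; the real msecs_to_jiffies() lives in the kernel and also handles overflow:]

	#include <stdio.h>

	#define MODEL_HZ 250	/* e.g. a CONFIG_HZ=250 kernel */

	/* round up so a timer never expires early */
	static unsigned long model_msecs_to_jiffies(unsigned int ms)
	{
		return ((unsigned long)ms * MODEL_HZ + 999) / 1000;
	}

	int main(void)
	{
		unsigned int fc_ratov = 10;	/* seconds, as in the code above */

		/* equal to HZ * (fc_ratov + 1) when ms is a whole second */
		printf("%lu ticks\n",
		       model_msecs_to_jiffies(1000 * (fc_ratov + 1)));
		return 0;
	}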
@@ -793,7 +837,9 @@ lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
{
if (!ndlp)
return 0;
- if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
+ if (!ndlp->active_rrqs_xri_bitmap)
+ return 0;
+ if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
return 1;
else
return 0;
@@ -843,7 +889,10 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
goto out;
- if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
+ if (!ndlp->active_rrqs_xri_bitmap)
+ goto out;
+
+ if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
goto out;
spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -855,14 +904,17 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
xritag, rxid, ndlp->nlp_DID, send_rrq);
return -EINVAL;
}
- rrq->send_rrq = send_rrq;
+ if (phba->cfg_enable_rrq == 1)
+ rrq->send_rrq = send_rrq;
+ else
+ rrq->send_rrq = 0;
rrq->xritag = xritag;
- rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
+ rrq->rrq_stop_time = jiffies +
+ msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
rrq->ndlp = ndlp;
rrq->nlp_DID = ndlp->nlp_DID;
rrq->vport = ndlp->vport;
rrq->rxid = rxid;
- rrq->send_rrq = send_rrq;
spin_lock_irqsave(&phba->hbalock, iflags);
empty = list_empty(&phba->active_rrq_list);
list_add_tail(&rrq->list, &phba->active_rrq_list);
@@ -906,8 +958,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
!(piocbq->iocb_flag & LPFC_IO_LIBDFC))
ndlp = piocbq->context_un.ndlp;
- else if ((piocbq->iocb.ulpCommand == CMD_ELS_REQUEST64_CR) &&
- (piocbq->iocb_flag & LPFC_IO_LIBDFC))
+ else if (piocbq->iocb_flag & LPFC_IO_LIBDFC)
ndlp = piocbq->context_un.ndlp;
else
ndlp = piocbq->context1;
@@ -991,6 +1042,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
else
sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
+
if (sglq) {
if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
(sglq->state != SGL_XRI_ABORTED)) {
@@ -1007,7 +1059,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
&phba->sli4_hba.lpfc_sgl_list);
/* Check if TXQ queue needs to be serviced */
- if (pring->txq_cnt)
+ if (!list_empty(&pring->txq))
lpfc_worker_wake_up(phba);
}
}
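
[Note: dropping pring->txq_cnt in favor of list_empty(&pring->txq) removes a counter that had to be kept in lock-step with the list. The kernel's list_head makes emptiness an O(1) pointer check; a stripped-down model:]

	#include <stdbool.h>

	/* Miniature list_head, after the kernel's: an empty list points to
	 * itself. */
	struct list_head { struct list_head *next, *prev; };

	static void init_list_head(struct list_head *h) { h->next = h->prev = h; }

	static bool list_empty(const struct list_head *h) { return h->next == h; }

	/* One source of truth: no separate txq_cnt to increment and decrement
	 * under the lock, so the count can never drift from the list contents. */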
@@ -1038,6 +1090,7 @@ __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
+
/*
* Clean all volatile data fields, preserve iotag and node struct.
*/
@@ -1104,7 +1157,6 @@ lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
while (!list_empty(iocblist)) {
list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
-
if (!piocb->iocb_cmpl)
lpfc_sli_release_iocbq(phba, piocb);
else {
@@ -1292,18 +1344,17 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
{
list_add_tail(&piocb->list, &pring->txcmplq);
piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
- pring->txcmplq_cnt++;
- if (pring->txcmplq_cnt > pring->txcmplq_max)
- pring->txcmplq_max = pring->txcmplq_cnt;
if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
(piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
- (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
+ (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) &&
+ (!(piocb->vport->load_flag & FC_UNLOADING))) {
if (!piocb->vport)
BUG();
else
mod_timer(&piocb->vport->els_tmofunc,
- jiffies + HZ * (phba->fc_ratov << 1));
+ jiffies +
+ msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
}
@@ -1326,8 +1377,6 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
struct lpfc_iocbq *cmd_iocb;
list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
- if (cmd_iocb != NULL)
- pring->txq_cnt--;
return cmd_iocb;
}
@@ -1596,8 +1645,9 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
* (c) link attention events can be processed (fcp ring only)
* (d) IOCB processing is not blocked by the outstanding mbox command.
*/
- if (pring->txq_cnt &&
- lpfc_is_link_up(phba) &&
+
+ if (lpfc_is_link_up(phba) &&
+ (!list_empty(&pring->txq)) &&
(pring->ringno != phba->sli.fcp_ring ||
phba->sli.sli_flag & LPFC_PROCESS_LA)) {
@@ -2305,7 +2355,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
/* Mailbox cmd <cmd> Cmpl <cmpl> */
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
- "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+ "x%x x%x x%x\n",
pmb->vport ? pmb->vport->vpi : 0,
pmbox->mbxCommand,
lpfc_sli_config_mbox_subsys_get(phba, pmb),
@@ -2319,7 +2370,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
pmbox->un.varWords[4],
pmbox->un.varWords[5],
pmbox->un.varWords[6],
- pmbox->un.varWords[7]);
+ pmbox->un.varWords[7],
+ pmbox->un.varWords[8],
+ pmbox->un.varWords[9],
+ pmbox->un.varWords[10]);
if (pmb->mbox_cmpl)
pmb->mbox_cmpl(phba,pmb);
@@ -2594,7 +2648,6 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
cmd_iocb = phba->sli.iocbq_lookup[iotag];
list_del_init(&cmd_iocb->list);
if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
- pring->txcmplq_cnt--;
cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
}
return cmd_iocb;
@@ -2632,7 +2685,6 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
/* remove from txcmpl queue list */
list_del_init(&cmd_iocb->list);
cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
- pring->txcmplq_cnt--;
return cmd_iocb;
}
}
@@ -2875,8 +2927,9 @@ void lpfc_poll_eratt(unsigned long ptr)
lpfc_worker_wake_up(phba);
else
/* Restart the timer for next eratt poll */
- mod_timer(&phba->eratt_poll, jiffies +
- HZ * LPFC_ERATT_POLL_INTERVAL);
+ mod_timer(&phba->eratt_poll,
+ jiffies +
+ msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
return;
}
@@ -3240,7 +3293,7 @@ lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (free_saveq) {
list_for_each_entry_safe(rspiocbp, next_iocb,
&saveq->list, list) {
- list_del(&rspiocbp->list);
+ list_del_init(&rspiocbp->list);
__lpfc_sli_release_iocbq(phba, rspiocbp);
}
__lpfc_sli_release_iocbq(phba, saveq);
@@ -3479,15 +3532,27 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
/* Error everything on txq and txcmplq
* First do the txq.
*/
- spin_lock_irq(&phba->hbalock);
- list_splice_init(&pring->txq, &completions);
- pring->txq_cnt = 0;
+ if (phba->sli_rev >= LPFC_SLI_REV4) {
+ spin_lock_irq(&pring->ring_lock);
+ list_splice_init(&pring->txq, &completions);
+ pring->txq_cnt = 0;
+ spin_unlock_irq(&pring->ring_lock);
- /* Next issue ABTS for everything on the txcmplq */
- list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ spin_lock_irq(&phba->hbalock);
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ spin_unlock_irq(&phba->hbalock);
+ } else {
+ spin_lock_irq(&phba->hbalock);
+ list_splice_init(&pring->txq, &completions);
+ pring->txq_cnt = 0;
- spin_unlock_irq(&phba->hbalock);
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ spin_unlock_irq(&phba->hbalock);
+ }
/* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
@@ -3495,6 +3560,36 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
}
/**
+ * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
+ * @phba: Pointer to HBA context object.
+ *
+ * This function aborts all iocbs in the FCP rings and frees all the iocb
+ * objects in txq. It issues an abort iocb for all the iocb commands in
+ * txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
+ * the return of this function. The caller is not required to hold any locks.
+ **/
+void
+lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+ uint32_t i;
+
+ /* Look on all the FCP Rings for the iotag */
+ if (phba->sli_rev >= LPFC_SLI_REV4) {
+ for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
+ pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
+ lpfc_sli_abort_iocb_ring(phba, pring);
+ }
+ } else {
+ pring = &psli->ring[psli->fcp_ring];
+ lpfc_sli_abort_iocb_ring(phba, pring);
+ }
+}
+
+
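[Note: the SLI-4 branch above assumes the ring-array layout used throughout this patch: the first MAX_SLI3_CONFIGURED_RINGS slots are the legacy rings, and each FCP io_channel owns one slot after them. A sketch with illustrative sizes:]

	#define MODEL_SLI3_RINGS   4	/* stand-in for MAX_SLI3_CONFIGURED_RINGS */
	#define MODEL_IO_CHANNELS  8	/* stand-in for phba->cfg_fcp_io_channel  */

	/* FCP channel i lives at ring index MODEL_SLI3_RINGS + i */
	static int fcp_ring_index(int channel)
	{
		return MODEL_SLI3_RINGS + channel;
	}
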
+/**
* lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
* @phba: Pointer to HBA context object.
*
@@ -3511,30 +3606,55 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
LIST_HEAD(txcmplq);
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
-
- /* Currently, only one fcp ring */
- pring = &psli->ring[psli->fcp_ring];
+ uint32_t i;
spin_lock_irq(&phba->hbalock);
- /* Retrieve everything on txq */
- list_splice_init(&pring->txq, &txq);
- pring->txq_cnt = 0;
-
- /* Retrieve everything on the txcmplq */
- list_splice_init(&pring->txcmplq, &txcmplq);
- pring->txcmplq_cnt = 0;
-
/* Indicate the I/O queues are flushed */
phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
spin_unlock_irq(&phba->hbalock);
- /* Flush the txq */
- lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
- IOERR_SLI_DOWN);
+ /* Look on all the FCP Rings for the iotag */
+ if (phba->sli_rev >= LPFC_SLI_REV4) {
+ for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
+ pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
+
+ spin_lock_irq(&pring->ring_lock);
+ /* Retrieve everything on txq */
+ list_splice_init(&pring->txq, &txq);
+ /* Retrieve everything on the txcmplq */
+ list_splice_init(&pring->txcmplq, &txcmplq);
+ pring->txq_cnt = 0;
+ pring->txcmplq_cnt = 0;
+ spin_unlock_irq(&pring->ring_lock);
+
+ /* Flush the txq */
+ lpfc_sli_cancel_iocbs(phba, &txq,
+ IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_DOWN);
+ /* Flush the txcmpq */
+ lpfc_sli_cancel_iocbs(phba, &txcmplq,
+ IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_DOWN);
+ }
+ } else {
+ pring = &psli->ring[psli->fcp_ring];
- /* Flush the txcmpq */
- lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
- IOERR_SLI_DOWN);
+ spin_lock_irq(&phba->hbalock);
+ /* Retrieve everything on txq */
+ list_splice_init(&pring->txq, &txq);
+ /* Retrieve everything on the txcmplq */
+ list_splice_init(&pring->txcmplq, &txcmplq);
+ pring->txq_cnt = 0;
+ pring->txcmplq_cnt = 0;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Flush the txq */
+ lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_DOWN);
+ /* Flush the txcmpq */
+ lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_DOWN);
+ }
}
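
[Note: both branches follow the same shape: detach everything from the shared queues while holding the lock, then complete the detached entries with no lock held, keeping lock hold time short. A userspace sketch of that pattern with pthreads; all names are invented:]

	#include <pthread.h>

	struct node { struct node *next; };

	struct flush_queue {
		pthread_mutex_t lock;
		struct node *head;
	};

	/* Splice under the lock, process outside it. */
	static void flush(struct flush_queue *q, void (*cancel)(struct node *))
	{
		struct node *batch;

		pthread_mutex_lock(&q->lock);
		batch = q->head;	/* steal the whole list... */
		q->head = NULL;		/* ...and leave the queue empty */
		pthread_mutex_unlock(&q->lock);

		while (batch) {		/* no lock held while completing */
			struct node *n = batch;
			batch = n->next;
			cancel(n);
		}
	}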
/**
@@ -3937,12 +4057,13 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
uint16_t cfg_value;
- int rc;
+ int rc = 0;
/* Reset HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "0295 Reset HBA Data: x%x x%x\n",
- phba->pport->port_state, psli->sli_flag);
+ "0295 Reset HBA Data: x%x x%x x%x\n",
+ phba->pport->port_state, psli->sli_flag,
+ phba->hba_flag);
/* perform board reset */
phba->fc_eventTag = 0;
@@ -3955,6 +4076,12 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
phba->fcf.fcf_flag = 0;
spin_unlock_irq(&phba->hbalock);
+ /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
+ if (phba->hba_flag & HBA_FW_DUMP_OP) {
+ phba->hba_flag &= ~HBA_FW_DUMP_OP;
+ return rc;
+ }
+
/* Now physically reset the device */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0389 Performing PCI function reset!\n");
@@ -4548,7 +4675,8 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2708 This device does not support "
- "Advanced Error Reporting (AER)\n");
+ "Advanced Error Reporting (AER): %d\n",
+ rc);
phba->cfg_aer_support = 0;
}
}
@@ -4939,7 +5067,7 @@ out_free_mboxq:
static void
lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
{
- uint8_t fcp_eqidx;
+ int fcp_eqidx;
lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
@@ -4950,12 +5078,19 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
LPFC_QUEUE_REARM);
} while (++fcp_eqidx < phba->cfg_fcp_io_channel);
}
+
+ if (phba->cfg_fof)
+ lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
+
if (phba->sli4_hba.hba_eq) {
for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
fcp_eqidx++)
lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
LPFC_QUEUE_REARM);
}
+
+ if (phba->cfg_fof)
+ lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM);
}
/**
@@ -5481,6 +5616,7 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
list_del_init(&rsrc_blk->list);
kfree(rsrc_blk);
}
+ phba->sli4_hba.max_cfg_param.vpi_used = 0;
break;
case LPFC_RSC_TYPE_FCOE_XRI:
kfree(phba->sli4_hba.xri_bmask);
@@ -5622,6 +5758,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
}
/* RPIs. */
count = phba->sli4_hba.max_cfg_param.max_rpi;
+ if (count <= 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3279 Invalid provisioning of "
+ "rpi:%d\n", count);
+ rc = -EINVAL;
+ goto err_exit;
+ }
base = phba->sli4_hba.max_cfg_param.rpi_base;
longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
phba->sli4_hba.rpi_bmask = kzalloc(longs *
@@ -5644,6 +5787,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
/* VPIs. */
count = phba->sli4_hba.max_cfg_param.max_vpi;
+ if (count <= 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3280 Invalid provisioning of "
+ "vpi:%d\n", count);
+ rc = -EINVAL;
+ goto free_rpi_ids;
+ }
base = phba->sli4_hba.max_cfg_param.vpi_base;
longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
phba->vpi_bmask = kzalloc(longs *
@@ -5666,6 +5816,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
/* XRIs. */
count = phba->sli4_hba.max_cfg_param.max_xri;
+ if (count <= 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3281 Invalid provisioning of "
+ "xri:%d\n", count);
+ rc = -EINVAL;
+ goto free_vpi_ids;
+ }
base = phba->sli4_hba.max_cfg_param.xri_base;
longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
phba->sli4_hba.xri_bmask = kzalloc(longs *
@@ -5689,6 +5846,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
/* VFIs. */
count = phba->sli4_hba.max_cfg_param.max_vfi;
+ if (count <= 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3282 Invalid provisioning of "
+ "vfi:%d\n", count);
+ rc = -EINVAL;
+ goto free_xri_ids;
+ }
base = phba->sli4_hba.max_cfg_param.vfi_base;
longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
phba->sli4_hba.vfi_bmask = kzalloc(longs *
@@ -5753,6 +5917,7 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
} else {
kfree(phba->vpi_bmask);
+ phba->sli4_hba.max_cfg_param.vpi_used = 0;
kfree(phba->vpi_ids);
bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
kfree(phba->sli4_hba.xri_bmask);
@@ -5934,7 +6099,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
struct lpfc_sglq *sglq_entry = NULL;
struct lpfc_sglq *sglq_entry_next = NULL;
struct lpfc_sglq *sglq_entry_first = NULL;
- int status, post_cnt = 0, num_posted = 0, block_cnt = 0;
+ int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0;
int last_xritag = NO_XRI;
LIST_HEAD(prep_sgl_list);
LIST_HEAD(blck_sgl_list);
@@ -5942,10 +6107,11 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
LIST_HEAD(post_sgl_list);
LIST_HEAD(free_sgl_list);
- spin_lock(&phba->hbalock);
+ spin_lock_irq(&phba->hbalock);
list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
- spin_unlock(&phba->hbalock);
+ spin_unlock_irq(&phba->hbalock);
+ total_cnt = phba->sli4_hba.els_xri_cnt;
list_for_each_entry_safe(sglq_entry, sglq_entry_next,
&allc_sgl_list, list) {
list_del_init(&sglq_entry->list);
@@ -5997,9 +6163,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
sglq_entry->sli4_xritag);
list_add_tail(&sglq_entry->list,
&free_sgl_list);
- spin_lock_irq(&phba->hbalock);
- phba->sli4_hba.els_xri_cnt--;
- spin_unlock_irq(&phba->hbalock);
+ total_cnt--;
}
}
}
@@ -6027,9 +6191,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
(sglq_entry_first->sli4_xritag +
post_cnt - 1));
list_splice_init(&blck_sgl_list, &free_sgl_list);
- spin_lock_irq(&phba->hbalock);
- phba->sli4_hba.els_xri_cnt -= post_cnt;
- spin_unlock_irq(&phba->hbalock);
+ total_cnt -= post_cnt;
}
/* don't reset xritag due to hole in xri block */
@@ -6039,16 +6201,18 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
/* reset els sgl post count for next round of posting */
post_cnt = 0;
}
+ /* update the number of XRIs posted for ELS */
+ phba->sli4_hba.els_xri_cnt = total_cnt;
/* free the els sgls failed to post */
lpfc_free_sgl_list(phba, &free_sgl_list);
/* push els sgls posted to the available list */
if (!list_empty(&post_sgl_list)) {
- spin_lock(&phba->hbalock);
+ spin_lock_irq(&phba->hbalock);
list_splice_init(&post_sgl_list,
&phba->sli4_hba.lpfc_sgl_list);
- spin_unlock(&phba->hbalock);
+ spin_unlock_irq(&phba->hbalock);
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3161 Failure to post els sgl to port.\n");
@@ -6115,6 +6279,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
kfree(vpd);
goto out_free_mbox;
}
+
mqe = &mboxq->u.mqe;
phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
@@ -6201,6 +6366,16 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
+ /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
+ rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
+ if (phba->pport->cfg_lun_queue_depth > rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3362 LUN queue depth changed from %d to %d\n",
+ phba->pport->cfg_lun_queue_depth, rc);
+ phba->pport->cfg_lun_queue_depth = rc;
+ }
+
+
/*
* Discover the port's supported feature set and match it against the
* host's requests.
@@ -6388,16 +6563,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Start the ELS watchdog timer */
mod_timer(&vport->els_tmofunc,
- jiffies + HZ * (phba->fc_ratov * 2));
+ jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
/* Start heart beat timer */
mod_timer(&phba->hb_tmofunc,
- jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+ jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
phba->hb_outstanding = 0;
phba->last_completion_time = jiffies;
/* Start error attention (ERATT) polling timer */
- mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
+ mod_timer(&phba->eratt_poll,
+ jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
/* Enable PCIe device Advanced Error Reporting (AER) if configured */
if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
@@ -6506,6 +6682,108 @@ lpfc_mbox_timeout(unsigned long ptr)
return;
}
+/**
+ * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
+ * are pending
+ * @phba: Pointer to HBA context object.
+ *
+ * This function checks if any mailbox completions are present on the mailbox
+ * completion queue.
+ **/
+bool
+lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
+{
+
+ uint32_t idx;
+ struct lpfc_queue *mcq;
+ struct lpfc_mcqe *mcqe;
+ bool pending_completions = false;
+
+ if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
+ return false;
+
+ /* Check for completions on mailbox completion queue */
+
+ mcq = phba->sli4_hba.mbx_cq;
+ idx = mcq->hba_index;
+ while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) {
+ mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
+ if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
+ (!bf_get_le32(lpfc_trailer_async, mcqe))) {
+ pending_completions = true;
+ break;
+ }
+ idx = (idx + 1) % mcq->entry_count;
+ if (mcq->hba_index == idx)
+ break;
+ }
+ return pending_completions;
+
+}
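
[Note: a compact model of the scan above: walk the circular completion queue from hba_index, stop at the first invalid entry, and never loop more than one full lap. The bitfields here are illustrative, not the real MCQE layout:]

	#include <stdbool.h>
	#include <stdint.h>

	struct model_mcqe {
		uint32_t valid     : 1;
		uint32_t async     : 1;	/* async event, not a command completion */
		uint32_t completed : 1;
	};

	static bool completions_pending(const struct model_mcqe *q,
					uint32_t hba_index, uint32_t entry_count)
	{
		uint32_t idx = hba_index;

		while (q[idx].valid) {
			if (q[idx].completed && !q[idx].async)
				return true;	/* a mailbox completion waits */
			idx = (idx + 1) % entry_count;
			if (idx == hba_index)	/* wrapped: stop after one lap */
				break;
		}
		return false;
	}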
+
+/**
+ * lpfc_sli4_process_missed_mbox_completions - process mbox completions
+ * that were missed.
+ * @phba: Pointer to HBA context object.
+ *
+ * For sli4, it is possible to miss an interrupt. As such, mbox completions
+ * may be missed, causing erroneous mailbox timeouts to occur. This function
+ * checks to see if mbox completions are on the mailbox completion queue
+ * and will process all the completions associated with the eq for the
+ * mailbox completion queue.
+ **/
+bool
+lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
+{
+
+ uint32_t eqidx;
+ struct lpfc_queue *fpeq = NULL;
+ struct lpfc_eqe *eqe;
+ bool mbox_pending;
+
+ if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
+ return false;
+
+ /* Find the eq associated with the mcq */
+
+ if (phba->sli4_hba.hba_eq)
+ for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++)
+ if (phba->sli4_hba.hba_eq[eqidx]->queue_id ==
+ phba->sli4_hba.mbx_cq->assoc_qid) {
+ fpeq = phba->sli4_hba.hba_eq[eqidx];
+ break;
+ }
+ if (!fpeq)
+ return false;
+
+ /* Turn off interrupts from this EQ */
+
+ lpfc_sli4_eq_clr_intr(fpeq);
+
+ /* Check to see if a mbox completion is pending */
+
+ mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
+
+ /*
+ * If a mbox completion is pending, process all the events on EQ
+ * associated with the mbox completion queue (this could include
+ * mailbox commands, async events, els commands, receive queue data
+ * and fcp commands)
+ */
+
+ if (mbox_pending)
+ while ((eqe = lpfc_sli4_eq_get(fpeq))) {
+ lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
+ fpeq->EQ_processed++;
+ }
+
+ /* Always clear and re-arm the EQ */
+
+ lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
+
+ return mbox_pending;
+
+}
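
[Note: the recovery order is the interesting part: mask the EQ first so no new interrupt races the check, peek at the CQ, drain the EQ only if something was missed, and re-arm unconditionally. A toy model of that sequence; all types and names invented:]

	#include <stdbool.h>

	struct model_eq { int pending_events; bool armed; };
	struct model_cq { bool has_completion; };

	static bool recover_missed(struct model_eq *eq, struct model_cq *mbx_cq)
	{
		bool pending;

		eq->armed = false;			/* 1. mask the EQ interrupt  */
		pending = mbx_cq->has_completion;	/* 2. peek at the mailbox CQ */
		if (pending)
			while (eq->pending_events > 0)	/* 3. drain every EQ event   */
				eq->pending_events--;	/*    (handler runs per event) */
		eq->armed = true;			/* 4. always re-arm the EQ   */
		return pending;
	}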
/**
* lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
@@ -6521,7 +6799,10 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
MAILBOX_t *mb = &pmbox->u.mb;
struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring;
+
+ /* If the mailbox completed, process the completion and return */
+ if (lpfc_sli4_process_missed_mbox_completions(phba))
+ return;
/* Check the pmbox pointer first. There is a race condition
* between the mbox timeout handler getting executed in the
@@ -6559,8 +6840,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
psli->sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
- pring = &psli->ring[psli->fcp_ring];
- lpfc_sli_abort_iocb_ring(phba, pring);
+ lpfc_sli_abort_fcp_rings(phba);
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0345 Resetting board due to mailbox timeout\n");
@@ -6599,7 +6879,7 @@ static int
lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
uint32_t flag)
{
- MAILBOX_t *mb;
+ MAILBOX_t *mbx;
struct lpfc_sli *psli = &phba->sli;
uint32_t status, evtctr;
uint32_t ha_copy, hc_copy;
@@ -6653,7 +6933,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
psli = &phba->sli;
- mb = &pmbox->u.mb;
+ mbx = &pmbox->u.mb;
status = MBX_SUCCESS;
if (phba->link_state == LPFC_HBA_ERROR) {
@@ -6668,7 +6948,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
goto out_not_finished;
}
- if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
+ if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
!(hc_copy & HC_MBINT_ENA)) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
@@ -6722,7 +7002,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
"(%d):0308 Mbox cmd issue - BUSY Data: "
"x%x x%x x%x x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0xffffff,
- mb->mbxCommand, phba->pport->port_state,
+ mbx->mbxCommand, phba->pport->port_state,
psli->sli_flag, flag);
psli->slistat.mbox_busy++;
@@ -6732,15 +7012,15 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
lpfc_debugfs_disc_trc(pmbox->vport,
LPFC_DISC_TRC_MBOX_VPORT,
"MBOX Bsy vport: cmd:x%x mb:x%x x%x",
- (uint32_t)mb->mbxCommand,
- mb->un.varWords[0], mb->un.varWords[1]);
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
}
else {
lpfc_debugfs_disc_trc(phba->pport,
LPFC_DISC_TRC_MBOX,
"MBOX Bsy: cmd:x%x mb:x%x x%x",
- (uint32_t)mb->mbxCommand,
- mb->un.varWords[0], mb->un.varWords[1]);
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
}
return MBX_BUSY;
@@ -6751,7 +7031,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
/* If we are not polling, we MUST be in SLI2 mode */
if (flag != MBX_POLL) {
if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
- (mb->mbxCommand != MBX_KILL_BOARD)) {
+ (mbx->mbxCommand != MBX_KILL_BOARD)) {
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
@@ -6764,8 +7044,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
goto out_not_finished;
}
/* timeout active mbox command */
- mod_timer(&psli->mbox_tmo, (jiffies +
- (HZ * lpfc_mbox_tmo_val(phba, pmbox))));
+ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
+ 1000);
+ mod_timer(&psli->mbox_tmo, jiffies + timeout);
}
/* Mailbox cmd <cmd> issue */
@@ -6773,23 +7054,23 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
"x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0,
- mb->mbxCommand, phba->pport->port_state,
+ mbx->mbxCommand, phba->pport->port_state,
psli->sli_flag, flag);
- if (mb->mbxCommand != MBX_HEARTBEAT) {
+ if (mbx->mbxCommand != MBX_HEARTBEAT) {
if (pmbox->vport) {
lpfc_debugfs_disc_trc(pmbox->vport,
LPFC_DISC_TRC_MBOX_VPORT,
"MBOX Send vport: cmd:x%x mb:x%x x%x",
- (uint32_t)mb->mbxCommand,
- mb->un.varWords[0], mb->un.varWords[1]);
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
}
else {
lpfc_debugfs_disc_trc(phba->pport,
LPFC_DISC_TRC_MBOX,
"MBOX Send: cmd:x%x mb:x%x x%x",
- (uint32_t)mb->mbxCommand,
- mb->un.varWords[0], mb->un.varWords[1]);
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
}
}
@@ -6797,12 +7078,12 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
evtctr = psli->slistat.mbox_event;
/* next set own bit for the adapter and copy over command word */
- mb->mbxOwner = OWN_CHIP;
+ mbx->mbxOwner = OWN_CHIP;
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* Populate mbox extension offset word. */
if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
- *(((uint32_t *)mb) + pmbox->mbox_offset_word)
+ *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
= (uint8_t *)phba->mbox_ext
- (uint8_t *)phba->mbox;
}
@@ -6814,11 +7095,11 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
pmbox->in_ext_byte_len);
}
/* Copy command data to host SLIM area */
- lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
+ lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
} else {
/* Populate mbox extension offset word. */
if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
- *(((uint32_t *)mb) + pmbox->mbox_offset_word)
+ *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
= MAILBOX_HBA_EXT_OFFSET;
/* Copy the mailbox extension data */
@@ -6828,24 +7109,24 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
pmbox->context2, pmbox->in_ext_byte_len);
}
- if (mb->mbxCommand == MBX_CONFIG_PORT) {
+ if (mbx->mbxCommand == MBX_CONFIG_PORT) {
/* copy command data into host mbox for cmpl */
- lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
+ lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
}
/* First copy mbox command data to HBA SLIM, skip past first
word */
to_slim = phba->MBslimaddr + sizeof (uint32_t);
- lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
+ lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
MAILBOX_CMD_SIZE - sizeof (uint32_t));
/* Next copy over first word, with mbxOwner set */
- ldata = *((uint32_t *)mb);
+ ldata = *((uint32_t *)mbx);
to_slim = phba->MBslimaddr;
writel(ldata, to_slim);
readl(to_slim); /* flush */
- if (mb->mbxCommand == MBX_CONFIG_PORT) {
+ if (mbx->mbxCommand == MBX_CONFIG_PORT) {
/* switch over to host mailbox */
psli->sli_flag |= LPFC_SLI_ACTIVE;
}
@@ -6920,7 +7201,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
/* First copy command data */
word0 = *((uint32_t *)phba->mbox);
word0 = le32_to_cpu(word0);
- if (mb->mbxCommand == MBX_CONFIG_PORT) {
+ if (mbx->mbxCommand == MBX_CONFIG_PORT) {
MAILBOX_t *slimmb;
uint32_t slimword0;
/* Check real SLIM for any errors */
@@ -6947,7 +7228,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* copy results back to user */
- lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
+ lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE);
/* Copy the mailbox extension data */
if (pmbox->out_ext_byte_len && pmbox->context2) {
lpfc_sli_pcimem_bcopy(phba->mbox_ext,
@@ -6956,7 +7237,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
}
} else {
/* First copy command data */
- lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
+ lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
MAILBOX_CMD_SIZE);
/* Copy the mailbox extension data */
if (pmbox->out_ext_byte_len && pmbox->context2) {
@@ -6971,7 +7252,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
readl(phba->HAregaddr); /* flush */
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
- status = mb->mbxStatus;
+ status = mbx->mbxStatus;
}
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
@@ -7016,6 +7297,10 @@ lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
1000) + jiffies;
spin_unlock_irq(&phba->hbalock);
+ /* Make sure the mailbox is really active */
+ if (timeout)
+ lpfc_sli4_process_missed_mbox_completions(phba);
+
/* Wait for the outstanding mailbox command to complete */
while (phba->sli.mbox_active) {
/* Check active mailbox complete status every 2ms */
@@ -7438,7 +7723,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
/* Start timer for the mbox_tmo and log some mailbox post messages */
mod_timer(&psli->mbox_tmo, (jiffies +
- (HZ * lpfc_mbox_tmo_val(phba, mboxq))));
+ msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
@@ -7569,7 +7854,6 @@ __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
{
/* Insert the caller's iocb in the txq tail for later processing. */
list_add_tail(&piocb->list, &pring->txq);
- pring->txq_cnt++;
}
/**
@@ -7857,15 +8141,22 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
static inline uint32_t
lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
{
- int i;
-
- if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU)
- i = smp_processor_id();
- else
- i = atomic_add_return(1, &phba->fcp_qidx);
-
- i = (i % phba->cfg_fcp_io_channel);
- return i;
+ struct lpfc_vector_map_info *cpup;
+ int chann, cpu;
+
+ if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
+ && phba->cfg_fcp_io_channel > 1) {
+ cpu = smp_processor_id();
+ if (cpu < phba->sli4_hba.num_present_cpu) {
+ cpup = phba->sli4_hba.cpu_map;
+ cpup += cpu;
+ return cpup->channel_id;
+ }
+ chann = cpu;
+ }
+ chann = atomic_add_return(1, &phba->fcp_qidx);
+ chann = (chann % phba->cfg_fcp_io_channel);
+ return chann;
}
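
[Note: the rewritten distribution routine prefers the submitting CPU's own channel, which keeps completions local to the issuing CPU, and falls back to round-robin only when the CPU is outside the map. An illustrative sketch with invented sizes:]

	#include <stdint.h>

	#define MODEL_CPUS      8
	#define MODEL_CHANNELS  4

	/* stand-in for phba->sli4_hba.cpu_map[cpu].channel_id */
	static const uint16_t channel_of_cpu[MODEL_CPUS] = { 0, 1, 2, 3,
							     0, 1, 2, 3 };

	static unsigned int pick_channel(int cpu, unsigned int *rr_counter)
	{
		if (cpu >= 0 && cpu < MODEL_CPUS)
			return channel_of_cpu[cpu];	 /* affinity-mapped */
		return (*rr_counter)++ % MODEL_CHANNELS; /* round-robin fallback */
	}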
/**
@@ -7917,6 +8208,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
abort_tag = (uint32_t) iocbq->iotag;
xritag = iocbq->sli4_xritag;
wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
+ wqe->generic.wqe_com.word10 = 0;
/* words0-2 bpl convert bde */
if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
@@ -8010,6 +8302,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
+ wqe->els_req.max_response_payload_len = total_len - xmit_len;
break;
case CMD_XMIT_SEQUENCE64_CX:
bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
@@ -8054,8 +8347,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
command_type = FCP_COMMAND_DATA_OUT;
/* word3 iocb=iotag wqe=payload_offset_len */
/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
- wqe->fcp_iwrite.payload_offset_len =
- xmit_len + sizeof(struct fcp_rsp);
+ bf_set(payload_offset_len, &wqe->fcp_iwrite,
+ xmit_len + sizeof(struct fcp_rsp));
+ bf_set(cmd_buff_len, &wqe->fcp_iwrite,
+ 0);
/* word4 iocb=parameter wqe=total_xfer_length memcpy */
/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
@@ -8069,12 +8364,22 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
+ if (iocbq->iocb_flag & LPFC_IO_OAS) {
+ bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
+ if (phba->cfg_XLanePriority) {
+ bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
+ (phba->cfg_XLanePriority << 1));
+ }
+ }
break;
case CMD_FCP_IREAD64_CR:
/* word3 iocb=iotag wqe=payload_offset_len */
/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
- wqe->fcp_iread.payload_offset_len =
- xmit_len + sizeof(struct fcp_rsp);
+ bf_set(payload_offset_len, &wqe->fcp_iread,
+ xmit_len + sizeof(struct fcp_rsp));
+ bf_set(cmd_buff_len, &wqe->fcp_iread,
+ 0);
/* word4 iocb=parameter wqe=total_xfer_length memcpy */
/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
@@ -8088,10 +8393,23 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
+ if (iocbq->iocb_flag & LPFC_IO_OAS) {
+ bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
+ if (phba->cfg_XLanePriority) {
+ bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
+ (phba->cfg_XLanePriority << 1));
+ }
+ }
break;
case CMD_FCP_ICMND64_CR:
+ /* word3 iocb=iotag wqe=payload_offset_len */
+ /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
+ bf_set(payload_offset_len, &wqe->fcp_icmd,
+ xmit_len + sizeof(struct fcp_rsp));
+ bf_set(cmd_buff_len, &wqe->fcp_icmd,
+ 0);
/* word3 iocb=IO_TAG wqe=reserved */
- wqe->fcp_icmd.rsrvd3 = 0;
bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
/* Always open the exchange */
bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
@@ -8103,6 +8421,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
iocbq->iocb.ulpFCP2Rcvy);
+ if (iocbq->iocb_flag & LPFC_IO_OAS) {
+ bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
+ if (phba->cfg_XLanePriority) {
+ bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
+ (phba->cfg_XLanePriority << 1));
+ }
+ }
break;
case CMD_GEN_REQUEST64_CR:
/* For this command calculate the xmit length of the
@@ -8137,6 +8463,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
+ wqe->gen_req.max_response_payload_len = total_len - xmit_len;
command_type = OTHER_COMMAND;
break;
case CMD_XMIT_ELS_RSP64_CX:
@@ -8334,6 +8661,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
{
struct lpfc_sglq *sglq;
union lpfc_wqe wqe;
+ struct lpfc_queue *wq;
struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
if (piocb->sli4_xritag == NO_XRI) {
@@ -8341,7 +8669,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
sglq = NULL;
else {
- if (pring->txq_cnt) {
+ if (!list_empty(&pring->txq)) {
if (!(flag & SLI_IOCB_RET_IOCB)) {
__lpfc_sli_ringtx_put(phba,
pring, piocb);
@@ -8370,7 +8698,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
* This is a continuation of a command (CX), so this
* sglq is on the active list
*/
- sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
+ sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
if (!sglq)
return IOCB_ERROR;
}
@@ -8386,11 +8714,17 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
return IOCB_ERROR;
if ((piocb->iocb_flag & LPFC_IO_FCP) ||
- (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
- if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
- &wqe))
+ (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+ if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) {
+ wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx];
+ } else {
+ wq = phba->sli4_hba.oas_wq;
+ }
+ if (lpfc_sli4_wq_put(wq, &wqe))
return IOCB_ERROR;
} else {
+ if (unlikely(!phba->sli4_hba.els_wq))
+ return IOCB_ERROR;
if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
return IOCB_ERROR;
}
@@ -8476,12 +8810,20 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
if (phba->sli_rev == LPFC_SLI_REV4) {
if (piocb->iocb_flag & LPFC_IO_FCP) {
- if (unlikely(!phba->sli4_hba.fcp_wq))
- return IOCB_ERROR;
- idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
- piocb->fcp_wqidx = idx;
- ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
-
+ if (!phba->cfg_fof || (!(piocb->iocb_flag &
+ LPFC_IO_OAS))) {
+ if (unlikely(!phba->sli4_hba.fcp_wq))
+ return IOCB_ERROR;
+ idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
+ piocb->fcp_wqidx = idx;
+ ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
+ } else {
+ if (unlikely(!phba->sli4_hba.oas_wq))
+ return IOCB_ERROR;
+ idx = 0;
+ piocb->fcp_wqidx = 0;
+ ring_number = LPFC_FCP_OAS_RING;
+ }
pring = &phba->sli.ring[ring_number];
spin_lock_irqsave(&pring->ring_lock, iflags);
rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
@@ -8655,7 +8997,7 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"3116 Port generated FCP XRI ABORT event on "
"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
- ndlp->vport->vpi, ndlp->nlp_rpi,
+ ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
bf_get(lpfc_wcqe_xa_xri, axri),
bf_get(lpfc_wcqe_xa_status, axri),
axri->parameter);
@@ -8855,12 +9197,6 @@ lpfc_sli_setup(struct lpfc_hba *phba)
pring->prt[3].type = FC_TYPE_CT;
pring->prt[3].lpfc_sli_rcv_unsol_event =
lpfc_ct_unsol_event;
- /* abort unsolicited sequence */
- pring->prt[4].profile = 0; /* Mask 4 */
- pring->prt[4].rctl = FC_RCTL_BA_ABTS;
- pring->prt[4].type = FC_TYPE_BLS;
- pring->prt[4].lpfc_sli_rcv_unsol_event =
- lpfc_sli4_ct_abort_unsol_event;
break;
}
totiocbsize += (pring->sli.sli3.numCiocb *
@@ -8909,6 +9245,7 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
pring->sli.sli3.next_cmdidx = 0;
pring->sli.sli3.local_getidx = 0;
pring->sli.sli3.cmdidx = 0;
+ pring->flag = 0;
INIT_LIST_HEAD(&pring->txq);
INIT_LIST_HEAD(&pring->txcmplq);
INIT_LIST_HEAD(&pring->iocb_continueq);
@@ -9015,7 +9352,6 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
if (iocb->vport != vport)
continue;
list_move_tail(&iocb->list, &completions);
- pring->txq_cnt--;
}
/* Next issue ABTS for everything on the txcmplq */
@@ -9084,8 +9420,6 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
* given to the FW yet.
*/
list_splice_init(&pring->txq, &completions);
- pring->txq_cnt = 0;
-
}
spin_unlock_irqrestore(&phba->hbalock, flags);
@@ -9547,43 +9881,6 @@ abort_iotag_exit:
}
/**
- * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
- * @phba: Pointer to HBA context object.
- * @pring: Pointer to driver SLI ring object.
- *
- * This function aborts all iocbs in the given ring and frees all the iocb
- * objects in txq. This function issues abort iocbs unconditionally for all
- * the iocb commands in txcmplq. The iocbs in the txcmplq is not guaranteed
- * to complete before the return of this function. The caller is not required
- * to hold any locks.
- **/
-static void
-lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
-{
- LIST_HEAD(completions);
- struct lpfc_iocbq *iocb, *next_iocb;
-
- if (pring->ringno == LPFC_ELS_RING)
- lpfc_fabric_abort_hba(phba);
-
- spin_lock_irq(&phba->hbalock);
-
- /* Take off all the iocbs on txq for cancelling */
- list_splice_init(&pring->txq, &completions);
- pring->txq_cnt = 0;
-
- /* Next issue ABTS for everything on the txcmplq */
- list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
- lpfc_sli_abort_iotag_issue(phba, pring, iocb);
-
- spin_unlock_irq(&phba->hbalock);
-
- /* Cancel all the IOCBs from the completions list */
- lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
- IOERR_SLI_ABORTED);
-}
-
-/**
* lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
* @phba: pointer to lpfc HBA data structure.
*
@@ -9598,7 +9895,7 @@ lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
- lpfc_sli_iocb_ring_abort(phba, pring);
+ lpfc_sli_abort_iocb_ring(phba, pring);
}
}
@@ -9720,7 +10017,7 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "3096 ABORT_XRI_CN completing on xri x%x "
+ "3096 ABORT_XRI_CN completing on rpi x%x "
"original iotag x%x, abort cmd iotag x%x "
"status 0x%x, reason 0x%x\n",
cmdiocb->iocb.un.acxri.abortContextTag,
@@ -9770,6 +10067,13 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
abort_cmd) != 0)
continue;
+ /*
+ * If the iocbq is already being aborted, don't take a second
+ * action, but do count it.
+ */
+ if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+ continue;
+
/* issue ABTS for this IOCB based on iotag */
abtsiocb = lpfc_sli_get_iocbq(phba);
if (abtsiocb == NULL) {
@@ -9777,6 +10081,9 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
continue;
}
+ /* indicate the IO is being aborted by the driver. */
+ iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
+
cmd = &iocbq->iocb;
abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
@@ -9786,7 +10093,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
abtsiocb->iocb.ulpLe = 1;
abtsiocb->iocb.ulpClass = cmd->ulpClass;
- abtsiocb->vport = phba->pport;
+ abtsiocb->vport = vport;
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
@@ -9813,6 +10120,124 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
}
/**
+ * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
+ * @vport: Pointer to virtual port.
+ * @pring: Pointer to driver SLI ring object.
+ * @tgt_id: SCSI ID of the target.
+ * @lun_id: LUN ID of the scsi device.
+ * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
+ *
+ * This function sends an abort command for every SCSI command
+ * associated with the given virtual port pending on the ring
+ * filtered by the lpfc_sli_validate_fcp_iocb function.
+ * When cmd == LPFC_CTX_LUN, the function sends an abort only to the
+ * FCP iocbs associated with the lun specified by the tgt_id and lun_id
+ * parameters.
+ * When cmd == LPFC_CTX_TGT, the function sends an abort only to the
+ * FCP iocbs associated with the SCSI target specified by the tgt_id
+ * parameter.
+ * When cmd == LPFC_CTX_HOST, the function sends an abort to all
+ * FCP iocbs associated with the virtual port.
+ * This function returns the number of iocbs it aborted. It is called
+ * with no locks held right after a taskmgmt command is sent.
+ **/
+int
+lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
+ uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *abtsiocbq;
+ struct lpfc_iocbq *iocbq;
+ IOCB_t *icmd;
+ int sum, i, ret_val;
+ unsigned long iflags;
+ struct lpfc_sli_ring *pring_s4;
+ uint32_t ring_number;
+
+ spin_lock_irq(&phba->hbalock);
+
+ /* all I/Os are in the process of being flushed */
+ if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
+ spin_unlock_irq(&phba->hbalock);
+ return 0;
+ }
+ sum = 0;
+
+ for (i = 1; i <= phba->sli.last_iotag; i++) {
+ iocbq = phba->sli.iocbq_lookup[i];
+
+ if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
+ cmd) != 0)
+ continue;
+
+ /*
+ * If the iocbq is already being aborted, don't take a second
+ * action, but do count it.
+ */
+ if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+ continue;
+
+ /* issue ABTS for this IOCB based on iotag */
+ abtsiocbq = __lpfc_sli_get_iocbq(phba);
+ if (abtsiocbq == NULL)
+ continue;
+
+ icmd = &iocbq->iocb;
+ abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
+ abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ abtsiocbq->iocb.un.acxri.abortIoTag =
+ iocbq->sli4_xritag;
+ else
+ abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
+ abtsiocbq->iocb.ulpLe = 1;
+ abtsiocbq->iocb.ulpClass = icmd->ulpClass;
+ abtsiocbq->vport = vport;
+
+ /* ABTS WQE must go to the same WQ as the WQE to be aborted */
+ abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx;
+ if (iocbq->iocb_flag & LPFC_IO_FCP)
+ abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
+
+ if (lpfc_is_link_up(phba))
+ abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
+ else
+ abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
+
+ /* Setup callback routine and issue the command. */
+ abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
+
+ /*
+ * Indicate the IO is being aborted by the driver and set
+ * the caller's flag into the aborted IO.
+ */
+ iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ ring_number = MAX_SLI3_CONFIGURED_RINGS +
+ iocbq->fcp_wqidx;
+ pring_s4 = &phba->sli.ring[ring_number];
+ /* Note: both hbalock and ring_lock must be set here */
+ spin_lock_irqsave(&pring_s4->ring_lock, iflags);
+ ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
+ abtsiocbq, 0);
+ spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
+ } else {
+ ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
+ abtsiocbq, 0);
+ }
+
+
+ if (ret_val == IOCB_ERROR)
+ __lpfc_sli_release_iocbq(phba, abtsiocbq);
+ else
+ sum++;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ return sum;
+}
+
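[Note: the SLI-4 path above nests the per-ring lock inside hbalock, which is taken at the top of the function. Keeping one global order, coarse lock first and per-ring lock second, is what makes the nesting deadlock-free. Modeled with pthreads:]

	#include <pthread.h>

	static pthread_mutex_t hba_lock  = PTHREAD_MUTEX_INITIALIZER; /* first  */
	static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER; /* second */

	/* Every path takes hba_lock before ring_lock and releases in reverse,
	 * so two threads can never hold the pair in opposite orders. */
	static void issue_under_both(void (*issue)(void))
	{
		pthread_mutex_lock(&hba_lock);
		pthread_mutex_lock(&ring_lock);
		issue();
		pthread_mutex_unlock(&ring_lock);
		pthread_mutex_unlock(&hba_lock);
	}
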
+/**
* lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
* @phba: Pointer to HBA context object.
* @cmdiocbq: Pointer to command iocb.
@@ -9839,6 +10264,24 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
struct lpfc_scsi_buf *lpfc_cmd;
spin_lock_irqsave(&phba->hbalock, iflags);
+ if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
+
+ /*
+ * A time out has occurred for the iocb. If a time out
+ * completion handler has been supplied, call it. Otherwise,
+ * just free the iocbq.
+ */
+
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
+ cmdiocbq->wait_iocb_cmpl = NULL;
+ if (cmdiocbq->iocb_cmpl)
+ (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
+ else
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+ return;
+ }
+
cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
if (cmdiocbq->context2 && rspiocbq)
memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
@@ -9894,10 +10337,16 @@ lpfc_chk_iocb_flg(struct lpfc_hba *phba,
* @timeout: Timeout in number of seconds.
*
* This function issues the iocb to firmware and waits for the
- * iocb to complete. If the iocb command is not
- * completed within timeout seconds, it returns IOCB_TIMEDOUT.
- * Caller should not free the iocb resources if this function
- * returns IOCB_TIMEDOUT.
+ * iocb to complete. The iocb_cmpl field of the iocb shall be used
+ * to handle iocbs which time out. If the field is NULL, the
+ * function shall free the iocbq structure. If more clean up is
+ * needed, the caller is expected to provide a completion function
+ * that will provide the needed clean up. If the iocb command is
+ * not completed within timeout seconds, the function will either
+ * free the iocbq structure (if iocb_cmpl == NULL) or execute the
+ * completion function set in the iocb_cmpl field and then return
+ * a status of IOCB_TIMEDOUT. The caller should not free the iocb
+ * resources if this function returns IOCB_TIMEDOUT.
* The function waits for the iocb completion using a
* non-interruptible wait.
* This function will sleep while waiting for iocb completion.
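
[Note: the LPFC_IO_WAKE_TMO handshake introduced in this hunk decides, under the lock, who frees a timed-out iocb: if the waiter gives up first it marks the iocb, and the late completion handler then owns the cleanup. A minimal pthread model of that ownership handoff; all names invented:]

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdlib.h>

	struct waited_io {
		pthread_mutex_t lock;
		bool done;	/* completion arrived (LPFC_IO_WAKE)     */
		bool timed_out;	/* waiter gave up     (LPFC_IO_WAKE_TMO) */
	};

	/* Waiter, on timeout: mark the io under the lock so a late completion
	 * knows ownership has passed to it. */
	static bool waiter_timed_out(struct waited_io *io)
	{
		bool handed_off = false;

		pthread_mutex_lock(&io->lock);
		if (!io->done) {
			io->timed_out = true;
			handed_off = true;
		}
		pthread_mutex_unlock(&io->lock);
		return handed_off;
	}

	/* Completer: if the waiter already timed out, free the io here;
	 * otherwise mark it done and wake the waiter. */
	static void complete_io(struct waited_io *io)
	{
		pthread_mutex_lock(&io->lock);
		if (io->timed_out) {
			pthread_mutex_unlock(&io->lock);
			free(io);	/* nobody else will */
			return;
		}
		io->done = true;
		pthread_mutex_unlock(&io->lock);
		/* ...wake the waiter here... */
	}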
@@ -9926,7 +10375,13 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
long timeleft, timeout_req = 0;
int retval = IOCB_SUCCESS;
uint32_t creg_val;
+ struct lpfc_iocbq *iocb;
+ int txq_cnt = 0;
+ int txcmplq_cnt = 0;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ unsigned long iflags;
+ bool iocb_completed = true;
+
/*
* If the caller has provided a response iocbq buffer, then context2
* is NULL or its an error.
@@ -9937,9 +10392,10 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
piocb->context2 = prspiocbq;
}
+ piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
piocb->context_un.wait_queue = &done_q;
- piocb->iocb_flag &= ~LPFC_IO_WAKE;
+ piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
if (lpfc_readl(phba->HCregaddr, &creg_val))
@@ -9952,14 +10408,30 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
SLI_IOCB_RET_IOCB);
if (retval == IOCB_SUCCESS) {
- timeout_req = timeout * HZ;
+ timeout_req = msecs_to_jiffies(timeout * 1000);
timeleft = wait_event_timeout(done_q,
lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
timeout_req);
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
- if (piocb->iocb_flag & LPFC_IO_WAKE) {
+ /*
+ * IOCB timed out. Inform the wake iocb wait
+ * completion function and set local status
+ */
+
+ iocb_completed = false;
+ piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ if (iocb_completed) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"0331 IOCB wake signaled\n");
+ /* Note: we are not indicating if the IOCB has a success
+ * status or not - that's for the caller to check.
+ * IOCB_SUCCESS means just that the command was sent and
+ * completed. Not that it completed successfully.
+ */
} else if (timeleft == 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0338 IOCB wait timeout error - no "
@@ -9973,9 +10445,17 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
retval = IOCB_TIMEDOUT;
}
} else if (retval == IOCB_BUSY) {
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
- phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt);
+ if (phba->cfg_log_verbose & LOG_SLI) {
+ list_for_each_entry(iocb, &pring->txq, list) {
+ txq_cnt++;
+ }
+ list_for_each_entry(iocb, &pring->txcmplq, list) {
+ txcmplq_cnt++;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
+ phba->iocb_cnt, txq_cnt, txcmplq_cnt);
+ }
return retval;
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -10031,12 +10511,13 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
uint32_t timeout)
{
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
+ MAILBOX_t *mb = NULL;
int retval;
unsigned long flag;
- /* The caller must leave context1 empty. */
+ /* The caller might set context1 for extended buffer */
if (pmboxq->context1)
- return MBX_NOT_FINISHED;
+ mb = (MAILBOX_t *)pmboxq->context1;
pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
/* setup wake call as IOCB callback */
@@ -10049,22 +10530,25 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
wait_event_interruptible_timeout(done_q,
pmboxq->mbox_flag & LPFC_MBX_WAKE,
- timeout * HZ);
+ msecs_to_jiffies(timeout * 1000));
spin_lock_irqsave(&phba->hbalock, flag);
- pmboxq->context1 = NULL;
+ /* restore the possible extended buffer so its resources can be freed */
+ pmboxq->context1 = (uint8_t *)mb;
/*
* if LPFC_MBX_WAKE flag is set the mailbox is completed
* else do not free the resources.
*/
if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
retval = MBX_SUCCESS;
- lpfc_sli4_swap_str(phba, pmboxq);
} else {
retval = MBX_TIMEOUT;
pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
}
spin_unlock_irqrestore(&phba->hbalock, flag);
+ } else {
+ /* restore the possible extended buffer so its resources can be freed */
+ pmboxq->context1 = (uint8_t *)mb;
}
return retval;
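/*
 * Editor's sketch (not part of the patch): how the save/restore of
 * context1 above lets a caller keep an extended mailbox buffer across
 * the wait.  ext_buf, free_ext_buf() and mbox_tmo are hypothetical.
 */
	pmboxq->context1 = ext_buf;	/* extended buffer, may be NULL */
	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, mbox_tmo);
	if (rc != MBX_TIMEOUT)
		free_ext_buf(ext_buf);	/* context1 was preserved above */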
@@ -10949,8 +11433,11 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
struct lpfc_iocbq *pIocbOut,
struct lpfc_wcqe_complete *wcqe)
{
+ int numBdes, i;
unsigned long iflags;
- uint32_t status;
+ uint32_t status, max_response;
+ struct lpfc_dmabuf *dmabuf;
+ struct ulp_bde64 *bpl, bde;
size_t offset = offsetof(struct lpfc_iocbq, iocb);
memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
@@ -10967,7 +11454,36 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
else {
pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
- pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
+ switch (pIocbOut->iocb.ulpCommand) {
+ case CMD_ELS_REQUEST64_CR:
+ dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
+ bpl = (struct ulp_bde64 *)dmabuf->virt;
+ bde.tus.w = le32_to_cpu(bpl[1].tus.w);
+ max_response = bde.tus.f.bdeSize;
+ break;
+ case CMD_GEN_REQUEST64_CR:
+ max_response = 0;
+ if (!pIocbOut->context3)
+ break;
+ numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
+ sizeof(struct ulp_bde64);
+ dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
+ bpl = (struct ulp_bde64 *)dmabuf->virt;
+ for (i = 0; i < numBdes; i++) {
+ bde.tus.w = le32_to_cpu(bpl[i].tus.w);
+ if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
+ max_response += bde.tus.f.bdeSize;
+ }
+ break;
+ default:
+ max_response = wcqe->total_data_placed;
+ break;
+ }
+ if (max_response < wcqe->total_data_placed)
+ pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
+ else
+ pIocbIn->iocb.un.genreq64.bdl.bdeSize =
+ wcqe->total_data_placed;
}
/* Convert BG errors for completion status */
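/*
 * Editor's note (not part of the patch), a worked example of the clamp
 * above: for a CMD_GEN_REQUEST64_CR whose BPL carries one command BDE
 * (BUFF_TYPE_BDE_64, skipped) and one 1024-byte response BDE (counted),
 * max_response is 1024; a wcqe->total_data_placed of 2048 would then be
 * reported as 1024, the most the reply buffer can actually hold.
 */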
@@ -11258,16 +11774,25 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
struct lpfc_iocbq *irspiocbq;
unsigned long iflags;
struct lpfc_sli_ring *pring = cq->pring;
+ int txq_cnt = 0;
+ int txcmplq_cnt = 0;
+ int fcp_txcmplq_cnt = 0;
/* Get an irspiocbq for later ELS response processing use */
irspiocbq = lpfc_sli_get_iocbq(phba);
if (!irspiocbq) {
+ if (!list_empty(&pring->txq))
+ txq_cnt++;
+ if (!list_empty(&pring->txcmplq))
+ txcmplq_cnt++;
+ if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
+ fcp_txcmplq_cnt++;
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
"fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
- pring->txq_cnt, phba->iocb_cnt,
- phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt,
- phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt);
+ txq_cnt, phba->iocb_cnt,
+ fcp_txcmplq_cnt,
+ txcmplq_cnt);
return false;
}
@@ -11838,6 +12363,175 @@ lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
}
+
+/**
+ * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
+ * entry
+ * @phba: Pointer to HBA context object.
+ * @eqe: Pointer to fast-path event queue entry.
+ *
+ * This routine processes an event queue entry from the Flash Optimized
+ * Fabric event queue. It checks the MajorCode and MinorCode to determine
+ * whether this is a completion event on a completion queue; if not, an
+ * error is logged and the routine returns. Otherwise, it finds the
+ * corresponding completion queue, processes all the entries on that
+ * completion queue, rearms it, and then returns.
+ **/
+static void
+lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
+{
+ struct lpfc_queue *cq;
+ struct lpfc_cqe *cqe;
+ bool workposted = false;
+ uint16_t cqid;
+ int ecount = 0;
+
+ if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9147 Not a valid completion "
+ "event: majorcode=x%x, minorcode=x%x\n",
+ bf_get_le32(lpfc_eqe_major_code, eqe),
+ bf_get_le32(lpfc_eqe_minor_code, eqe));
+ return;
+ }
+
+ /* Get the reference to the corresponding CQ */
+ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+
+ /* Next check for OAS */
+ cq = phba->sli4_hba.oas_cq;
+ if (unlikely(!cq)) {
+ if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9148 OAS completion queue "
+ "does not exist\n");
+ return;
+ }
+
+ if (unlikely(cqid != cq->queue_id)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9149 Miss-matched fast-path compl "
+ "queue id: eqcqid=%d, fcpcqid=%d\n",
+ cqid, cq->queue_id);
+ return;
+ }
+
+ /* Process all the entries to the OAS CQ */
+ while ((cqe = lpfc_sli4_cq_get(cq))) {
+ workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
+ if (!(++ecount % cq->entry_repost))
+ lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+ }
+
+ /* Track the max number of CQEs processed in 1 EQ */
+ if (ecount > cq->CQ_max_cqe)
+ cq->CQ_max_cqe = ecount;
+
+ /* Catch the no cq entry condition */
+ if (unlikely(ecount == 0))
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9153 No entry from fast-path completion "
+ "queue fcpcqid=%d\n", cq->queue_id);
+
+ /* In any case, flush and re-arm the CQ */
+ lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+
+ /* wake up worker thread if there is work to be done */
+ if (workposted)
+ lpfc_worker_wake_up(phba);
+}
+
+/**
+ * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-4 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
+ * IOCB ring event in the HBA. However, when the device is enabled with either
+ * MSI or Pin-IRQ interrupt mode, this function is called as part of the
+ * device-level interrupt handler. When the PCI slot is in error recovery
+ * or the HBA is undergoing initialization, the interrupt handler will not
+ * process the interrupt. Flash Optimized Fabric ring events are handled
+ * in interrupt context. This function is called without any lock held.
+ * It takes the hbalock to access and update SLI data structures. Note
+ * that the EQ-to-CQ mapping is one-to-one, so the EQ index is
+ * equal to the CQ index.
+ *
+ * This function returns IRQ_HANDLED when the interrupt is handled, else
+ * it returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
+{
+ struct lpfc_hba *phba;
+ struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
+ struct lpfc_queue *eq;
+ struct lpfc_eqe *eqe;
+ unsigned long iflag;
+ int ecount = 0;
+ uint32_t eqidx;
+
+ /* Get the driver's phba structure from the dev_id */
+ fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
+ phba = fcp_eq_hdl->phba;
+ eqidx = fcp_eq_hdl->idx;
+
+ if (unlikely(!phba))
+ return IRQ_NONE;
+
+ /* Get to the EQ struct associated with this vector */
+ eq = phba->sli4_hba.fof_eq;
+ if (unlikely(!eq))
+ return IRQ_NONE;
+
+ /* Check device state for handling interrupt */
+ if (unlikely(lpfc_intr_state_check(phba))) {
+ eq->EQ_badstate++;
+ /* Check again for link_state with lock held */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ if (phba->link_state < LPFC_LINK_DOWN)
+ /* Flush, clear interrupt, and rearm the EQ */
+ lpfc_sli4_eq_flush(phba, eq);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return IRQ_NONE;
+ }
+
+ /*
+ * Process all the events on the FCP fast-path EQ
+ */
+ while ((eqe = lpfc_sli4_eq_get(eq))) {
+ lpfc_sli4_fof_handle_eqe(phba, eqe);
+ if (!(++ecount % eq->entry_repost))
+ lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM);
+ eq->EQ_processed++;
+ }
+
+ /* Track the max number of EQEs processed in 1 intr */
+ if (ecount > eq->EQ_max_eqe)
+ eq->EQ_max_eqe = ecount;
+
+
+ if (unlikely(ecount == 0)) {
+ eq->EQ_no_entry++;
+
+ if (phba->intr_type == MSIX)
+ /* MSI-X vector owns this EQ, so an empty EQ is unexpected */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "9145 MSI-X interrupt with no EQE\n");
+ else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9146 ISR interrupt with no EQE\n");
+ /* Non MSI-X: the line may be shared, let other handlers run */
+ return IRQ_NONE;
+ }
+ }
+ /* Always clear and re-arm the fast-path EQ */
+ lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
+ return IRQ_HANDLED;
+}
+
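/*
 * Editor's sketch (not part of the patch): in MSI-X mode the handler
 * above would be attached to a dedicated vector roughly as below,
 * passing the same fcp_eq_hdl[0] it expects as dev_id.  fof_vector is
 * hypothetical; request_irq() is the standard kernel API.
 */
	rc = request_irq(fof_vector, lpfc_sli4_fof_intr_handler, 0,
			 "lpfc-fof", &phba->sli4_hba.fcp_eq_hdl[0]);
	if (rc)
		return rc;	/* caller falls back to shared handling */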
/**
* lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
* @irq: Interrupt number.
@@ -11873,7 +12567,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
struct lpfc_eqe *eqe;
unsigned long iflag;
int ecount = 0;
- uint32_t fcp_eqidx;
+ int fcp_eqidx;
/* Get the driver's phba structure from the dev_id */
fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
@@ -11975,7 +12669,7 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
struct lpfc_hba *phba;
irqreturn_t hba_irq_rc;
bool hba_handled = false;
- uint32_t fcp_eqidx;
+ int fcp_eqidx;
/* Get the driver's phba structure from the dev_id */
phba = (struct lpfc_hba *)dev_id;
@@ -11993,6 +12687,13 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
hba_handled |= true;
}
+ if (phba->cfg_fof) {
+ hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
+ &phba->sli4_hba.fcp_eq_hdl[0]);
+ if (hba_irq_rc == IRQ_HANDLED)
+ hba_handled |= true;
+ }
+
return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */
@@ -12097,6 +12798,38 @@ out_fail:
}
/**
+ * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @pci_barset: PCI BAR set flag.
+ *
+ * This function returns the host memory address to which the specified
+ * PCI BAR set has already been iomapped. The returned host
+ * memory address can be NULL.
+ */
+static void __iomem *
+lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
+{
+ struct pci_dev *pdev;
+
+ if (!phba->pcidev)
+ return NULL;
+ else
+ pdev = phba->pcidev;
+
+ switch (pci_barset) {
+ case WQ_PCI_BAR_0_AND_1:
+ return phba->pci_bar0_memmap_p;
+ case WQ_PCI_BAR_2_AND_3:
+ return phba->pci_bar2_memmap_p;
+ case WQ_PCI_BAR_4_AND_5:
+ return phba->pci_bar4_memmap_p;
+ default:
+ break;
+ }
+ return NULL;
+}
+
+/**
* lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs
* @phba: HBA structure that indicates port to create a queue on.
* @startq: The starting FCP EQ to modify
@@ -12673,6 +13406,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
union lpfc_sli4_cfg_shdr *shdr;
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
struct dma_address *page;
+ void __iomem *bar_memmap_p;
+ uint32_t db_offset;
+ uint16_t pci_barset;
/* sanity check on queue memory */
if (!wq || !cq)
@@ -12694,9 +13430,44 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
wq->page_count);
bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
cq->queue_id);
+
+ /* wqv is the earliest version supported, NOT the latest */
bf_set(lpfc_mbox_hdr_version, &shdr->request,
phba->sli4_hba.pc_sli4_params.wqv);
- if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
+
+ switch (phba->sli4_hba.pc_sli4_params.wqv) {
+ case LPFC_Q_CREATE_VERSION_0:
+ switch (wq->entry_size) {
+ default:
+ case 64:
+ /* Nothing to do, version 0 ONLY supports 64 byte */
+ page = wq_create->u.request.page;
+ break;
+ case 128:
+ if (!(phba->sli4_hba.pc_sli4_params.wqsize &
+ LPFC_WQ_SZ128_SUPPORT)) {
+ status = -ERANGE;
+ goto out;
+ }
+ /* If we get here the HBA MUST also support V1 and
+ * we MUST use it
+ */
+ bf_set(lpfc_mbox_hdr_version, &shdr->request,
+ LPFC_Q_CREATE_VERSION_1);
+
+ bf_set(lpfc_mbx_wq_create_wqe_count,
+ &wq_create->u.request_1, wq->entry_count);
+ bf_set(lpfc_mbx_wq_create_wqe_size,
+ &wq_create->u.request_1,
+ LPFC_WQ_WQE_SIZE_128);
+ bf_set(lpfc_mbx_wq_create_page_size,
+ &wq_create->u.request_1,
+ (PAGE_SIZE/SLI4_PAGE_SIZE));
+ page = wq_create->u.request_1.page;
+ break;
+ }
+ break;
+ case LPFC_Q_CREATE_VERSION_1:
bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
wq->entry_count);
switch (wq->entry_size) {
@@ -12707,6 +13478,11 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
LPFC_WQ_WQE_SIZE_64);
break;
case 128:
+ if (!(phba->sli4_hba.pc_sli4_params.wqsize &
+ LPFC_WQ_SZ128_SUPPORT)) {
+ status = -ERANGE;
+ goto out;
+ }
bf_set(lpfc_mbx_wq_create_wqe_size,
&wq_create->u.request_1,
LPFC_WQ_WQE_SIZE_128);
@@ -12715,14 +13491,21 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
(PAGE_SIZE/SLI4_PAGE_SIZE));
page = wq_create->u.request_1.page;
- } else {
- page = wq_create->u.request.page;
+ break;
+ default:
+ status = -ERANGE;
+ goto out;
}
+
list_for_each_entry(dmabuf, &wq->page_list, list) {
memset(dmabuf->virt, 0, hw_page_size);
page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
}
+
+ if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
+ bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
+
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -12740,6 +13523,48 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
status = -ENXIO;
goto out;
}
+ if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
+ wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
+ &wq_create->u.response);
+ if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
+ (wq->db_format != LPFC_DB_RING_FORMAT)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3265 WQ[%d] doorbell format not "
+ "supported: x%x\n", wq->queue_id,
+ wq->db_format);
+ status = -EINVAL;
+ goto out;
+ }
+ pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
+ &wq_create->u.response);
+ bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
+ if (!bar_memmap_p) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3263 WQ[%d] failed to memmap pci "
+ "barset:x%x\n", wq->queue_id,
+ pci_barset);
+ status = -ENOMEM;
+ goto out;
+ }
+ db_offset = wq_create->u.response.doorbell_offset;
+ if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
+ (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3252 WQ[%d] doorbell offset not "
+ "supported: x%x\n", wq->queue_id,
+ db_offset);
+ status = -EINVAL;
+ goto out;
+ }
+ wq->db_regaddr = bar_memmap_p + db_offset;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3264 WQ[%d]: barset:x%x, offset:x%x, "
+ "format:x%x\n", wq->queue_id, pci_barset,
+ db_offset, wq->db_format);
+ } else {
+ wq->db_format = LPFC_DB_LIST_FORMAT;
+ wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
+ }
wq->type = LPFC_WQ;
wq->assoc_qid = cq->queue_id;
wq->subtype = subtype;
@@ -12816,6 +13641,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+ void __iomem *bar_memmap_p;
+ uint32_t db_offset;
+ uint16_t pci_barset;
/* sanity check on queue memory */
if (!hrq || !drq || !cq)
@@ -12894,6 +13722,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
putPaddrHigh(dmabuf->phys);
}
+ if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
+ bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
+
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -12911,6 +13742,51 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
status = -ENXIO;
goto out;
}
+
+ if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
+ hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
+ &rq_create->u.response);
+ if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
+ (hrq->db_format != LPFC_DB_RING_FORMAT)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3262 RQ [%d] doorbell format not "
+ "supported: x%x\n", hrq->queue_id,
+ hrq->db_format);
+ status = -EINVAL;
+ goto out;
+ }
+
+ pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
+ &rq_create->u.response);
+ bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
+ if (!bar_memmap_p) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3269 RQ[%d] failed to memmap pci "
+ "barset:x%x\n", hrq->queue_id,
+ pci_barset);
+ status = -ENOMEM;
+ goto out;
+ }
+
+ db_offset = rq_create->u.response.doorbell_offset;
+ if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
+ (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3270 RQ[%d] doorbell offset not "
+ "supported: x%x\n", hrq->queue_id,
+ db_offset);
+ status = -EINVAL;
+ goto out;
+ }
+ hrq->db_regaddr = bar_memmap_p + db_offset;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
+ "format:x%x\n", hrq->queue_id, pci_barset,
+ db_offset, hrq->db_format);
+ } else {
+ hrq->db_format = LPFC_DB_RING_FORMAT;
+ hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
+ }
hrq->type = LPFC_HRQ;
hrq->assoc_qid = cq->queue_id;
hrq->subtype = subtype;
@@ -12976,6 +13852,8 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
putPaddrHigh(dmabuf->phys);
}
+ if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
+ bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
@@ -13754,13 +14632,14 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
}
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "2538 Received frame rctl:%s type:%s "
- "Frame Data:%08x %08x %08x %08x %08x %08x\n",
- rctl_names[fc_hdr->fh_r_ctl],
- type_names[fc_hdr->fh_type],
+ "2538 Received frame rctl:%s (x%x), type:%s (x%x), "
+ "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
+ rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
+ type_names[fc_hdr->fh_type], fc_hdr->fh_type,
be32_to_cpu(header[0]), be32_to_cpu(header[1]),
be32_to_cpu(header[2]), be32_to_cpu(header[3]),
- be32_to_cpu(header[4]), be32_to_cpu(header[5]));
+ be32_to_cpu(header[4]), be32_to_cpu(header[5]),
+ be32_to_cpu(header[6]));
return 0;
drop:
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
@@ -14063,6 +14942,40 @@ lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
}
/**
+ * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
+ * @vport: pointer to a virtual port
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function tries to abort the sequence assembled at the upper level
+ * protocol, described by the information in the basic abort @dmabuf. It
+ * checks whether such a pending context exists at the upper level
+ * protocol. If so, it cleans up the pending context.
+ *
+ * Return
+ * true -- a matching pending context for the sequence was found and
+ * cleaned up at the ulp;
+ * false -- no matching pending context for the sequence exists
+ * at the ulp.
+ **/
+static bool
+lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
+{
+ struct lpfc_hba *phba = vport->phba;
+ int handled;
+
+ /* Accepting abort at ulp with SLI4 only */
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return false;
+
+ /* Offer the abort to every interested upper level protocol */
+ handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
+ if (handled)
+ return true;
+
+ return false;
+}
+
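/*
 * Editor's sketch (not part of the patch): the contract a ULP handler
 * such as lpfc_ct_handle_unsol_abort() is expected to follow - return
 * non-zero only when a matching pending exchange was found and cleaned
 * up.  All example_* names are hypothetical.
 */
static int
example_ulp_handle_unsol_abort(struct lpfc_hba *phba,
			       struct hbq_dmabuf *dmabuf)
{
	if (!example_find_pending_exchange(phba, dmabuf))
		return 0;	/* nothing pending at this ULP */
	example_cancel_pending_exchange(phba, dmabuf);
	return 1;		/* caller will answer with BA_ACC */
}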
+/**
* lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
* @phba: Pointer to HBA context object.
* @cmd_iocbq: pointer to the command iocbq structure.
@@ -14077,8 +14990,14 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmd_iocbq,
struct lpfc_iocbq *rsp_iocbq)
{
- if (cmd_iocbq)
+ struct lpfc_nodelist *ndlp;
+
+ if (cmd_iocbq) {
+ ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
+ lpfc_nlp_put(ndlp);
+ lpfc_nlp_not_used(ndlp);
lpfc_sli_release_iocbq(phba, cmd_iocbq);
+ }
/* Failure means BLS ABORT RSP did not get delivered to remote node*/
if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
@@ -14118,9 +15037,10 @@ lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
* event after aborting the sequence handling.
**/
static void
-lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
- struct fc_frame_header *fc_hdr)
+lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
+ struct fc_frame_header *fc_hdr, bool aborted)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *ctiocb = NULL;
struct lpfc_nodelist *ndlp;
uint16_t oxid, rxid, xri, lxri;
@@ -14135,12 +15055,27 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
oxid = be16_to_cpu(fc_hdr->fh_ox_id);
rxid = be16_to_cpu(fc_hdr->fh_rx_id);
- ndlp = lpfc_findnode_did(phba->pport, sid);
+ ndlp = lpfc_findnode_did(vport, sid);
if (!ndlp) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
- "1268 Find ndlp returned NULL for oxid:x%x "
- "SID:x%x\n", oxid, sid);
- return;
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "1268 Failed to allocate ndlp for "
+ "oxid:x%x SID:x%x\n", oxid, sid);
+ return;
+ }
+ lpfc_nlp_init(vport, ndlp, sid);
+ /* Put ndlp onto pport node list */
+ lpfc_enqueue_node(vport, ndlp);
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ /* re-setup ndlp without removing from node list */
+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (!ndlp) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "3275 Failed to active ndlp found "
+ "for oxid:x%x SID:x%x\n", oxid, sid);
+ return;
+ }
}
/* Allocate buffer for rsp iocb */
@@ -14164,7 +15099,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
icmd->ulpLe = 1;
icmd->ulpClass = CLASS3;
icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
- ctiocb->context1 = ndlp;
+ ctiocb->context1 = lpfc_nlp_get(ndlp);
ctiocb->iocb_cmpl = NULL;
ctiocb->vport = phba->pport;
@@ -14183,14 +15118,24 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
if (lxri != NO_XRI)
lpfc_set_rrq_active(phba, ndlp, lxri,
(xri == oxid) ? rxid : oxid, 0);
- /* If the oxid maps to the FCP XRI range or if it is out of range,
- * send a BLS_RJT. The driver no longer has that exchange.
- * Override the IOCB for a BA_RJT.
+ /* For a BA_ABTS from the exchange responder, if the logical xri
+ * for the oxid maps to the FCP XRI range, the port no longer has
+ * that exchange context; send a BLS_RJT. Override the IOCB for
+ * a BA_RJT.
+ */
+ if ((fctl & FC_FC_EX_CTX) &&
+ (lxri > lpfc_sli4_get_els_iocb_cnt(phba))) {
+ icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
+ bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
+ bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
+ bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
+ }
+
+ /* If the BA_ABTS failed to abort a partially assembled receive
+ * sequence, the driver no longer has that exchange; send a BLS_RJT.
+ * Override the IOCB for a BA_RJT.
*/
- if (xri > (phba->sli4_hba.max_cfg_param.max_xri +
- phba->sli4_hba.max_cfg_param.xri_base) ||
- xri > (lpfc_sli4_get_els_iocb_cnt(phba) +
- phba->sli4_hba.max_cfg_param.xri_base)) {
+ if (aborted == false) {
icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
@@ -14214,17 +15159,19 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
/* Xmit CT abts response on exchange <xid> */
- lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
- icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
+ icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
if (rc == IOCB_ERROR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
- "2925 Failed to issue CT ABTS RSP x%x on "
- "xri x%x, Data x%x\n",
- icmd->un.xseq64.w5.hcsw.Rctl, oxid,
- phba->link_state);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "2925 Failed to issue CT ABTS RSP x%x on "
+ "xri x%x, Data x%x\n",
+ icmd->un.xseq64.w5.hcsw.Rctl, oxid,
+ phba->link_state);
+ lpfc_nlp_put(ndlp);
+ ctiocb->context1 = NULL;
lpfc_sli_release_iocbq(phba, ctiocb);
}
}
@@ -14249,32 +15196,25 @@ lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
struct lpfc_hba *phba = vport->phba;
struct fc_frame_header fc_hdr;
uint32_t fctl;
- bool abts_par;
+ bool aborted;
/* Make a copy of fc_hdr before the dmabuf being released */
memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
if (fctl & FC_FC_EX_CTX) {
- /*
- * ABTS sent by responder to exchange, just free the buffer
- */
- lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ /* ABTS by responder to exchange, no cleanup needed */
+ aborted = true;
} else {
- /*
- * ABTS sent by initiator to exchange, need to do cleanup
- */
- /* Try to abort partially assembled seq */
- abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);
-
- /* Send abort to ULP if partially seq abort failed */
- if (abts_par == false)
- lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
- else
- lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ /* ABTS by initiator to exchange, need to do cleanup */
+ aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
+ if (aborted == false)
+ aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
}
- /* Send basic accept (BA_ACC) to the abort requester */
- lpfc_sli4_seq_abort_rsp(phba, &fc_hdr);
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
+
+ /* Respond with BA_ACC or BA_RJT accordingly */
+ lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
}
/**
@@ -14377,14 +15317,20 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
first_iocbq->iocb.unsli3.rcvsli3.vpi =
vport->phba->vpi_ids[vport->vpi];
/* put the first buffer into the first IOCBq */
+ tot_len = bf_get(lpfc_rcqe_length,
+ &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+
first_iocbq->context2 = &seq_dmabuf->dbuf;
first_iocbq->context3 = NULL;
first_iocbq->iocb.ulpBdeCount = 1;
- first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
+ if (tot_len > LPFC_DATA_BUF_SIZE)
+ first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
LPFC_DATA_BUF_SIZE;
+ else
+ first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
+
first_iocbq->iocb.un.rcvels.remoteID = sid;
- tot_len = bf_get(lpfc_rcqe_length,
- &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+
first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
}
iocbq = first_iocbq;
@@ -14400,14 +15346,17 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
if (!iocbq->context3) {
iocbq->context3 = d_buf;
iocbq->iocb.ulpBdeCount++;
- pbde = (struct ulp_bde64 *)
- &iocbq->iocb.unsli3.sli3Words[4];
- pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
-
/* We need to get the size out of the right CQE */
hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
len = bf_get(lpfc_rcqe_length,
&hbq_buf->cq_event.cqe.rcqe_cmpl);
+ pbde = (struct ulp_bde64 *)
+ &iocbq->iocb.unsli3.sli3Words[4];
+ if (len > LPFC_DATA_BUF_SIZE)
+ pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
+ else
+ pbde->tus.f.bdeSize = len;
+
iocbq->iocb.unsli3.rcvsli3.acc_len += len;
tot_len += len;
} else {
@@ -14422,16 +15371,19 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
lpfc_in_buf_free(vport->phba, d_buf);
continue;
}
+ /* We need to get the size out of the right CQE */
+ hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ len = bf_get(lpfc_rcqe_length,
+ &hbq_buf->cq_event.cqe.rcqe_cmpl);
iocbq->context2 = d_buf;
iocbq->context3 = NULL;
iocbq->iocb.ulpBdeCount = 1;
- iocbq->iocb.un.cont64[0].tus.f.bdeSize =
+ if (len > LPFC_DATA_BUF_SIZE)
+ iocbq->iocb.un.cont64[0].tus.f.bdeSize =
LPFC_DATA_BUF_SIZE;
+ else
+ iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
- /* We need to get the size out of the right CQE */
- hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
- len = bf_get(lpfc_rcqe_length,
- &hbq_buf->cq_event.cqe.rcqe_cmpl);
tot_len += len;
iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
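/*
 * Editor's note (not part of the patch): each clamp above is the
 * open-coded form of the kernel min_t() helper, e.g.
 *
 *	pbde->tus.f.bdeSize = min_t(uint32_t, len, LPFC_DATA_BUF_SIZE);
 */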
@@ -14713,6 +15665,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
uint16_t max_rpi, rpi_limit;
uint16_t rpi_remaining, lrpi = 0;
struct lpfc_rpi_hdr *rpi_hdr;
+ unsigned long iflag;
max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
rpi_limit = phba->sli4_hba.next_rpi;
@@ -14721,7 +15674,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
* Fetch the next logical rpi. Because this index is logical,
* the driver starts at 0 each time.
*/
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflag);
rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
if (rpi >= rpi_limit)
rpi = LPFC_RPI_ALLOC_ERROR;
@@ -14737,7 +15690,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
*/
if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
(phba->sli4_hba.rpi_count >= max_rpi)) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
return rpi;
}
@@ -14746,7 +15699,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
* extents.
*/
if (!phba->sli4_hba.rpi_hdrs_in_use) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
return rpi;
}
@@ -14757,7 +15710,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
* how many are supported max by the device.
*/
rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
if (!rpi_hdr) {
@@ -15232,11 +16185,18 @@ lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
LPFC_SLI4_FCF_TBL_INDX_MAX);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"3060 Last IDX %d\n", last_index);
- if (list_empty(&phba->fcf.fcf_pri_list)) {
+
+ /* Verify the priority list has 2 or more entries */
+ spin_lock_irq(&phba->hbalock);
+ if (list_empty(&phba->fcf.fcf_pri_list) ||
+ list_is_singular(&phba->fcf.fcf_pri_list)) {
+ spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
"3061 Last IDX %d\n", last_index);
return 0; /* Empty or single-entry rr list */
}
+ spin_unlock_irq(&phba->hbalock);
+
next_fcf_pri = 0;
/*
* Clear the rr_bmask and set all of the bits that are at this
@@ -15307,10 +16267,13 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
{
uint16_t next_fcf_index;
+initial_priority:
/* Search starts from the next bit of the currently registered FCF index */
+ next_fcf_index = phba->fcf.current_rec.fcf_indx;
+
next_priority:
- next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
- LPFC_SLI4_FCF_TBL_INDX_MAX;
+ /* Determine the next fcf index to check */
+ next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
LPFC_SLI4_FCF_TBL_INDX_MAX,
next_fcf_index);
@@ -15337,7 +16300,7 @@ next_priority:
* at that level and continue the selection process.
*/
if (lpfc_check_next_fcf_pri_level(phba))
- goto next_priority;
+ goto initial_priority;
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
"2844 No roundrobin failover FCF available\n");
if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
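/*
 * Editor's sketch (not part of the patch): the wrap-around round-robin
 * above, reduced to its core.  find_next_bit() is the standard bitmap
 * helper; rr_next() is a hypothetical wrapper.
 */
static uint16_t
rr_next(unsigned long *bmask, uint16_t last)
{
	uint16_t next = (last + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;

	next = find_next_bit(bmask, LPFC_SLI4_FCF_TBL_INDX_MAX, next);
	if (next >= LPFC_SLI4_FCF_TBL_INDX_MAX)	/* wrap to index 0 */
		next = find_next_bit(bmask, LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
	return next;	/* INDX_MAX means the bitmap is empty */
}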
@@ -15407,7 +16370,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
void
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
- struct lpfc_fcf_pri *fcf_pri;
+ struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
"2762 FCF (x%x) reached driver's book "
@@ -15417,7 +16380,8 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
}
/* Clear the eligible FCF record index bmask */
spin_lock_irq(&phba->hbalock);
- list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+ list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
+ list) {
if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
list_del_init(&fcf_pri->list);
break;
@@ -15987,35 +16951,41 @@ lpfc_drain_txq(struct lpfc_hba *phba)
{
LIST_HEAD(completions);
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
- struct lpfc_iocbq *piocbq = 0;
+ struct lpfc_iocbq *piocbq = NULL;
unsigned long iflags = 0;
char *fail_msg = NULL;
struct lpfc_sglq *sglq;
union lpfc_wqe wqe;
+ int txq_cnt = 0;
- spin_lock_irqsave(&phba->hbalock, iflags);
- if (pring->txq_cnt > pring->txq_max)
- pring->txq_max = pring->txq_cnt;
+ spin_lock_irqsave(&pring->ring_lock, iflags);
+ list_for_each_entry(piocbq, &pring->txq, list) {
+ txq_cnt++;
+ }
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ if (txq_cnt > pring->txq_max)
+ pring->txq_max = txq_cnt;
- while (pring->txq_cnt) {
- spin_lock_irqsave(&phba->hbalock, iflags);
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ while (!list_empty(&pring->txq)) {
+ spin_lock_irqsave(&pring->ring_lock, iflags);
piocbq = lpfc_sli_ringtx_get(phba, pring);
if (!piocbq) {
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2823 txq empty and txq_cnt is %d\n ",
- pring->txq_cnt);
+ txq_cnt);
break;
}
sglq = __lpfc_sli_get_sglq(phba, piocbq);
if (!sglq) {
__lpfc_sli_ringtx_put(phba, pring, piocbq);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
break;
}
+ txq_cnt--;
/* The xri and iocb resources secured,
* attempt to issue request
@@ -16040,12 +17010,12 @@ lpfc_drain_txq(struct lpfc_hba *phba)
piocbq->iotag, piocbq->sli4_xritag);
list_add_tail(&piocbq->list, &completions);
}
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
}
/* Cancel all the IOCBs that cannot be issued */
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
IOERR_SLI_ABORTED);
- return pring->txq_cnt;
+ return txq_cnt;
}
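/*
 * Editor's sketch (not part of the patch): with the cached txq_cnt
 * fields retired, queue depth is derived by walking the list under
 * ring_lock, exactly as lpfc_drain_txq() now does.  count_txq() is a
 * hypothetical helper showing the pattern in isolation.
 */
static int
count_txq(struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *piocbq;
	unsigned long iflags;
	int cnt = 0;

	spin_lock_irqsave(&pring->ring_lock, iflags);
	list_for_each_entry(piocbq, &pring->txq, list)
		cnt++;
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
	return cnt;
}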