Diffstat (limited to 'drivers/scsi/qla2xxx/qla_isr.c')
 -rw-r--r--   drivers/scsi/qla2xxx/qla_isr.c   336
 1 files changed, 228 insertions, 108 deletions
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 2d8e7b81235..a56825c73c3 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -56,6 +56,16 @@ qla2100_intr_handler(int irq, void *dev_id)
 	vha = pci_get_drvdata(ha->pdev);
 	for (iter = 50; iter--; ) {
 		hccr = RD_REG_WORD(&reg->hccr);
+		/* Check for PCI disconnection */
+		if (hccr == 0xffff) {
+			/*
+			 * Schedule this on the default system workqueue so that
+			 * all the adapter workqueues and the DPC thread can be
+			 * shutdown cleanly.
+			 */
+			schedule_work(&ha->board_disable);
+			break;
+		}
 		if (hccr & HCCR_RISC_PAUSE) {
 			if (pci_channel_offline(ha->pdev))
 				break;
@@ -110,6 +120,22 @@ qla2100_intr_handler(int irq, void *dev_id)
 	return (IRQ_HANDLED);
 }
 
+bool
+qla2x00_check_reg_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
+{
+	/* Check for PCI disconnection */
+	if (reg == 0xffffffff) {
+		/*
+		 * Schedule this on the default system workqueue so that all the
+		 * adapter workqueues and the DPC thread can be shutdown
+		 * cleanly.
+		 */
+		schedule_work(&vha->hw->board_disable);
+		return true;
+	} else
+		return false;
+}
+
 /**
  * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
  * @irq:
@@ -148,11 +174,14 @@ qla2300_intr_handler(int irq, void *dev_id)
 	vha = pci_get_drvdata(ha->pdev);
 	for (iter = 50; iter--; ) {
 		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
+		if (qla2x00_check_reg_for_disconnect(vha, stat))
+			break;
 		if (stat & HSR_RISC_PAUSED) {
 			if (unlikely(pci_channel_offline(ha->pdev)))
 				break;
 
 			hccr = RD_REG_WORD(&reg->hccr);
+
 			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
 				ql_log(ql_log_warn, vha, 0x5026,
 				    "Parity error -- HCCR=%x, Dumping "
@@ -269,11 +298,18 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
 	    { "Complete", "Request Notification", "Time Extension" };
 	int rval;
 	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
+	struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
 	uint16_t __iomem *wptr;
 	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
 
 	/* Seed data -- mailbox1 -> mailbox7. */
-	wptr = (uint16_t __iomem *)&reg24->mailbox1;
+	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
+		wptr = (uint16_t __iomem *)&reg24->mailbox1;
+	else if (IS_QLA8044(vha->hw))
+		wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
+	else
+		return;
+
 	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
 		mb[cnt] = RD_REG_WORD(wptr);
 
@@ -282,40 +318,54 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
 	    "%04x %04x %04x %04x %04x %04x %04x.\n",
 	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
 	    mb[4], mb[5], mb[6]);
-	if ((aen == MBA_IDC_COMPLETE && mb[1] >> 15)) {
-		vha->hw->flags.idc_compl_status = 1;
-		if (vha->hw->notify_dcbx_comp)
-			complete(&vha->hw->dcbx_comp);
-	}
-
-	/* Acknowledgement needed? [Notify && non-zero timeout]. */
-	timeout = (descr >> 8) & 0xf;
-	if (aen != MBA_IDC_NOTIFY || !timeout)
-		return;
+	switch (aen) {
+	/* Handle IDC Error completion case. */
+	case MBA_IDC_COMPLETE:
+		if (mb[1] >> 15) {
+			vha->hw->flags.idc_compl_status = 1;
+			if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
+				complete(&vha->hw->dcbx_comp);
+		}
+		break;
 
-	ql_dbg(ql_dbg_async, vha, 0x5022,
-	    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
-	    vha->host_no, event[aen & 0xff], timeout);
+	case MBA_IDC_NOTIFY:
+		/* Acknowledgement needed? [Notify && non-zero timeout]. */
+		timeout = (descr >> 8) & 0xf;
+		ql_dbg(ql_dbg_async, vha, 0x5022,
+		    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
+		    vha->host_no, event[aen & 0xff], timeout);
 
-	rval = qla2x00_post_idc_ack_work(vha, mb);
-	if (rval != QLA_SUCCESS)
-		ql_log(ql_log_warn, vha, 0x5023,
-		    "IDC failed to post ACK.\n");
+		if (!timeout)
+			return;
+		rval = qla2x00_post_idc_ack_work(vha, mb);
+		if (rval != QLA_SUCCESS)
+			ql_log(ql_log_warn, vha, 0x5023,
+			    "IDC failed to post ACK.\n");
+		break;
+	case MBA_IDC_TIME_EXT:
+		vha->hw->idc_extend_tmo = descr;
+		ql_dbg(ql_dbg_async, vha, 0x5087,
+		    "%lu Inter-Driver Communication %s -- "
+		    "Extend timeout by=%d.\n",
+		    vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
+		break;
+	}
 }
 
 #define LS_UNKNOWN	2
 const char *
 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
 {
-	static const char * const link_speeds[] = {
-		"1", "2", "?", "4", "8", "16", "10"
+	static const char *const link_speeds[] = {
+		"1", "2", "?", "4", "8", "16", "32", "10"
 	};
+#define	QLA_LAST_SPEED	7
 
 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
 		return link_speeds[0];
 	else if (speed == 0x13)
-		return link_speeds[6];
-	else if (speed < 6)
+		return link_speeds[QLA_LAST_SPEED];
+	else if (speed < QLA_LAST_SPEED)
 		return link_speeds[speed];
 	else
 		return link_speeds[LS_UNKNOWN];
@@ -600,7 +650,7 @@ skip_rio:
 		break;
 
 	case MBA_SYSTEM_ERR:		/* System Error */
-		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
+		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
 			RD_REG_WORD(&reg24->mailbox7) : 0;
 		ql_log(ql_log_warn, vha, 0x5003,
 		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
@@ -617,7 +667,7 @@ skip_rio:
 			vha->device_flags |= DFLG_DEV_FAILED;
 		} else {
 			/* Check to see if MPI timeout occurred */
-			if ((mbx & MBX_3) && (ha->flags.port0))
+			if ((mbx & MBX_3) && (ha->port_no == 0))
 				set_bit(MPI_RESET_NEEDED,
 				    &vha->dpc_flags);
 
@@ -691,7 +741,8 @@ skip_rio:
 	case MBA_LOOP_DOWN:		/* Loop Down Event */
 		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) ?
 			RD_REG_WORD(&reg24->mailbox4) : 0;
-		mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
+		mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
+			: mbx;
 		ql_dbg(ql_dbg_async, vha, 0x500b,
 		    "LOOP DOWN detected (%x %x %x %x).\n",
 		    mb[1], mb[2], mb[3], mbx);
@@ -740,11 +791,11 @@ skip_rio:
 		if (IS_QLA2100(ha))
 			break;
 
-		if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
+		if (IS_CNA_CAPABLE(ha)) {
 			ql_dbg(ql_dbg_async, vha, 0x500d,
 			    "DCBX Completed -- %04x %04x %04x.\n",
 			    mb[1], mb[2], mb[3]);
-			if (ha->notify_dcbx_comp)
+			if (ha->notify_dcbx_comp && !vha->vp_idx)
 				complete(&ha->dcbx_comp);
 
 		} else
@@ -1002,7 +1053,7 @@ skip_rio:
 		    mb[1], mb[2], mb[3]);
 		break;
 	case MBA_IDC_NOTIFY:
-		if (IS_QLA8031(vha->hw)) {
+		if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
 			mb[4] = RD_REG_WORD(&reg24->mailbox4);
 			if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
 			    (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
@@ -1018,11 +1069,12 @@ skip_rio:
 			}
 		}
 	case MBA_IDC_COMPLETE:
-		if (ha->notify_lb_portup_comp)
+		if (ha->notify_lb_portup_comp && !vha->vp_idx)
 			complete(&ha->lb_portup_comp);
 		/* Fallthru */
 	case MBA_IDC_TIME_EXT:
-		if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw))
+		if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
+		    IS_QLA8044(ha))
 			qla81xx_idc_event(vha, mb[0], mb[1]);
 		break;
 
@@ -1063,7 +1115,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
 		ql_log(ql_log_warn, vha, 0x3014,
 		    "Invalid SCSI command index (%x).\n", index);
 
-		if (IS_QLA82XX(ha))
+		if (IS_P3P_TYPE(ha))
 			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
 		else
 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -1080,7 +1132,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
 	} else {
 		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
 
-		if (IS_QLA82XX(ha))
+		if (IS_P3P_TYPE(ha))
 			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
 		else
 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -1100,7 +1152,7 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
 	if (index >= req->num_outstanding_cmds) {
 		ql_log(ql_log_warn, vha, 0x5031,
 		    "Invalid command index (%x).\n", index);
-		if (IS_QLA82XX(ha))
+		if (IS_P3P_TYPE(ha))
 			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
 		else
 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -1446,8 +1498,7 @@ logio_done:
 }
 
 static void
-qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
-    struct tsk_mgmt_entry *tsk)
+qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
 {
 	const char func[] = "TMF-IOCB";
 	const char *type;
@@ -1455,7 +1506,6 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 	srb_t *sp;
 	struct srb_iocb *iocb;
 	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
-	int error = 1;
 
 	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
 	if (!sp)
@@ -1464,37 +1514,35 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 	iocb = &sp->u.iocb_cmd;
 	type = sp->name;
 	fcport = sp->fcport;
+	iocb->u.tmf.data = QLA_SUCCESS;
 
 	if (sts->entry_status) {
 		ql_log(ql_log_warn, fcport->vha, 0x5038,
 		    "Async-%s error - hdl=%x entry-status(%x).\n",
 		    type, sp->handle, sts->entry_status);
+		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
 	} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
 		ql_log(ql_log_warn, fcport->vha, 0x5039,
 		    "Async-%s error - hdl=%x completion status(%x).\n",
 		    type, sp->handle, sts->comp_status);
-	} else if (!(le16_to_cpu(sts->scsi_status) &
+		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
+	} else if ((le16_to_cpu(sts->scsi_status) &
 	    SS_RESPONSE_INFO_LEN_VALID)) {
-		ql_log(ql_log_warn, fcport->vha, 0x503a,
-		    "Async-%s error - hdl=%x no response info(%x).\n",
-		    type, sp->handle, sts->scsi_status);
-	} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
-		ql_log(ql_log_warn, fcport->vha, 0x503b,
-		    "Async-%s error - hdl=%x not enough response(%d).\n",
-		    type, sp->handle, sts->rsp_data_len);
-	} else if (sts->data[3]) {
-		ql_log(ql_log_warn, fcport->vha, 0x503c,
-		    "Async-%s error - hdl=%x response(%x).\n",
-		    type, sp->handle, sts->data[3]);
-	} else {
-		error = 0;
+		if (le32_to_cpu(sts->rsp_data_len) < 4) {
+			ql_log(ql_log_warn, fcport->vha, 0x503b,
+			    "Async-%s error - hdl=%x not enough response(%d).\n",
+			    type, sp->handle, sts->rsp_data_len);
+		} else if (sts->data[3]) {
+			ql_log(ql_log_warn, fcport->vha, 0x503c,
+			    "Async-%s error - hdl=%x response(%x).\n",
+			    type, sp->handle, sts->data[3]);
+			iocb->u.tmf.data = QLA_FUNCTION_FAILED;
+		}
 	}
 
-	if (error) {
-		iocb->u.tmf.data = error;
+	if (iocb->u.tmf.data != QLA_SUCCESS)
 		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
 		    (uint8_t *)sts, sizeof(*sts));
-	}
 
 	sp->done(vha, sp, 0);
 }
@@ -1805,6 +1853,9 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
 	if (scsi_status == 0) {
 		bsg_job->reply->reply_payload_rcv_len =
 			bsg_job->reply_payload.payload_len;
+		vha->qla_stats.input_bytes +=
+			bsg_job->reply->reply_payload_rcv_len;
+		vha->qla_stats.input_requests++;
 		rval = EXT_STATUS_OK;
 	}
 	goto done;
@@ -1939,6 +1990,15 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 	que = MSW(sts->handle);
 	req = ha->req_q_map[que];
 
+	/* Check for invalid queue pointer */
+	if (req == NULL ||
+	    que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
+		ql_dbg(ql_dbg_io, vha, 0x3059,
+		    "Invalid status handle (0x%x): Bad req pointer. req=%p, "
+		    "que=%u.\n", sts->handle, req, que);
+		return;
+	}
+
 	/* Validate handle. */
 	if (handle < req->num_outstanding_cmds)
 		sp = req->outstanding_cmds[handle];
@@ -1949,11 +2009,13 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 		ql_dbg(ql_dbg_io, vha, 0x3017,
 		    "Invalid status handle (0x%x).\n", sts->handle);
 
-		if (IS_QLA82XX(ha))
-			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
-		else
-			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
-		qla2xxx_wake_dpc(vha);
+		if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
+			if (IS_P3P_TYPE(ha))
+				set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+			else
+				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+		}
 		return;
 	}
 
@@ -1962,9 +2024,14 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 		return;
 	}
 
+	/* Task Management completion. */
+	if (sp->type == SRB_TM_CMD) {
+		qla24xx_tm_iocb_entry(vha, req, pkt);
+		return;
+	}
+
 	/* Fast path completion. */
 	if (comp_status == CS_COMPLETE && scsi_status == 0) {
-		qla2x00_do_host_ramp_up(vha);
 		qla2x00_process_completed_request(vha, req, handle);
 		return;
 	}
@@ -2176,8 +2243,10 @@ check_scsi_status:
 		}
 
 		ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
-		    "Port down status: port-state=0x%x.\n",
-		    atomic_read(&fcport->state));
+		    "Port to be marked lost on fcport=%02x%02x%02x, current "
+		    "port state= %s.\n", fcport->d_id.b.domain,
+		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
+		    port_state_str[atomic_read(&fcport->state)]);
 
 		if (atomic_read(&fcport->state) == FCS_ONLINE)
 			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
@@ -2212,21 +2281,15 @@ check_scsi_status:
 out:
 	if (logit)
 		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
-		    "FCP command status: 0x%x-0x%x (0x%x) "
-		    "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
-		    "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
+		    "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d "
+		    "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
 		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
 		    comp_status, scsi_status, res, vha->host_no,
 		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
 		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
-		    cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
-		    cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
-		    cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
+		    cp->cmnd, scsi_bufflen(cp), rsp_info_len,
 		    resid_len, fw_resid_len);
 
-	if (!res)
-		qla2x00_do_host_ramp_up(vha);
-
 	if (rsp->status_srb == NULL)
 		sp->done(ha, sp, res);
 }
@@ -2324,7 +2387,7 @@ fatal:
 	ql_log(ql_log_warn, vha, 0x5030,
 	    "Error entry - invalid handle/queue.\n");
 
-	if (IS_QLA82XX(ha))
+	if (IS_P3P_TYPE(ha))
 		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
 	else
 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -2367,6 +2430,23 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
 	}
 }
 
+static void
+qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+    struct abort_entry_24xx *pkt)
+{
+	const char func[] = "ABT_IOCB";
+	srb_t *sp;
+	struct srb_iocb *abt;
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+	if (!sp)
+		return;
+
+	abt = &sp->u.iocb_cmd;
+	abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle);
+	sp->done(vha, sp, 0);
+}
+
 /**
  * qla24xx_process_response_queue() - Process response queue entries.
  * @ha: SCSI driver HA context
@@ -2394,12 +2474,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 		if (pkt->entry_status != 0) {
 			qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
 
-			(void)qlt_24xx_process_response_error(vha, pkt);
+			if (qlt_24xx_process_response_error(vha, pkt))
+				goto process_err;
 
 			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
 			wmb();
 			continue;
 		}
+process_err:
 
 		switch (pkt->entry_type) {
 		case STATUS_TYPE:
@@ -2416,14 +2498,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 			qla24xx_logio_entry(vha, rsp->req,
 			    (struct logio_entry_24xx *)pkt);
 			break;
-		case TSK_MGMT_IOCB_TYPE:
-			qla24xx_tm_iocb_entry(vha, rsp->req,
-			    (struct tsk_mgmt_entry *)pkt);
-			break;
-		case CT_IOCB_TYPE:
+		case CT_IOCB_TYPE:
 			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
 			break;
-		case ELS_IOCB_TYPE:
+		case ELS_IOCB_TYPE:
 			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
 			break;
 		case ABTS_RECV_24XX:
@@ -2432,6 +2510,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 		case ABTS_RESP_24XX:
 		case CTIO_TYPE7:
 		case NOTIFY_ACK_TYPE:
+		case CTIO_CRC2:
 			qlt_response_pkt_all_vps(vha, (response_t *)pkt);
 			break;
 		case MARKER_TYPE:
@@ -2439,6 +2518,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 			 * from falling into default case
 			 */
 			break;
+		case ABORT_IOCB_TYPE:
+			qla24xx_abort_iocb_entry(vha, rsp->req,
+			    (struct abort_entry_24xx *)pkt);
+			break;
 		default:
 			/* Type Not Supported. */
 			ql_dbg(ql_dbg_async, vha, 0x5042,
@@ -2452,7 +2535,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 	}
 
 	/* Adjust ring index */
-	if (IS_QLA82XX(ha)) {
+	if (IS_P3P_TYPE(ha)) {
 		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
 		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
 	} else
@@ -2467,7 +2550,8 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
-	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+	    !IS_QLA27XX(ha))
 		return;
 
 	rval = QLA_SUCCESS;
@@ -2549,6 +2633,8 @@ qla24xx_intr_handler(int irq, void *dev_id)
 	vha = pci_get_drvdata(ha->pdev);
 	for (iter = 50; iter--; ) {
 		stat = RD_REG_DWORD(&reg->host_status);
+		if (qla2x00_check_reg_for_disconnect(vha, stat))
+			break;
 		if (stat & HSRX_RISC_PAUSED) {
 			if (unlikely(pci_channel_offline(ha->pdev)))
 				break;
@@ -2618,6 +2704,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
 	struct device_reg_24xx __iomem *reg;
 	struct scsi_qla_host *vha;
 	unsigned long flags;
+	uint32_t stat = 0;
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
@@ -2631,11 +2718,19 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
 	vha = pci_get_drvdata(ha->pdev);
+	/*
+	 * Use host_status register to check to PCI disconnection before we
+	 * we process the response queue.
+	 */
+	stat = RD_REG_DWORD(&reg->host_status);
+	if (qla2x00_check_reg_for_disconnect(vha, stat))
+		goto out;
 	qla24xx_process_response_queue(vha, rsp);
 	if (!ha->flags.disable_msix_handshake) {
 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
 		RD_REG_DWORD_RELAXED(&reg->hccr);
 	}
+out:
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	return IRQ_HANDLED;
@@ -2645,9 +2740,11 @@ static irqreturn_t
 qla25xx_msix_rsp_q(int irq, void *dev_id)
 {
 	struct qla_hw_data *ha;
+	scsi_qla_host_t *vha;
 	struct rsp_que *rsp;
 	struct device_reg_24xx __iomem *reg;
 	unsigned long flags;
+	uint32_t hccr = 0;
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
@@ -2656,17 +2753,21 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
 		return IRQ_NONE;
 	}
 	ha = rsp->hw;
+	vha = pci_get_drvdata(ha->pdev);
 
 	/* Clear the interrupt, if enabled, for this response queue */
 	if (!ha->flags.disable_msix_handshake) {
 		reg = &ha->iobase->isp24;
 		spin_lock_irqsave(&ha->hardware_lock, flags);
 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
-		RD_REG_DWORD_RELAXED(&reg->hccr);
+		hccr = RD_REG_DWORD_RELAXED(&reg->hccr);
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	}
+	if (qla2x00_check_reg_for_disconnect(vha, hccr))
+		goto out;
 	queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
 
+out:
 	return IRQ_HANDLED;
 }
 
@@ -2697,6 +2798,8 @@ qla24xx_msix_default(int irq, void *dev_id)
 	vha = pci_get_drvdata(ha->pdev);
 	do {
 		stat = RD_REG_DWORD(&reg->host_status);
+		if (qla2x00_check_reg_for_disconnect(vha, stat))
+			break;
 		if (stat & HSRX_RISC_PAUSED) {
 			if (unlikely(pci_channel_offline(ha->pdev)))
 				break;
@@ -2803,6 +2906,7 @@ static int
 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
 {
 #define MIN_MSIX_COUNT	2
+#define ATIO_VECTOR	2
 	int i, ret;
 	struct msix_entry *entries;
 	struct qla_msix_entry *qentry;
@@ -2859,36 +2963,49 @@ msix_failed:
 	}
 
 	/* Enable MSI-X vectors for the base queue */
-	for (i = 0; i < ha->msix_count; i++) {
+	for (i = 0; i < 2; i++) {
 		qentry = &ha->msix_entries[i];
-		if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
-			ret = request_irq(qentry->vector,
-				qla83xx_msix_entries[i].handler,
-				0, qla83xx_msix_entries[i].name, rsp);
-		} else if (IS_QLA82XX(ha)) {
+		if (IS_P3P_TYPE(ha))
 			ret = request_irq(qentry->vector,
 				qla82xx_msix_entries[i].handler,
 				0, qla82xx_msix_entries[i].name, rsp);
-		} else {
+		else
 			ret = request_irq(qentry->vector,
 				msix_entries[i].handler,
 				0, msix_entries[i].name, rsp);
-		}
-		if (ret) {
-			ql_log(ql_log_fatal, vha, 0x00cb,
-			    "MSI-X: unable to register handler -- %x/%d.\n",
-			    qentry->vector, ret);
-			qla24xx_disable_msix(ha);
-			ha->mqenable = 0;
-			goto msix_out;
-		}
+		if (ret)
+			goto msix_register_fail;
+		qentry->have_irq = 1;
+		qentry->rsp = rsp;
+		rsp->msix = qentry;
+	}
+
+	/*
+	 * If target mode is enable, also request the vector for the ATIO
+	 * queue.
+	 */
+	if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
+		qentry = &ha->msix_entries[ATIO_VECTOR];
+		ret = request_irq(qentry->vector,
+			qla83xx_msix_entries[ATIO_VECTOR].handler,
+			0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp);
 		qentry->have_irq = 1;
 		qentry->rsp = rsp;
 		rsp->msix = qentry;
 	}
 
+msix_register_fail:
+	if (ret) {
+		ql_log(ql_log_fatal, vha, 0x00cb,
+		    "MSI-X: unable to register handler -- %x/%d.\n",
+		    qentry->vector, ret);
+		qla24xx_disable_msix(ha);
+		ha->mqenable = 0;
+		goto msix_out;
+	}
+
 	/* Enable MSI-X vector for response queue update for queue 0 */
-	if (IS_QLA83XX(ha)) {
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
 		if (ha->msixbase && ha->mqiobase &&
 		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
 			ha->mqenable = 1;
@@ -2911,13 +3028,14 @@ msix_out:
 int
 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
 {
-	int ret;
-	device_reg_t __iomem *reg = ha->iobase;
+	int ret = QLA_FUNCTION_FAILED;
+	device_reg_t *reg = ha->iobase;
 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 
 	/* If possible, enable MSI-X. */
 	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
-	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha))
+	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) &&
+	    !IS_QLA27XX(ha))
 		goto skip_msi;
 
 	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
@@ -2945,12 +3063,15 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
 		    ha->chip_revision, ha->fw_attributes);
 		goto clear_risc_ints;
 	}
-	ql_log(ql_log_info, vha, 0x0037,
-	    "MSI-X Falling back-to MSI mode -%d.\n", ret);
+
 skip_msix:
 
+	ql_log(ql_log_info, vha, 0x0037,
+	    "Falling back-to MSI mode -%d.\n", ret);
+
 	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
-	    !IS_QLA8001(ha) && !IS_QLA82XX(ha) && !IS_QLAFX00(ha))
+	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
+	    !IS_QLA27XX(ha))
 		goto skip_msi;
 
 	ret = pci_enable_msi(ha->pdev);
@@ -2960,14 +3081,13 @@ skip_msix:
 		ha->flags.msi_enabled = 1;
 	} else
 		ql_log(ql_log_warn, vha, 0x0039,
-		    "MSI-X; Falling back-to INTa mode -- %d.\n", ret);
+		    "Falling back-to INTa mode -- %d.\n", ret);
 
+skip_msi:
 	/* Skip INTx on ISP82xx. */
 	if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
 		return QLA_FUNCTION_FAILED;
 
-skip_msi:
-
 	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
 	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
 	    QLA2XXX_DRIVER_NAME, rsp);
