| author | Vikas Chaudhary <vikas.chaudhary@qlogic.com> | 2010-07-28 15:53:44 +0530 |
| --- | --- | --- |
| committer | James Bottomley <James.Bottomley@suse.de> | 2010-07-28 09:04:23 -0500 |
| commit | f4f5df23bf72208d0c2f1d8be629839924c2f4c2 (patch) | |
| tree | 88c41a002e0f9f4470543209047d1111a51a0d06 /drivers/scsi/qla4xxx/ql4_isr.c | |
| parent | dbaf82ece08bf93ae5200f03efd87c4f1fc453f1 (diff) | |
[SCSI] qla4xxx: Added support for ISP82XX
Signed-off-by: Vikas Chaudhary <vikas.chaudhary@qlogic.com>
Signed-off-by: Karen Higgins <karen.higgins@qlogic.com>
Signed-off-by: Ravi Anand <ravi.anand@qlogic.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/scsi/qla4xxx/ql4_isr.c')
-rw-r--r-- | drivers/scsi/qla4xxx/ql4_isr.c | 388 |
1 file changed, 325 insertions, 63 deletions
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 596c3031483..68d7942bf2e 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -118,7 +118,6 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
     srb = qla4xxx_del_from_active_array(ha,
                    le32_to_cpu(sts_entry->handle));
     if (!srb) {
-        /* FIXMEdg: Don't we need to reset ISP in this case??? */
         DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
                   "handle 0x%x, sp=%p. This cmd may have already "
                   "been completed.\n", ha->host_no, __func__,
@@ -293,6 +292,10 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,

     case SCS_DEVICE_LOGGED_OUT:
     case SCS_DEVICE_UNAVAILABLE:
+        DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: SCS_DEVICE "
+                  "state: 0x%x\n", ha->host_no,
+                  cmd->device->channel, cmd->device->id,
+                  cmd->device->lun, sts_entry->completionStatus));
         /*
          * Mark device missing so that we won't continue to
          * send I/O to this device. We should get a ddb
@@ -339,16 +342,14 @@ status_entry_exit:
  * This routine process response queue completions in interrupt context.
  * Hardware_lock locked upon entry
  **/
-static void qla4xxx_process_response_queue(struct scsi_qla_host * ha)
+void qla4xxx_process_response_queue(struct scsi_qla_host *ha)
 {
     uint32_t count = 0;
     struct srb *srb = NULL;
     struct status_entry *sts_entry;

     /* Process all responses from response queue */
-    while ((ha->response_in =
-        (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in)) !=
-           ha->response_out) {
+    while ((ha->response_ptr->signature != RESPONSE_PROCESSED)) {
         sts_entry = (struct status_entry *) ha->response_ptr;
         count++;

@@ -413,14 +414,14 @@ static void qla4xxx_process_response_queue(struct scsi_qla_host * ha)
                   sts_entry->hdr.entryType));
             goto exit_prq_error;
         }
+        ((struct response *)sts_entry)->signature = RESPONSE_PROCESSED;
+        wmb();
     }

     /*
-     * Done with responses, update the ISP For QLA4010, this also clears
-     * the interrupt.
+     * Tell ISP we're done with response(s). This also clears the interrupt.
      */
-    writel(ha->response_out, &ha->reg->rsp_q_out);
-    readl(&ha->reg->rsp_q_out);
+    ha->isp_ops->complete_iocb(ha);

     return;

@@ -430,9 +431,7 @@ exit_prq_invalid_handle:
           sts_entry->completionStatus));

 exit_prq_error:
-    writel(ha->response_out, &ha->reg->rsp_q_out);
-    readl(&ha->reg->rsp_q_out);
-
+    ha->isp_ops->complete_iocb(ha);
     set_bit(DPC_RESET_HA, &ha->dpc_flags);
 }

@@ -448,7 +447,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
                        uint32_t mbox_status)
 {
     int i;
-    uint32_t mbox_stat2, mbox_stat3;
+    uint32_t mbox_sts[MBOX_AEN_REG_COUNT];

     if ((mbox_status == MBOX_STS_BUSY) ||
         (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
@@ -460,27 +459,37 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
              * Copy all mailbox registers to a temporary
              * location and set mailbox command done flag
              */
-            for (i = 1; i < ha->mbox_status_count; i++)
-                ha->mbox_status[i] =
-                    readl(&ha->reg->mailbox[i]);
+            for (i = 0; i < ha->mbox_status_count; i++)
+                ha->mbox_status[i] = is_qla8022(ha)
+                    ? readl(&ha->qla4_8xxx_reg->mailbox_out[i])
+                    : readl(&ha->reg->mailbox[i]);

             set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
+
+            if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags))
+                complete(&ha->mbx_intr_comp);
         }
     } else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
+        for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
+            mbox_sts[i] = is_qla8022(ha)
+                ? readl(&ha->qla4_8xxx_reg->mailbox_out[i])
+                : readl(&ha->reg->mailbox[i]);
+
         /* Immediately process the AENs that don't require much work.
          * Only queue the database_changed AENs */
         if (ha->aen_log.count < MAX_AEN_ENTRIES) {
             for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
                 ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
-                    readl(&ha->reg->mailbox[i]);
+                    mbox_sts[i];
             ha->aen_log.count++;
         }
         switch (mbox_status) {
         case MBOX_ASTS_SYSTEM_ERROR:
             /* Log Mailbox registers */
+            ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__);
             if (ql4xdontresethba) {
-                DEBUG2(printk("%s:Dont Reset HBA\n",
-                          __func__));
+                DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n",
+                    ha->host_no, __func__));
             } else {
                 set_bit(AF_GET_CRASH_RECORD, &ha->flags);
                 set_bit(DPC_RESET_HA, &ha->dpc_flags);
@@ -502,18 +511,15 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
             if (test_bit(AF_INIT_DONE, &ha->flags))
                 set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);

-            DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x Adapter"
-                      " LINK UP\n", ha->host_no,
-                      mbox_status));
+            ql4_printk(KERN_INFO, ha, "%s: LINK UP\n", __func__);
             break;

         case MBOX_ASTS_LINK_DOWN:
             clear_bit(AF_LINK_UP, &ha->flags);
-            set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
+            if (test_bit(AF_INIT_DONE, &ha->flags))
+                set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);

-            DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x Adapter"
-                      " LINK DOWN\n", ha->host_no,
-                      mbox_status));
+            ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__);
             break;

         case MBOX_ASTS_HEARTBEAT:
@@ -539,12 +545,17 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
             break;

         case MBOX_ASTS_IP_ADDR_STATE_CHANGED:
-            mbox_stat2 = readl(&ha->reg->mailbox[2]);
-            mbox_stat3 = readl(&ha->reg->mailbox[3]);
-
-            if ((mbox_stat3 == 5) && (mbox_stat2 == 3))
+            printk("scsi%ld: AEN %04x, mbox_sts[2]=%04x, "
+                "mbox_sts[3]=%04x\n", ha->host_no, mbox_sts[0],
+                mbox_sts[2], mbox_sts[3]);
+
+            /* mbox_sts[2] = Old ACB state
+             * mbox_sts[3] = new ACB state */
+            if ((mbox_sts[3] == ACB_STATE_VALID) &&
+                (mbox_sts[2] == ACB_STATE_TENTATIVE))
                 set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
-            else if ((mbox_stat3 == 2) && (mbox_stat2 == 5))
+            else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
+                (mbox_sts[2] == ACB_STATE_VALID))
                 set_bit(DPC_RESET_HA, &ha->dpc_flags);
             break;

@@ -553,9 +564,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
             /* No action */
             DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
                       "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
-                      ha->host_no, mbox_status,
-                      readl(&ha->reg->mailbox[1]),
-                      readl(&ha->reg->mailbox[2])));
+                      ha->host_no, mbox_sts[0],
+                      mbox_sts[1], mbox_sts[2]));
             break;

         case MBOX_ASTS_SELF_TEST_FAILED:
@@ -563,10 +573,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
             /* No action */
             DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
                       "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
-                      ha->host_no, mbox_status,
-                      readl(&ha->reg->mailbox[1]),
-                      readl(&ha->reg->mailbox[2]),
-                      readl(&ha->reg->mailbox[3])));
+                      ha->host_no, mbox_sts[0], mbox_sts[1],
+                      mbox_sts[2], mbox_sts[3]));
             break;

         case MBOX_ASTS_DATABASE_CHANGED:
@@ -577,21 +585,17 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
                 /* decrement available counter */
                 ha->aen_q_count--;

-                for (i = 1; i < MBOX_AEN_REG_COUNT; i++)
+                for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
                     ha->aen_q[ha->aen_in].mbox_sts[i] =
-                        readl(&ha->reg->mailbox[i]);
-
-                ha->aen_q[ha->aen_in].mbox_sts[0] = mbox_status;
+                        mbox_sts[i];

                 /* print debug message */
                 DEBUG2(printk("scsi%ld: AEN[%d] %04x queued"
-                          " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n",
-                          ha->host_no, ha->aen_in,
-                          mbox_status,
-                          ha->aen_q[ha->aen_in].mbox_sts[1],
-                          ha->aen_q[ha->aen_in].mbox_sts[2],
-                          ha->aen_q[ha->aen_in].mbox_sts[3],
-                          ha->aen_q[ha->aen_in].mbox_sts[4]));
+                    " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n",
+                    ha->host_no, ha->aen_in, mbox_sts[0],
+                    mbox_sts[1], mbox_sts[2], mbox_sts[3],
+                    mbox_sts[4]));
+
                 /* advance pointer */
                 ha->aen_in++;
                 if (ha->aen_in == MAX_AEN_ENTRIES)
@@ -603,18 +607,16 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,

                 DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
                           "overflowed! AEN LOST!!\n",
                           ha->host_no, __func__,
-                          mbox_status));
+                          mbox_sts[0]));

                 DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
                           ha->host_no));

                 for (i = 0; i < MAX_AEN_ENTRIES; i++) {
                     DEBUG2(printk("AEN[%d] %04x %04x %04x "
-                              "%04x\n", i,
-                              ha->aen_q[i].mbox_sts[0],
-                              ha->aen_q[i].mbox_sts[1],
-                              ha->aen_q[i].mbox_sts[2],
-                              ha->aen_q[i].mbox_sts[3]));
+                              "%04x\n", i, mbox_sts[0],
+                              mbox_sts[1], mbox_sts[2],
+                              mbox_sts[3]));
                 }
             }
             break;
@@ -622,7 +624,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,

         default:
             DEBUG2(printk(KERN_WARNING "scsi%ld: AEN %04x UNKNOWN\n",
-                      ha->host_no, mbox_status));
+                      ha->host_no, mbox_sts[0]));
             break;
         }
     } else {
@@ -634,6 +636,30 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
 }

 /**
+ * qla4_8xxx_interrupt_service_routine - isr
+ * @ha: pointer to host adapter structure.
+ *
+ * This is the main interrupt service routine.
+ * hardware_lock locked upon entry. runs in interrupt context.
+ **/
+void qla4_8xxx_interrupt_service_routine(struct scsi_qla_host *ha,
+    uint32_t intr_status)
+{
+    /* Process response queue interrupt. */
+    if (intr_status & HSRX_RISC_IOCB_INT)
+        qla4xxx_process_response_queue(ha);
+
+    /* Process mailbox/asynch event interrupt.*/
+    if (intr_status & HSRX_RISC_MB_INT)
+        qla4xxx_isr_decode_mailbox(ha,
+            readl(&ha->qla4_8xxx_reg->mailbox_out[0]));
+
+    /* clear the interrupt */
+    writel(0, &ha->qla4_8xxx_reg->host_int);
+    readl(&ha->qla4_8xxx_reg->host_int);
+}
+
+/**
  * qla4xxx_interrupt_service_routine - isr
  * @ha: pointer to host adapter structure.
  *
@@ -660,6 +686,28 @@ void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
 }

 /**
+ * qla4_8xxx_spurious_interrupt - processes spurious interrupt
+ * @ha: pointer to host adapter structure.
+ * @reqs_count: .
+ *
+ **/
+static void qla4_8xxx_spurious_interrupt(struct scsi_qla_host *ha,
+    uint8_t reqs_count)
+{
+    if (reqs_count)
+        return;
+
+    DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n"));
+    if (is_qla8022(ha)) {
+        writel(0, &ha->qla4_8xxx_reg->host_int);
+        if (test_bit(AF_INTx_ENABLED, &ha->flags))
+            qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
+                0xfbff);
+    }
+    ha->spurious_int_count++;
+}
+
+/**
  * qla4xxx_intr_handler - hardware interrupt handler.
  * @irq: Unused
  * @dev_id: Pointer to host adapter structure
@@ -689,15 +737,14 @@ irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
         /*
          * Read interrupt status
          */
-        if (le32_to_cpu(ha->shadow_regs->rsp_q_in) !=
+        if (ha->isp_ops->rd_shdw_rsp_q_in(ha) !=
             ha->response_out)
             intr_status = CSR_SCSI_COMPLETION_INTR;
         else
             intr_status = readl(&ha->reg->ctrl_status);

         if ((intr_status &
-            (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) ==
-            0) {
+            (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) == 0) {
             if (reqs_count == 0)
                 ha->spurious_int_count++;
             break;
@@ -739,22 +786,159 @@ irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
                    &ha->reg->ctrl_status);
             readl(&ha->reg->ctrl_status);

-            if (!ql4_mod_unload)
+            if (!test_bit(AF_HBA_GOING_AWAY, &ha->flags))
                 set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);

             break;
         } else if (intr_status & INTR_PENDING) {
-            qla4xxx_interrupt_service_routine(ha, intr_status);
+            ha->isp_ops->interrupt_service_routine(ha, intr_status);
             ha->total_io_count++;
             if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
                 break;
+        }
+    }
+
+    spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+    return IRQ_HANDLED;
+}
+
+/**
+ * qla4_8xxx_intr_handler - hardware interrupt handler.
+ * @irq: Unused
+ * @dev_id: Pointer to host adapter structure
+ **/
+irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id)
+{
+    struct scsi_qla_host *ha = dev_id;
+    uint32_t intr_status;
+    uint32_t status;
+    unsigned long flags = 0;
+    uint8_t reqs_count = 0;
+
+    ha->isr_count++;
+    status = qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
+    if (!(status & ha->nx_legacy_intr.int_vec_bit))
+        return IRQ_NONE;
+
+    status = qla4_8xxx_rd_32(ha, ISR_INT_STATE_REG);
+    if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) {
+        DEBUG2(ql4_printk(KERN_INFO, ha,
+            "%s legacy Int not triggered\n", __func__));
+        return IRQ_NONE;
+    }
+
+    /* clear the interrupt */
+    qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
+
+    /* read twice to ensure write is flushed */
+    qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
+    qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
+
+    spin_lock_irqsave(&ha->hardware_lock, flags);
+    while (1) {
+        if (!(readl(&ha->qla4_8xxx_reg->host_int) &
+            ISRX_82XX_RISC_INT)) {
+            qla4_8xxx_spurious_interrupt(ha, reqs_count);
+            break;
+        }
+        intr_status = readl(&ha->qla4_8xxx_reg->host_status);
+        if ((intr_status &
+            (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
+            qla4_8xxx_spurious_interrupt(ha, reqs_count);
+            break;
+        }
+
+        ha->isp_ops->interrupt_service_routine(ha, intr_status);
+
+        /* Enable Interrupt */
+        qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);

-            intr_status = 0;
+        if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
+            break;
+    }
+
+    spin_unlock_irqrestore(&ha->hardware_lock, flags);
+    return IRQ_HANDLED;
+}
+
+irqreturn_t
+qla4_8xxx_msi_handler(int irq, void *dev_id)
+{
+    struct scsi_qla_host *ha;
+
+    ha = (struct scsi_qla_host *) dev_id;
+    if (!ha) {
+        DEBUG2(printk(KERN_INFO
+            "qla4xxx: MSIX: Interrupt with NULL host ptr\n"));
+        return IRQ_NONE;
+    }
+
+    ha->isr_count++;
+    /* clear the interrupt */
+    qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
+
+    /* read twice to ensure write is flushed */
+    qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
+    qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
+
+    return qla4_8xxx_default_intr_handler(irq, dev_id);
+}
+
+/**
+ * qla4_8xxx_default_intr_handler - hardware interrupt handler.
+ * @irq: Unused
+ * @dev_id: Pointer to host adapter structure
+ *
+ * This interrupt handler is called directly for MSI-X, and
+ * called indirectly for MSI.
+ **/
+irqreturn_t
+qla4_8xxx_default_intr_handler(int irq, void *dev_id)
+{
+    struct scsi_qla_host *ha = dev_id;
+    unsigned long flags;
+    uint32_t intr_status;
+    uint8_t reqs_count = 0;
+
+    spin_lock_irqsave(&ha->hardware_lock, flags);
+    while (1) {
+        if (!(readl(&ha->qla4_8xxx_reg->host_int) &
+            ISRX_82XX_RISC_INT)) {
+            qla4_8xxx_spurious_interrupt(ha, reqs_count);
+            break;
+        }
+
+        intr_status = readl(&ha->qla4_8xxx_reg->host_status);
+        if ((intr_status &
+            (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
+            qla4_8xxx_spurious_interrupt(ha, reqs_count);
+            break;
         }
+
+        ha->isp_ops->interrupt_service_routine(ha, intr_status);
+
+        if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
+            break;
     }

+    ha->isr_count++;
     spin_unlock_irqrestore(&ha->hardware_lock, flags);
+    return IRQ_HANDLED;
+}

+irqreturn_t
+qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
+{
+    struct scsi_qla_host *ha = dev_id;
+    unsigned long flags;
+
+    spin_lock_irqsave(&ha->hardware_lock, flags);
+    qla4xxx_process_response_queue(ha);
+    writel(0, &ha->qla4_8xxx_reg->host_int);
+    spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+    ha->isr_count++;
     return IRQ_HANDLED;
 }
@@ -825,7 +1009,7 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
                     ((ddb_entry->default_time2wait + 4) * HZ);

-            DEBUG2(printk("scsi%ld: ddb index [%d] initate"
+            DEBUG2(printk("scsi%ld: ddb [%d] initate"
                       " RELOGIN after %d seconds\n",
                       ha->host_no,
                       ddb_entry->fw_ddb_index,
@@ -847,3 +1031,81 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)

     spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
+int qla4xxx_request_irqs(struct scsi_qla_host *ha)
+{
+    int ret;
+
+    if (!is_qla8022(ha))
+        goto try_intx;
+
+    if (ql4xenablemsix == 2)
+        goto try_msi;
+
+    if (ql4xenablemsix == 0 || ql4xenablemsix != 1)
+        goto try_intx;
+
+    /* Trying MSI-X */
+    ret = qla4_8xxx_enable_msix(ha);
+    if (!ret) {
+        DEBUG2(ql4_printk(KERN_INFO, ha,
+            "MSI-X: Enabled (0x%X).\n", ha->revision_id));
+        goto irq_attached;
+    }
+
+    ql4_printk(KERN_WARNING, ha,
+        "MSI-X: Falling back-to MSI mode -- %d.\n", ret);
+
+try_msi:
+    /* Trying MSI */
+    ret = pci_enable_msi(ha->pdev);
+    if (!ret) {
+        ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler,
+            IRQF_DISABLED|IRQF_SHARED, DRIVER_NAME, ha);
+        if (!ret) {
+            DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
+            set_bit(AF_MSI_ENABLED, &ha->flags);
+            goto irq_attached;
+        } else {
+            ql4_printk(KERN_WARNING, ha,
+                "MSI: Failed to reserve interrupt %d "
+                "already in use.\n", ha->pdev->irq);
+            pci_disable_msi(ha->pdev);
+        }
+    }
+    ql4_printk(KERN_WARNING, ha,
+        "MSI: Falling back-to INTx mode -- %d.\n", ret);
+
+try_intx:
+    /* Trying INTx */
+    ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
+        IRQF_DISABLED|IRQF_SHARED, DRIVER_NAME, ha);
+    if (!ret) {
+        DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n"));
+        set_bit(AF_INTx_ENABLED, &ha->flags);
+        goto irq_attached;
+
+    } else {
+        ql4_printk(KERN_WARNING, ha,
+            "INTx: Failed to reserve interrupt %d already in"
+            " use.\n", ha->pdev->irq);
+        return ret;
+    }
+
+irq_attached:
+    set_bit(AF_IRQ_ATTACHED, &ha->flags);
+    ha->host->irq = ha->pdev->irq;
+    ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
+        __func__, ha->pdev->irq);
+    return ret;
+}
+
+void qla4xxx_free_irqs(struct scsi_qla_host *ha)
+{
+    if (test_bit(AF_MSIX_ENABLED, &ha->flags))
+        qla4_8xxx_disable_msix(ha);
+    else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
+        free_irq(ha->pdev->irq, ha);
+        pci_disable_msi(ha->pdev);
+    } else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags))
+        free_irq(ha->pdev->irq, ha);
+}
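For readers skimming the patch, the interrupt-registration portion added by this commit (qla4xxx_request_irqs) reduces to a three-step fallback: try MSI-X on ISP82xx hardware, drop to MSI if that fails, and finally fall back to legacy INTx. The standalone C sketch below illustrates only that control flow; the enable_msix/enable_msi/request_line helpers are hypothetical stand-ins, not the driver's symbols, and the real function additionally honors the ql4xenablemsix module parameter.

    #include <stdio.h>

    /* Hypothetical stand-ins for the driver's enable/request helpers;
     * each returns 0 on success and non-zero on failure. */
    static int enable_msix(void) { return -1; }  /* pretend MSI-X is unavailable */
    static int enable_msi(void)  { return 0; }
    static int request_line(const char *mode)
    {
        printf("requesting irq in %s mode\n", mode);
        return 0;
    }

    /* Mirrors the fallback order used by the new qla4xxx_request_irqs():
     * MSI-X -> MSI -> INTx, stopping at the first mode that attaches. */
    static int request_irqs(void)
    {
        if (enable_msix() == 0)
            return request_line("MSI-X");

        if (enable_msi() == 0 && request_line("MSI") == 0)
            return 0;

        return request_line("INTx");  /* last resort: legacy interrupt */
    }

    int main(void)
    {
        return request_irqs();
    }

The matching teardown in qla4xxx_free_irqs releases whichever mode was attached, which is why the flags set on the success paths (MSI-X, MSI, or INTx enabled) are tested in the same order when freeing.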