Diffstat (limited to 'drivers/ata/sata_nv.c')
| -rw-r--r-- | drivers/ata/sata_nv.c | 478 |
1 file changed, 208 insertions, 270 deletions
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 0c82d335c55..cdf99fac139 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -38,8 +38,8 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/gfp.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> @@ -271,7 +271,7 @@ enum ncq_saw_flag_list { }; struct nv_swncq_port_priv { - struct ata_prd *prd; /* our SG list */ + struct ata_bmdma_prd *prd; /* our SG list */ dma_addr_t prd_dma; /* and its DMA mapping */ void __iomem *sactive_block; void __iomem *irq_block; @@ -295,7 +295,7 @@ struct nv_swncq_port_priv { #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT))))) static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int nv_pci_device_resume(struct pci_dev *pdev); #endif static void nv_ck804_host_stop(struct ata_host *host); @@ -379,7 +379,7 @@ static struct pci_driver nv_pci_driver = { .name = DRV_NAME, .id_table = nv_pci_tbl, .probe = nv_init_one, -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP .suspend = ata_pci_device_suspend, .resume = nv_pci_device_resume, #endif @@ -538,7 +538,7 @@ struct nv_pi_priv { static const struct ata_port_info nv_port_info[] = { /* generic */ { - .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, + .flags = ATA_FLAG_SATA, .pio_mask = NV_PIO_MASK, .mwdma_mask = NV_MWDMA_MASK, .udma_mask = NV_UDMA_MASK, @@ -547,7 +547,7 @@ static const struct ata_port_info nv_port_info[] = { }, /* nforce2/3 */ { - .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, + .flags = ATA_FLAG_SATA, .pio_mask = NV_PIO_MASK, .mwdma_mask = NV_MWDMA_MASK, .udma_mask = NV_UDMA_MASK, @@ -556,7 +556,7 @@ static const struct ata_port_info nv_port_info[] = { }, /* ck804 */ { - .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, + .flags = ATA_FLAG_SATA, .pio_mask = NV_PIO_MASK, .mwdma_mask = NV_MWDMA_MASK, .udma_mask = NV_UDMA_MASK, @@ -565,8 +565,7 @@ static const struct ata_port_info nv_port_info[] = { }, /* ADMA */ { - .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | - ATA_FLAG_MMIO | ATA_FLAG_NCQ, + .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ, .pio_mask = NV_PIO_MASK, .mwdma_mask = NV_MWDMA_MASK, .udma_mask = NV_UDMA_MASK, @@ -575,7 +574,7 @@ static const struct ata_port_info nv_port_info[] = { }, /* MCP5x */ { - .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, + .flags = ATA_FLAG_SATA, .pio_mask = NV_PIO_MASK, .mwdma_mask = NV_MWDMA_MASK, .udma_mask = NV_UDMA_MASK, @@ -584,8 +583,7 @@ static const struct ata_port_info nv_port_info[] = { }, /* SWNCQ */ { - .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | - ATA_FLAG_NCQ, + .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ, .pio_mask = NV_PIO_MASK, .mwdma_mask = NV_MWDMA_MASK, .udma_mask = NV_UDMA_MASK, @@ -600,9 +598,9 @@ MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, nv_pci_tbl); MODULE_VERSION(DRV_VERSION); -static int adma_enabled; -static int swncq_enabled = 1; -static int msi_enabled; +static bool adma_enabled; +static bool swncq_enabled = 1; +static bool msi_enabled; static void nv_adma_register_mode(struct ata_port *ap) { @@ -621,9 +619,8 @@ static void nv_adma_register_mode(struct ata_port *ap) count++; } if (count == 20) - ata_port_printk(ap, KERN_WARNING, - "timeout waiting for ADMA IDLE, stat=0x%hx\n", - status); + ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n", + status); tmp = readw(mmio + NV_ADMA_CTL); writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL); @@ -636,9 +633,9 @@ static void 
nv_adma_register_mode(struct ata_port *ap) count++; } if (count == 20) - ata_port_printk(ap, KERN_WARNING, - "timeout waiting for ADMA LEGACY, stat=0x%hx\n", - status); + ata_port_warn(ap, + "timeout waiting for ADMA LEGACY, stat=0x%hx\n", + status); pp->flags |= NV_ADMA_PORT_REGISTER_MODE; } @@ -666,7 +663,7 @@ static void nv_adma_mode(struct ata_port *ap) count++; } if (count == 20) - ata_port_printk(ap, KERN_WARNING, + ata_port_warn(ap, "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n", status); @@ -772,11 +769,11 @@ static int nv_adma_slave_config(struct scsi_device *sdev) } blk_queue_segment_boundary(sdev->request_queue, segment_boundary); - blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize); - ata_port_printk(ap, KERN_INFO, - "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n", - (unsigned long long)*ap->host->dev->dma_mask, - segment_boundary, sg_tablesize); + blk_queue_max_segments(sdev->request_queue, sg_tablesize); + ata_port_info(ap, + "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n", + (unsigned long long)*ap->host->dev->dma_mask, + segment_boundary, sg_tablesize); spin_unlock_irqrestore(ap->lock, flags); @@ -872,29 +869,11 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err) ata_port_freeze(ap); else ata_port_abort(ap); - return 1; + return -1; } - if (likely(flags & NV_CPB_RESP_DONE)) { - struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num); - VPRINTK("CPB flags done, flags=0x%x\n", flags); - if (likely(qc)) { - DPRINTK("Completing qc from tag %d\n", cpb_num); - ata_qc_complete(qc); - } else { - struct ata_eh_info *ehi = &ap->link.eh_info; - /* Notifier bits set without a command may indicate the drive - is misbehaving. Raise host state machine violation on this - condition. 
*/ - ata_port_printk(ap, KERN_ERR, - "notifier for tag %d with no cmd?\n", - cpb_num); - ehi->err_mask |= AC_ERR_HSM; - ehi->action |= ATA_EH_RESET; - ata_port_freeze(ap); - return 1; - } - } + if (likely(flags & NV_CPB_RESP_DONE)) + return 1; return 0; } @@ -919,7 +898,7 @@ static int nv_host_intr(struct ata_port *ap, u8 irq_stat) } /* handle interrupt */ - return ata_sff_host_intr(ap, qc); + return ata_bmdma_port_intr(ap, qc); } static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) @@ -932,108 +911,115 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; + struct nv_adma_port_priv *pp = ap->private_data; + void __iomem *mmio = pp->ctl_block; + u16 status; + u32 gen_ctl; + u32 notifier, notifier_error; + notifier_clears[i] = 0; - if (ap && !(ap->flags & ATA_FLAG_DISABLED)) { - struct nv_adma_port_priv *pp = ap->private_data; - void __iomem *mmio = pp->ctl_block; - u16 status; - u32 gen_ctl; - u32 notifier, notifier_error; - - /* if ADMA is disabled, use standard ata interrupt handler */ - if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) { - u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) - >> (NV_INT_PORT_SHIFT * i); - handled += nv_host_intr(ap, irq_stat); - continue; - } + /* if ADMA is disabled, use standard ata interrupt handler */ + if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) { + u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) + >> (NV_INT_PORT_SHIFT * i); + handled += nv_host_intr(ap, irq_stat); + continue; + } - /* if in ATA register mode, check for standard interrupts */ - if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) { - u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) - >> (NV_INT_PORT_SHIFT * i); - if (ata_tag_valid(ap->link.active_tag)) - /** NV_INT_DEV indication seems unreliable at times - at least in ADMA mode. Force it on always when a - command is active, to prevent losing interrupts. */ - irq_stat |= NV_INT_DEV; - handled += nv_host_intr(ap, irq_stat); - } + /* if in ATA register mode, check for standard interrupts */ + if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) { + u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) + >> (NV_INT_PORT_SHIFT * i); + if (ata_tag_valid(ap->link.active_tag)) + /** NV_INT_DEV indication seems unreliable + at times at least in ADMA mode. Force it + on always when a command is active, to + prevent losing interrupts. */ + irq_stat |= NV_INT_DEV; + handled += nv_host_intr(ap, irq_stat); + } + + notifier = readl(mmio + NV_ADMA_NOTIFIER); + notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR); + notifier_clears[i] = notifier | notifier_error; + + gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); + + if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier && + !notifier_error) + /* Nothing to do */ + continue; + + status = readw(mmio + NV_ADMA_STAT); + + /* + * Clear status. Ensure the controller sees the + * clearing before we start looking at any of the CPB + * statuses, so that any CPB completions after this + * point in the handler will raise another interrupt. 
+ */ + writew(status, mmio + NV_ADMA_STAT); + readw(mmio + NV_ADMA_STAT); /* flush posted write */ + rmb(); + + handled++; /* irq handled if we got here */ + + /* freeze if hotplugged or controller error */ + if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | + NV_ADMA_STAT_HOTUNPLUG | + NV_ADMA_STAT_TIMEOUT | + NV_ADMA_STAT_SERROR))) { + struct ata_eh_info *ehi = &ap->link.eh_info; + + ata_ehi_clear_desc(ehi); + __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status); + if (status & NV_ADMA_STAT_TIMEOUT) { + ehi->err_mask |= AC_ERR_SYSTEM; + ata_ehi_push_desc(ehi, "timeout"); + } else if (status & NV_ADMA_STAT_HOTPLUG) { + ata_ehi_hotplugged(ehi); + ata_ehi_push_desc(ehi, "hotplug"); + } else if (status & NV_ADMA_STAT_HOTUNPLUG) { + ata_ehi_hotplugged(ehi); + ata_ehi_push_desc(ehi, "hot unplug"); + } else if (status & NV_ADMA_STAT_SERROR) { + /* let EH analyze SError and figure out cause */ + ata_ehi_push_desc(ehi, "SError"); + } else + ata_ehi_push_desc(ehi, "unknown"); + ata_port_freeze(ap); + continue; + } - notifier = readl(mmio + NV_ADMA_NOTIFIER); - notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR); - notifier_clears[i] = notifier | notifier_error; - - gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); - - if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier && - !notifier_error) - /* Nothing to do */ - continue; - - status = readw(mmio + NV_ADMA_STAT); - - /* Clear status. Ensure the controller sees the clearing before we start - looking at any of the CPB statuses, so that any CPB completions after - this point in the handler will raise another interrupt. */ - writew(status, mmio + NV_ADMA_STAT); - readw(mmio + NV_ADMA_STAT); /* flush posted write */ - rmb(); - - handled++; /* irq handled if we got here */ - - /* freeze if hotplugged or controller error */ - if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | - NV_ADMA_STAT_HOTUNPLUG | - NV_ADMA_STAT_TIMEOUT | - NV_ADMA_STAT_SERROR))) { - struct ata_eh_info *ehi = &ap->link.eh_info; - - ata_ehi_clear_desc(ehi); - __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status); - if (status & NV_ADMA_STAT_TIMEOUT) { - ehi->err_mask |= AC_ERR_SYSTEM; - ata_ehi_push_desc(ehi, "timeout"); - } else if (status & NV_ADMA_STAT_HOTPLUG) { - ata_ehi_hotplugged(ehi); - ata_ehi_push_desc(ehi, "hotplug"); - } else if (status & NV_ADMA_STAT_HOTUNPLUG) { - ata_ehi_hotplugged(ehi); - ata_ehi_push_desc(ehi, "hot unplug"); - } else if (status & NV_ADMA_STAT_SERROR) { - /* let libata analyze SError and figure out the cause */ - ata_ehi_push_desc(ehi, "SError"); - } else - ata_ehi_push_desc(ehi, "unknown"); - ata_port_freeze(ap); - continue; + if (status & (NV_ADMA_STAT_DONE | + NV_ADMA_STAT_CPBERR | + NV_ADMA_STAT_CMD_COMPLETE)) { + u32 check_commands = notifier_clears[i]; + u32 done_mask = 0; + int pos, rc; + + if (status & NV_ADMA_STAT_CPBERR) { + /* check all active commands */ + if (ata_tag_valid(ap->link.active_tag)) + check_commands = 1 << + ap->link.active_tag; + else + check_commands = ap->link.sactive; } - if (status & (NV_ADMA_STAT_DONE | - NV_ADMA_STAT_CPBERR | - NV_ADMA_STAT_CMD_COMPLETE)) { - u32 check_commands = notifier_clears[i]; - int pos, error = 0; - - if (status & NV_ADMA_STAT_CPBERR) { - /* Check all active commands */ - if (ata_tag_valid(ap->link.active_tag)) - check_commands = 1 << - ap->link.active_tag; - else - check_commands = ap-> - link.sactive; - } - - /** Check CPBs for completed commands */ - while ((pos = ffs(check_commands)) && !error) { - pos--; - error = nv_adma_check_cpb(ap, pos, + /* check CPBs for completed commands */ + 
while ((pos = ffs(check_commands))) { + pos--; + rc = nv_adma_check_cpb(ap, pos, notifier_error & (1 << pos)); - check_commands &= ~(1 << pos); - } + if (rc > 0) + done_mask |= 1 << pos; + else if (unlikely(rc < 0)) + check_commands = 0; + check_commands &= ~(1 << pos); } + ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); } } @@ -1098,7 +1084,7 @@ static void nv_adma_irq_clear(struct ata_port *ap) u32 notifier_clears[2]; if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) { - ata_sff_irq_clear(ap); + ata_bmdma_irq_clear(ap); return; } @@ -1129,7 +1115,7 @@ static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc) struct nv_adma_port_priv *pp = qc->ap->private_data; if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) - ata_sff_post_internal_cmd(qc); + ata_bmdma_post_internal_cmd(qc); } static int nv_adma_port_start(struct ata_port *ap) @@ -1154,7 +1140,8 @@ static int nv_adma_port_start(struct ata_port *ap) if (rc) return rc; - rc = ata_port_start(ap); + /* we might fallback to bmdma, allocate bmdma resources */ + rc = ata_bmdma_port_start(ap); if (rc) return rc; @@ -1406,7 +1393,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc) BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) && (qc->flags & ATA_QCFLAG_DMAMAP)); nv_adma_register_mode(qc->ap); - ata_sff_qc_prep(qc); + ata_bmdma_qc_prep(qc); return; } @@ -1454,8 +1441,7 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) existing commands. */ if (unlikely(qc->tf.protocol == ATA_PROT_NCQ && (qc->flags & ATA_QCFLAG_RESULT_TF))) { - ata_dev_printk(qc->dev, KERN_ERR, - "NCQ w/ RESULT_TF not allowed\n"); + ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n"); return AC_ERR_SYSTEM; } @@ -1465,7 +1451,7 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) && (qc->flags & ATA_QCFLAG_DMAMAP)); nv_adma_register_mode(qc->ap); - return ata_sff_qc_issue(qc); + return ata_bmdma_qc_issue(qc); } else nv_adma_mode(qc->ap); @@ -1497,22 +1483,19 @@ static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance) spin_lock_irqsave(&host->lock, flags); for (i = 0; i < host->n_ports; i++) { - struct ata_port *ap; - - ap = host->ports[i]; - if (ap && - !(ap->flags & ATA_FLAG_DISABLED)) { - struct ata_queued_cmd *qc; + struct ata_port *ap = host->ports[i]; + struct ata_queued_cmd *qc; - qc = ata_qc_from_tag(ap, ap->link.active_tag); - if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) - handled += ata_sff_host_intr(ap, qc); - else - // No request pending? Clear interrupt status - // anyway, in case there's one pending. - ap->ops->sff_check_status(ap); + qc = ata_qc_from_tag(ap, ap->link.active_tag); + if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { + handled += ata_bmdma_port_intr(ap, qc); + } else { + /* + * No request pending? Clear interrupt status + * anyway, in case there's one pending. 
+ */ + ap->ops->sff_check_status(ap); } - } spin_unlock_irqrestore(&host->lock, flags); @@ -1525,11 +1508,7 @@ static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat) int i, handled = 0; for (i = 0; i < host->n_ports; i++) { - struct ata_port *ap = host->ports[i]; - - if (ap && !(ap->flags & ATA_FLAG_DISABLED)) - handled += nv_host_intr(ap, irq_stat); - + handled += nv_host_intr(host->ports[i], irq_stat); irq_stat >>= NV_INT_PORT_SHIFT; } @@ -1599,15 +1578,15 @@ static int nv_hardreset(struct ata_link *link, unsigned int *class, int rc; if (!(ehc->i.flags & ATA_EHI_QUIET)) - ata_link_printk(link, KERN_INFO, "nv: skipping " - "hardreset on occupied port\n"); + ata_link_info(link, + "nv: skipping hardreset on occupied port\n"); /* make sure the link is online */ rc = sata_link_resume(link, timing, deadline); /* whine about phy resume failure but proceed */ if (rc && rc != -EOPNOTSUPP) - ata_link_printk(link, KERN_WARNING, "failed to resume " - "link (errno=%d)\n", rc); + ata_link_warn(link, "failed to resume link (errno=%d)\n", + rc); } /* device signature acquisition is unreliable */ @@ -1673,7 +1652,6 @@ static void nv_mcp55_freeze(struct ata_port *ap) mask = readl(mmio_base + NV_INT_ENABLE_MCP55); mask &= ~(NV_INT_ALL_MCP55 << shift); writel(mask, mmio_base + NV_INT_ENABLE_MCP55); - ata_sff_freeze(ap); } static void nv_mcp55_thaw(struct ata_port *ap) @@ -1687,7 +1665,6 @@ static void nv_mcp55_thaw(struct ata_port *ap) mask = readl(mmio_base + NV_INT_ENABLE_MCP55); mask |= (NV_INT_MASK_MCP55 << shift); writel(mask, mmio_base + NV_INT_ENABLE_MCP55); - ata_sff_thaw(ap); } static void nv_adma_error_handler(struct ata_port *ap) @@ -1706,7 +1683,7 @@ static void nv_adma_error_handler(struct ata_port *ap) u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT); u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX); - ata_port_printk(ap, KERN_ERR, + ata_port_err(ap, "EH in ADMA mode, notifier 0x%X " "notifier_error 0x%X gen_ctl 0x%X status 0x%X " "next cpb count 0x%X next cpb idx 0x%x\n", @@ -1717,7 +1694,7 @@ static void nv_adma_error_handler(struct ata_port *ap) struct nv_adma_cpb *cpb = &pp->cpb[i]; if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) || ap->link.sactive & (1 << i)) - ata_port_printk(ap, KERN_ERR, + ata_port_err(ap, "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n", i, cpb->ctl_flags, cpb->resp_flags); } @@ -1743,7 +1720,7 @@ static void nv_adma_error_handler(struct ata_port *ap) readw(mmio + NV_ADMA_CTL); /* flush posted write */ } - ata_sff_error_handler(ap); + ata_bmdma_error_handler(ap); } static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc) @@ -1819,23 +1796,22 @@ static void nv_swncq_ncq_stop(struct ata_port *ap) u32 sactive; u32 done_mask; - ata_port_printk(ap, KERN_ERR, - "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n", - ap->qc_active, ap->link.sactive); - ata_port_printk(ap, KERN_ERR, + ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n", + ap->qc_active, ap->link.sactive); + ata_port_err(ap, "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n " "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n", pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag, pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits); - ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n", - ap->ops->sff_check_status(ap), - ioread8(ap->ioaddr.error_addr)); + ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n", + ap->ops->sff_check_status(ap), + ioread8(ap->ioaddr.error_addr)); sactive = readl(pp->sactive_block); done_mask = 
pp->qc_active ^ sactive; - ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sacitve\n"); + ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n"); for (i = 0; i < ATA_MAX_QUEUE; i++) { u8 err = 0; if (pp->qc_active & (1 << i)) @@ -1845,13 +1821,13 @@ static void nv_swncq_ncq_stop(struct ata_port *ap) else continue; - ata_port_printk(ap, KERN_ERR, - "tag 0x%x: %01x %01x %01x %01x %s\n", i, - (pp->dhfis_bits >> i) & 0x1, - (pp->dmafis_bits >> i) & 0x1, - (pp->sdbfis_bits >> i) & 0x1, - (sactive >> i) & 0x1, - (err ? "error! tag doesn't exit" : " ")); + ata_port_err(ap, + "tag 0x%x: %01x %01x %01x %01x %s\n", i, + (pp->dhfis_bits >> i) & 0x1, + (pp->dmafis_bits >> i) & 0x1, + (pp->sdbfis_bits >> i) & 0x1, + (sactive >> i) & 0x1, + (err ? "error! tag doesn't exit" : " ")); } nv_swncq_pp_reinit(ap); @@ -1869,7 +1845,7 @@ static void nv_swncq_error_handler(struct ata_port *ap) ehc->i.action |= ATA_EH_RESET; } - ata_sff_error_handler(ap); + ata_bmdma_error_handler(ap); } #ifdef CONFIG_PM @@ -1976,8 +1952,8 @@ static int nv_swncq_slave_config(struct scsi_device *sdev) if (strncmp(model_num, "Maxtor", 6) == 0) { ata_scsi_change_queue_depth(sdev, 1, SCSI_QDEPTH_DEFAULT); - ata_dev_printk(dev, KERN_NOTICE, - "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth); + ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n", + sdev->queue_depth); } return rc; @@ -1990,7 +1966,8 @@ static int nv_swncq_port_start(struct ata_port *ap) struct nv_swncq_port_priv *pp; int rc; - rc = ata_port_start(ap); + /* we might fallback to bmdma, allocate bmdma resources */ + rc = ata_bmdma_port_start(ap); if (rc) return rc; @@ -2015,7 +1992,7 @@ static int nv_swncq_port_start(struct ata_port *ap) static void nv_swncq_qc_prep(struct ata_queued_cmd *qc) { if (qc->tf.protocol != ATA_PROT_NCQ) { - ata_sff_qc_prep(qc); + ata_bmdma_qc_prep(qc); return; } @@ -2030,7 +2007,7 @@ static void nv_swncq_fill_sg(struct ata_queued_cmd *qc) struct ata_port *ap = qc->ap; struct scatterlist *sg; struct nv_swncq_port_priv *pp = ap->private_data; - struct ata_prd *prd; + struct ata_bmdma_prd *prd; unsigned int si, idx; prd = pp->prd + ATA_MAX_PRD * qc->tag; @@ -2091,7 +2068,7 @@ static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc) struct nv_swncq_port_priv *pp = ap->private_data; if (qc->tf.protocol != ATA_PROT_NCQ) - return ata_sff_qc_issue(qc); + return ata_bmdma_qc_issue(qc); DPRINTK("Enter\n"); @@ -2134,15 +2111,13 @@ static int nv_swncq_sdbfis(struct ata_port *ap) struct nv_swncq_port_priv *pp = ap->private_data; struct ata_eh_info *ehi = &ap->link.eh_info; u32 sactive; - int nr_done = 0; u32 done_mask; - int i; u8 host_stat; u8 lack_dhfis = 0; host_stat = ap->ops->bmdma_status(ap); if (unlikely(host_stat & ATA_DMA_ERR)) { - /* error when transfering data to/from memory */ + /* error when transferring data to/from memory */ ata_ehi_clear_desc(ehi); ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); ehi->err_mask |= AC_ERR_HOST_BUS; @@ -2156,41 +2131,24 @@ static int nv_swncq_sdbfis(struct ata_port *ap) sactive = readl(pp->sactive_block); done_mask = pp->qc_active ^ sactive; - if (unlikely(done_mask & sactive)) { - ata_ehi_clear_desc(ehi); - ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition" - "(%08x->%08x)", pp->qc_active, sactive); - ehi->err_mask |= AC_ERR_HSM; - ehi->action |= ATA_EH_RESET; - return -EINVAL; - } - for (i = 0; i < ATA_MAX_QUEUE; i++) { - if (!(done_mask & (1 << i))) - continue; - - qc = ata_qc_from_tag(ap, i); - if (qc) { - ata_qc_complete(qc); - pp->qc_active &= ~(1 << i); - 
pp->dhfis_bits &= ~(1 << i); - pp->dmafis_bits &= ~(1 << i); - pp->sdbfis_bits |= (1 << i); - nr_done++; - } - } + pp->qc_active &= ~done_mask; + pp->dhfis_bits &= ~done_mask; + pp->dmafis_bits &= ~done_mask; + pp->sdbfis_bits |= done_mask; + ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); if (!ap->qc_active) { DPRINTK("over\n"); nv_swncq_pp_reinit(ap); - return nr_done; + return 0; } if (pp->qc_active & pp->dhfis_bits) - return nr_done; + return 0; if ((pp->ncq_flags & ncq_saw_backout) || (pp->qc_active ^ pp->dhfis_bits)) - /* if the controller cann't get a device to host register FIS, + /* if the controller can't get a device to host register FIS, * The driver needs to reissue the new command. */ lack_dhfis = 1; @@ -2207,7 +2165,7 @@ static int nv_swncq_sdbfis(struct ata_port *ap) if (lack_dhfis) { qc = ata_qc_from_tag(ap, pp->last_issue_tag); nv_swncq_issue_atacmd(ap, qc); - return nr_done; + return 0; } if (pp->defer_queue.defer_bits) { @@ -2217,7 +2175,7 @@ static int nv_swncq_sdbfis(struct ata_port *ap) nv_swncq_issue_atacmd(ap, qc); } - return nr_done; + return 0; } static inline u32 nv_swncq_tag(struct ata_port *ap) @@ -2229,7 +2187,7 @@ static inline u32 nv_swncq_tag(struct ata_port *ap) return (tag & 0x1f); } -static int nv_swncq_dmafis(struct ata_port *ap) +static void nv_swncq_dmafis(struct ata_port *ap) { struct ata_queued_cmd *qc; unsigned int rw; @@ -2244,7 +2202,7 @@ static int nv_swncq_dmafis(struct ata_port *ap) qc = ata_qc_from_tag(ap, tag); if (unlikely(!qc)) - return 0; + return; rw = qc->tf.flags & ATA_TFLAG_WRITE; @@ -2259,8 +2217,6 @@ static int nv_swncq_dmafis(struct ata_port *ap) dmactl |= ATA_DMA_WR; iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); - - return 1; } static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis) @@ -2270,7 +2226,6 @@ static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis) struct ata_eh_info *ehi = &ap->link.eh_info; u32 serror; u8 ata_stat; - int rc = 0; ata_stat = ap->ops->sff_check_status(ap); nv_swncq_irq_clear(ap, fis); @@ -2315,8 +2270,7 @@ static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis) "dhfis 0x%X dmafis 0x%X sactive 0x%X\n", ap->print_id, pp->qc_active, pp->dhfis_bits, pp->dmafis_bits, readl(pp->sactive_block)); - rc = nv_swncq_sdbfis(ap); - if (rc < 0) + if (nv_swncq_sdbfis(ap) < 0) goto irq_error; } @@ -2353,7 +2307,7 @@ static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis) */ pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap)); pp->ncq_flags |= ncq_saw_dmas; - rc = nv_swncq_dmafis(ap); + nv_swncq_dmafis(ap); } irq_exit: @@ -2379,16 +2333,14 @@ static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance) for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; - if (ap && !(ap->flags & ATA_FLAG_DISABLED)) { - if (ap->link.sactive) { - nv_swncq_host_interrupt(ap, (u16)irq_stat); - handled = 1; - } else { - if (irq_stat) /* reserve Hotplug */ - nv_swncq_irq_clear(ap, 0xfff0); + if (ap->link.sactive) { + nv_swncq_host_interrupt(ap, (u16)irq_stat); + handled = 1; + } else { + if (irq_stat) /* reserve Hotplug */ + nv_swncq_irq_clear(ap, 0xfff0); - handled += nv_host_intr(ap, (u8)irq_stat); - } + handled += nv_host_intr(ap, (u8)irq_stat); } irq_stat >>= NV_INT_PORT_SHIFT_MCP55; } @@ -2400,7 +2352,6 @@ static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance) static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - static int printed_version; const struct ata_port_info *ppi[] = { NULL, NULL }; struct 
nv_pi_priv *ipriv; struct ata_host *host; @@ -2417,8 +2368,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (pci_resource_start(pdev, bar) == 0) return -ENODEV; - if (!printed_version++) - dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); + ata_print_version_once(&pdev->dev, DRV_VERSION); rc = pcim_enable_device(pdev); if (rc) @@ -2426,16 +2376,16 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* determine type and allocate host */ if (type == CK804 && adma_enabled) { - dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n"); + dev_notice(&pdev->dev, "Using ADMA mode\n"); type = ADMA; } else if (type == MCP5x && swncq_enabled) { - dev_printk(KERN_NOTICE, &pdev->dev, "Using SWNCQ mode\n"); + dev_notice(&pdev->dev, "Using SWNCQ mode\n"); type = SWNCQ; } ppi[0] = &nv_port_info[type]; ipriv = ppi[0]->private_data; - rc = ata_pci_sff_prepare_host(pdev, ppi, &host); + rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); if (rc) return rc; @@ -2473,19 +2423,18 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) nv_swncq_host_init(host); if (msi_enabled) { - dev_printk(KERN_NOTICE, &pdev->dev, "Using MSI\n"); + dev_notice(&pdev->dev, "Using MSI\n"); pci_enable_msi(pdev); } pci_set_master(pdev); - return ata_host_activate(host, pdev->irq, ipriv->irq_handler, - IRQF_SHARED, ipriv->sht); + return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht); } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int nv_pci_device_resume(struct pci_dev *pdev) { - struct ata_host *host = dev_get_drvdata(&pdev->dev); + struct ata_host *host = pci_get_drvdata(pdev); struct nv_host_priv *hpriv = host->private_data; int rc; @@ -2560,22 +2509,11 @@ static void nv_adma_host_stop(struct ata_host *host) nv_ck804_host_stop(host); } -static int __init nv_init(void) -{ - return pci_register_driver(&nv_pci_driver); -} +module_pci_driver(nv_pci_driver); -static void __exit nv_exit(void) -{ - pci_unregister_driver(&nv_pci_driver); -} - -module_init(nv_init); -module_exit(nv_exit); module_param_named(adma, adma_enabled, bool, 0444); MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)"); module_param_named(swncq, swncq_enabled, bool, 0444); MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)"); module_param_named(msi, msi_enabled, bool, 0444); MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)"); - |
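Both interrupt paths touched by this change (ADMA and SWNCQ) drop their per-tag ata_qc_complete() loops in favour of collecting finished tags into a done_mask and handing the result to ata_qc_complete_multiple(). A minimal sketch of that completion pattern follows; example_complete_tags() is a hypothetical helper used only for illustration and is not part of the driver.

#include <linux/libata.h>

/*
 * Hypothetical helper showing the completion pattern adopted in the
 * interrupt handlers: finished tags are gathered into done_mask, and
 * libata is given the remaining active set. ata_qc_complete_multiple()
 * then completes every command whose bit was cleared relative to
 * ap->qc_active.
 */
static void example_complete_tags(struct ata_port *ap, u32 done_mask)
{
	ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
}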
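At the bottom of the file, the hand-rolled nv_init()/nv_exit() pair is replaced by module_pci_driver(nv_pci_driver), which generates equivalent registration boilerplate. The sketch below shows roughly what the macro expands to; the generated function names are schematic, since the real macro derives them from the driver symbol.

#include <linux/module.h>
#include <linux/pci.h>

/* Roughly what module_pci_driver(nv_pci_driver) expands to. */
static int __init nv_pci_driver_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}
module_init(nv_pci_driver_init);

static void __exit nv_pci_driver_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}
module_exit(nv_pci_driver_exit);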
