Diffstat (limited to 'drivers/ata/libata-sff.c')
 drivers/ata/libata-sff.c | 2332 ++++++++++++++++++++++++++++++++------------
 1 file changed, 1450 insertions(+), 882 deletions(-)
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 15499522e64..1121153f1ec 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -1,7 +1,7 @@ /* * libata-sff.c - helper library for PCI IDE BMDMA * - * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * @@ -33,16 +33,20 @@ */ #include <linux/kernel.h> +#include <linux/gfp.h> #include <linux/pci.h> +#include <linux/module.h> #include <linux/libata.h> #include <linux/highmem.h> #include "libata.h" +static struct workqueue_struct *ata_sff_wq; + const struct ata_port_operations ata_sff_port_ops = { .inherits = &ata_base_port_ops, - .qc_prep = ata_sff_qc_prep, + .qc_prep = ata_noop_qc_prep, .qc_issue = ata_sff_qc_issue, .qc_fill_rtf = ata_sff_qc_fill_rtf, @@ -53,7 +57,6 @@ const struct ata_port_operations ata_sff_port_ops = { .hardreset = sata_sff_hardreset, .postreset = ata_sff_postreset, .error_handler = ata_sff_error_handler, - .post_internal_cmd = ata_sff_post_internal_cmd, .sff_dev_select = ata_sff_dev_select, .sff_check_status = ata_sff_check_status, @@ -61,205 +64,144 @@ const struct ata_port_operations ata_sff_port_ops = { .sff_tf_read = ata_sff_tf_read, .sff_exec_command = ata_sff_exec_command, .sff_data_xfer = ata_sff_data_xfer, - .sff_irq_on = ata_sff_irq_on, - .sff_irq_clear = ata_sff_irq_clear, - - .port_start = ata_sff_port_start, -}; - -const struct ata_port_operations ata_bmdma_port_ops = { - .inherits = &ata_sff_port_ops, + .sff_drain_fifo = ata_sff_drain_fifo, - .mode_filter = ata_bmdma_mode_filter, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, + .lost_interrupt = ata_sff_lost_interrupt, }; +EXPORT_SYMBOL_GPL(ata_sff_port_ops); /** - * ata_fill_sg - Fill PCI IDE PRD table - * @qc: Metadata associated with taskfile to be transferred + * ata_sff_check_status - Read device status reg & clear interrupt + * @ap: port where the device is * - * Fill PCI IDE PRD (scatter-gather) table with segments - * associated with the current disk command. + * Reads ATA taskfile status register for currently-selected device + * and return its value. This also clears pending interrupts + * from this device * * LOCKING: - * spin_lock_irqsave(host lock) - * + * Inherited from caller. */ -static void ata_fill_sg(struct ata_queued_cmd *qc) +u8 ata_sff_check_status(struct ata_port *ap) { - struct ata_port *ap = qc->ap; - struct scatterlist *sg; - unsigned int si, pi; - - pi = 0; - for_each_sg(qc->sg, sg, qc->n_elem, si) { - u32 addr, offset; - u32 sg_len, len; - - /* determine if physical DMA addr spans 64K boundary. - * Note h/w doesn't support 64-bit, so we unconditionally - * truncate dma_addr_t to u32. 
- */ - addr = (u32) sg_dma_address(sg); - sg_len = sg_dma_len(sg); - - while (sg_len) { - offset = addr & 0xffff; - len = sg_len; - if ((offset + sg_len) > 0x10000) - len = 0x10000 - offset; - - ap->prd[pi].addr = cpu_to_le32(addr); - ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff); - VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len); - - pi++; - sg_len -= len; - addr += len; - } - } - - ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); + return ioread8(ap->ioaddr.status_addr); } +EXPORT_SYMBOL_GPL(ata_sff_check_status); /** - * ata_fill_sg_dumb - Fill PCI IDE PRD table - * @qc: Metadata associated with taskfile to be transferred + * ata_sff_altstatus - Read device alternate status reg + * @ap: port where the device is * - * Fill PCI IDE PRD (scatter-gather) table with segments - * associated with the current disk command. Perform the fill - * so that we avoid writing any length 64K records for - * controllers that don't follow the spec. + * Reads ATA taskfile alternate status register for + * currently-selected device and return its value. * - * LOCKING: - * spin_lock_irqsave(host lock) + * Note: may NOT be used as the check_altstatus() entry in + * ata_port_operations. * + * LOCKING: + * Inherited from caller. */ -static void ata_fill_sg_dumb(struct ata_queued_cmd *qc) +static u8 ata_sff_altstatus(struct ata_port *ap) { - struct ata_port *ap = qc->ap; - struct scatterlist *sg; - unsigned int si, pi; - - pi = 0; - for_each_sg(qc->sg, sg, qc->n_elem, si) { - u32 addr, offset; - u32 sg_len, len, blen; - - /* determine if physical DMA addr spans 64K boundary. - * Note h/w doesn't support 64-bit, so we unconditionally - * truncate dma_addr_t to u32. - */ - addr = (u32) sg_dma_address(sg); - sg_len = sg_dma_len(sg); - - while (sg_len) { - offset = addr & 0xffff; - len = sg_len; - if ((offset + sg_len) > 0x10000) - len = 0x10000 - offset; - - blen = len & 0xffff; - ap->prd[pi].addr = cpu_to_le32(addr); - if (blen == 0) { - /* Some PATA chipsets like the CS5530 can't - cope with 0x0000 meaning 64K as the spec says */ - ap->prd[pi].flags_len = cpu_to_le32(0x8000); - blen = 0x8000; - ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000); - } - ap->prd[pi].flags_len = cpu_to_le32(blen); - VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len); - - pi++; - sg_len -= len; - addr += len; - } - } + if (ap->ops->sff_check_altstatus) + return ap->ops->sff_check_altstatus(ap); - ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); + return ioread8(ap->ioaddr.altstatus_addr); } /** - * ata_sff_qc_prep - Prepare taskfile for submission - * @qc: Metadata associated with taskfile to be prepared + * ata_sff_irq_status - Check if the device is busy + * @ap: port where the device is * - * Prepare ATA taskfile for submission. + * Determine if the port is currently busy. Uses altstatus + * if available in order to avoid clearing shared IRQ status + * when finding an IRQ source. Non ctl capable devices don't + * share interrupt lines fortunately for us. * * LOCKING: - * spin_lock_irqsave(host lock) + * Inherited from caller. 
*/ -void ata_sff_qc_prep(struct ata_queued_cmd *qc) +static u8 ata_sff_irq_status(struct ata_port *ap) { - if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + u8 status; - ata_fill_sg(qc); + if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) { + status = ata_sff_altstatus(ap); + /* Not us: We are busy */ + if (status & ATA_BUSY) + return status; + } + /* Clear INTRQ latch */ + status = ap->ops->sff_check_status(ap); + return status; } /** - * ata_sff_dumb_qc_prep - Prepare taskfile for submission - * @qc: Metadata associated with taskfile to be prepared + * ata_sff_sync - Flush writes + * @ap: Port to wait for. * - * Prepare ATA taskfile for submission. + * CAUTION: + * If we have an mmio device with no ctl and no altstatus + * method this will fail. No such devices are known to exist. * * LOCKING: - * spin_lock_irqsave(host lock) + * Inherited from caller. */ -void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc) -{ - if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; - ata_fill_sg_dumb(qc); +static void ata_sff_sync(struct ata_port *ap) +{ + if (ap->ops->sff_check_altstatus) + ap->ops->sff_check_altstatus(ap); + else if (ap->ioaddr.altstatus_addr) + ioread8(ap->ioaddr.altstatus_addr); } /** - * ata_sff_check_status - Read device status reg & clear interrupt - * @ap: port where the device is + * ata_sff_pause - Flush writes and wait 400nS + * @ap: Port to pause for. * - * Reads ATA taskfile status register for currently-selected device - * and return its value. This also clears pending interrupts - * from this device + * CAUTION: + * If we have an mmio device with no ctl and no altstatus + * method this will fail. No such devices are known to exist. * * LOCKING: * Inherited from caller. */ -u8 ata_sff_check_status(struct ata_port *ap) + +void ata_sff_pause(struct ata_port *ap) { - return ioread8(ap->ioaddr.status_addr); + ata_sff_sync(ap); + ndelay(400); } +EXPORT_SYMBOL_GPL(ata_sff_pause); /** - * ata_sff_altstatus - Read device alternate status reg - * @ap: port where the device is - * - * Reads ATA taskfile alternate status register for - * currently-selected device and return its value. - * - * Note: may NOT be used as the check_altstatus() entry in - * ata_port_operations. + * ata_sff_dma_pause - Pause before commencing DMA + * @ap: Port to pause for. * - * LOCKING: - * Inherited from caller. + * Perform I/O fencing and ensure sufficient cycle delays occur + * for the HDMA1:0 transition */ -u8 ata_sff_altstatus(struct ata_port *ap) -{ - if (ap->ops->sff_check_altstatus) - return ap->ops->sff_check_altstatus(ap); - return ioread8(ap->ioaddr.altstatus_addr); +void ata_sff_dma_pause(struct ata_port *ap) +{ + if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) { + /* An altstatus read will cause the needed delay without + messing up the IRQ status */ + ata_sff_altstatus(ap); + return; + } + /* There are no DMA controllers without ctl. BUG here to ensure + we never violate the HDMA1:0 transition timing and risk + corruption. */ + BUG(); } +EXPORT_SYMBOL_GPL(ata_sff_dma_pause); /** * ata_sff_busy_sleep - sleep until BSY clears, or timeout * @ap: port containing status register to be polled - * @tmout_pat: impatience timeout - * @tmout: overall timeout + * @tmout_pat: impatience timeout in msecs + * @tmout: overall timeout in msecs * * Sleep until ATA Status register bit BSY clears, * or a timeout occurs. 
@@ -278,22 +220,22 @@ int ata_sff_busy_sleep(struct ata_port *ap, status = ata_sff_busy_wait(ap, ATA_BUSY, 300); timer_start = jiffies; - timeout = timer_start + tmout_pat; + timeout = ata_deadline(timer_start, tmout_pat); while (status != 0xff && (status & ATA_BUSY) && time_before(jiffies, timeout)) { - msleep(50); + ata_msleep(ap, 50); status = ata_sff_busy_wait(ap, ATA_BUSY, 3); } if (status != 0xff && (status & ATA_BUSY)) - ata_port_printk(ap, KERN_WARNING, - "port is slow to respond, please be patient " - "(Status 0x%x)\n", status); + ata_port_warn(ap, + "port is slow to respond, please be patient (Status 0x%x)\n", + status); - timeout = timer_start + tmout; + timeout = ata_deadline(timer_start, tmout); while (status != 0xff && (status & ATA_BUSY) && time_before(jiffies, timeout)) { - msleep(50); + ata_msleep(ap, 50); status = ap->ops->sff_check_status(ap); } @@ -301,24 +243,21 @@ int ata_sff_busy_sleep(struct ata_port *ap, return -ENODEV; if (status & ATA_BUSY) { - ata_port_printk(ap, KERN_ERR, "port failed to respond " - "(%lu secs, Status 0x%x)\n", - tmout / HZ, status); + ata_port_err(ap, + "port failed to respond (%lu secs, Status 0x%x)\n", + DIV_ROUND_UP(tmout, 1000), status); return -EBUSY; } return 0; } +EXPORT_SYMBOL_GPL(ata_sff_busy_sleep); static int ata_sff_check_ready(struct ata_link *link) { u8 status = link->ap->ops->sff_check_status(link->ap); - if (!(status & ATA_BUSY)) - return 1; - if (status == 0xff) - return -ENODEV; - return 0; + return ata_check_ready(status); } /** @@ -339,6 +278,28 @@ int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline) { return ata_wait_ready(link, deadline, ata_sff_check_ready); } +EXPORT_SYMBOL_GPL(ata_sff_wait_ready); + +/** + * ata_sff_set_devctl - Write device control reg + * @ap: port where the device is + * @ctl: value to write + * + * Writes ATA taskfile device control register. + * + * Note: may NOT be used as the sff_set_devctl() entry in + * ata_port_operations. + * + * LOCKING: + * Inherited from caller. + */ +static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl) +{ + if (ap->ops->sff_set_devctl) + ap->ops->sff_set_devctl(ap, ctl); + else + iowrite8(ctl, ap->ioaddr.ctl_addr); +} /** * ata_sff_dev_select - Select device 0/1 on ATA bus @@ -366,6 +327,7 @@ void ata_sff_dev_select(struct ata_port *ap, unsigned int device) iowrite8(tmp, ap->ioaddr.device_addr); ata_sff_pause(ap); /* needed; also flushes, for mmio */ } +EXPORT_SYMBOL_GPL(ata_sff_dev_select); /** * ata_dev_select - Select device 0/1 on ATA bus @@ -385,12 +347,12 @@ void ata_sff_dev_select(struct ata_port *ap, unsigned int device) * LOCKING: * caller. */ -void ata_dev_select(struct ata_port *ap, unsigned int device, +static void ata_dev_select(struct ata_port *ap, unsigned int device, unsigned int wait, unsigned int can_sleep) { if (ata_msg_probe(ap)) - ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, " - "device %u, wait %u\n", device, wait); + ata_port_info(ap, "ata_dev_select: ENTER, device %u, wait %u\n", + device, wait); if (wait) ata_wait_idle(ap); @@ -399,7 +361,7 @@ void ata_dev_select(struct ata_port *ap, unsigned int device, if (wait) { if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI) - msleep(150); + ata_msleep(ap, 150); ata_wait_idle(ap); } } @@ -411,46 +373,32 @@ void ata_dev_select(struct ata_port *ap, unsigned int device, * Enable interrupts on a legacy IDE device using MMIO or PIO, * wait for idle, clear any pending interrupts. * + * Note: may NOT be used as the sff_irq_on() entry in + * ata_port_operations. 
+ * * LOCKING: * Inherited from caller. */ -u8 ata_sff_irq_on(struct ata_port *ap) +void ata_sff_irq_on(struct ata_port *ap) { struct ata_ioports *ioaddr = &ap->ioaddr; - u8 tmp; + + if (ap->ops->sff_irq_on) { + ap->ops->sff_irq_on(ap); + return; + } ap->ctl &= ~ATA_NIEN; ap->last_ctl = ap->ctl; - if (ioaddr->ctl_addr) - iowrite8(ap->ctl, ioaddr->ctl_addr); - tmp = ata_wait_idle(ap); - - ap->ops->sff_irq_clear(ap); - - return tmp; -} - -/** - * ata_sff_irq_clear - Clear PCI IDE BMDMA interrupt. - * @ap: Port associated with this ATA transaction. - * - * Clear interrupt and error flags in DMA status register. - * - * May be used as the irq_clear() entry in ata_port_operations. - * - * LOCKING: - * spin_lock_irqsave(host lock) - */ -void ata_sff_irq_clear(struct ata_port *ap) -{ - void __iomem *mmio = ap->ioaddr.bmdma_addr; - - if (!mmio) - return; + if (ap->ops->sff_set_devctl || ioaddr->ctl_addr) + ata_sff_set_devctl(ap, ap->ctl); + ata_wait_idle(ap); - iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); } +EXPORT_SYMBOL_GPL(ata_sff_irq_on); /** * ata_sff_tf_load - send taskfile registers to host controller @@ -475,7 +423,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) } if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { - WARN_ON(!ioaddr->ctl_addr); + WARN_ON_ONCE(!ioaddr->ctl_addr); iowrite8(tf->hob_feature, ioaddr->feature_addr); iowrite8(tf->hob_nsect, ioaddr->nsect_addr); iowrite8(tf->hob_lbal, ioaddr->lbal_addr); @@ -510,6 +458,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) ata_wait_idle(ap); } +EXPORT_SYMBOL_GPL(ata_sff_tf_load); /** * ata_sff_tf_read - input device's ATA taskfile shadow registers @@ -547,9 +496,10 @@ void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf) iowrite8(tf->ctl, ioaddr->ctl_addr); ap->last_ctl = tf->ctl; } else - WARN_ON(1); + WARN_ON_ONCE(1); } } +EXPORT_SYMBOL_GPL(ata_sff_tf_read); /** * ata_sff_exec_command - issue ATA command to host controller @@ -569,6 +519,7 @@ void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) iowrite8(tf->command, ap->ioaddr.command_addr); ata_sff_pause(ap); } +EXPORT_SYMBOL_GPL(ata_sff_exec_command); /** * ata_tf_to_host - issue ATA taskfile to host controller @@ -617,23 +568,93 @@ unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf, else iowrite16_rep(data_addr, buf, words); - /* Transfer trailing 1 byte, if any. */ + /* Transfer trailing byte, if any. */ if (unlikely(buflen & 0x01)) { - __le16 align_buf[1] = { 0 }; - unsigned char *trailing_buf = buf + buflen - 1; + unsigned char pad[2] = { }; + + /* Point buf to the tail of buffer */ + buf += buflen - 1; + /* + * Use io*16_rep() accessors here as well to avoid pointlessly + * swapping bytes to and from on the big endian machines... + */ if (rw == READ) { - align_buf[0] = cpu_to_le16(ioread16(data_addr)); - memcpy(trailing_buf, align_buf, 1); + ioread16_rep(data_addr, pad, 1); + *buf = pad[0]; } else { - memcpy(align_buf, trailing_buf, 1); - iowrite16(le16_to_cpu(align_buf[0]), data_addr); + pad[0] = *buf; + iowrite16_rep(data_addr, pad, 1); } words++; } return words << 1; } +EXPORT_SYMBOL_GPL(ata_sff_data_xfer); + +/** + * ata_sff_data_xfer32 - Transfer data by PIO + * @dev: device to target + * @buf: data buffer + * @buflen: buffer length + * @rw: read/write + * + * Transfer data from/to the device data register by PIO using 32bit + * I/O operations. 
+ * + * LOCKING: + * Inherited from caller. + * + * RETURNS: + * Bytes consumed. + */ + +unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf, + unsigned int buflen, int rw) +{ + struct ata_port *ap = dev->link->ap; + void __iomem *data_addr = ap->ioaddr.data_addr; + unsigned int words = buflen >> 2; + int slop = buflen & 3; + + if (!(ap->pflags & ATA_PFLAG_PIO32)) + return ata_sff_data_xfer(dev, buf, buflen, rw); + + /* Transfer multiple of 4 bytes */ + if (rw == READ) + ioread32_rep(data_addr, buf, words); + else + iowrite32_rep(data_addr, buf, words); + + /* Transfer trailing bytes, if any */ + if (unlikely(slop)) { + unsigned char pad[4] = { }; + + /* Point buf to the tail of buffer */ + buf += buflen - slop; + + /* + * Use io*_rep() accessors here as well to avoid pointlessly + * swapping bytes to and from on the big endian machines... + */ + if (rw == READ) { + if (slop < 3) + ioread16_rep(data_addr, pad, 1); + else + ioread32_rep(data_addr, pad, 1); + memcpy(buf, pad, slop); + } else { + memcpy(pad, buf, slop); + if (slop < 3) + iowrite16_rep(data_addr, pad, 1); + else + iowrite32_rep(data_addr, pad, 1); + } + } + return (buflen + 1) & ~1; +} +EXPORT_SYMBOL_GPL(ata_sff_data_xfer32); /** * ata_sff_data_xfer_noirq - Transfer data by PIO @@ -658,11 +679,12 @@ unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf, unsigned int consumed; local_irq_save(flags); - consumed = ata_sff_data_xfer(dev, buf, buflen, rw); + consumed = ata_sff_data_xfer32(dev, buf, buflen, rw); local_irq_restore(flags); return consumed; } +EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq); /** * ata_pio_sector - Transfer a sector of data. @@ -698,13 +720,13 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) /* FIXME: use a bounce buffer */ local_irq_save(flags); - buf = kmap_atomic(page, KM_IRQ0); + buf = kmap_atomic(page); /* do the actual data transfer */ ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, do_write); - kunmap_atomic(buf, KM_IRQ0); + kunmap_atomic(buf); local_irq_restore(flags); } else { buf = page_address(page); @@ -712,6 +734,9 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) do_write); } + if (!do_write && !PageSlab(page)) + flush_dcache_page(page); + qc->curbytes += qc->sect_size; qc->cursg_ofs += qc->sect_size; @@ -737,7 +762,7 @@ static void ata_pio_sectors(struct ata_queued_cmd *qc) /* READ/WRITE MULTIPLE */ unsigned int nsect; - WARN_ON(qc->dev->multi_count == 0); + WARN_ON_ONCE(qc->dev->multi_count == 0); nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size, qc->dev->multi_count); @@ -746,7 +771,7 @@ static void ata_pio_sectors(struct ata_queued_cmd *qc) } else ata_pio_sector(qc); - ata_sff_altstatus(qc->ap); /* flush */ + ata_sff_sync(qc->ap); /* flush */ } /** @@ -764,11 +789,12 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) { /* send SCSI cdb */ DPRINTK("send cdb\n"); - WARN_ON(qc->dev->cdb_len < 12); + WARN_ON_ONCE(qc->dev->cdb_len < 12); ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1); - ata_sff_altstatus(ap); /* flush */ - + ata_sff_sync(ap); + /* FIXME: If the CDB is for DMA do we need to do the transition delay + or is bmdma_start guaranteed to do it ? 
*/ switch (qc->tf.protocol) { case ATAPI_PROT_PIO: ap->hsm_task_state = HSM_ST; @@ -776,11 +802,15 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) case ATAPI_PROT_NODATA: ap->hsm_task_state = HSM_ST_LAST; break; +#ifdef CONFIG_ATA_BMDMA case ATAPI_PROT_DMA: ap->hsm_task_state = HSM_ST_LAST; /* initiate bmdma */ ap->ops->bmdma_start(qc); break; +#endif /* CONFIG_ATA_BMDMA */ + default: + BUG(); } } @@ -835,16 +865,18 @@ next_sg: /* FIXME: use bounce buffer */ local_irq_save(flags); - buf = kmap_atomic(page, KM_IRQ0); + buf = kmap_atomic(page); /* do the actual data transfer */ - consumed = ap->ops->sff_data_xfer(dev, buf + offset, count, rw); + consumed = ap->ops->sff_data_xfer(dev, buf + offset, + count, rw); - kunmap_atomic(buf, KM_IRQ0); + kunmap_atomic(buf); local_irq_restore(flags); } else { buf = page_address(page); - consumed = ap->ops->sff_data_xfer(dev, buf + offset, count, rw); + consumed = ap->ops->sff_data_xfer(dev, buf + offset, + count, rw); } bytes -= min(bytes, consumed); @@ -856,9 +888,12 @@ next_sg: qc->cursg_ofs = 0; } - /* consumed can be larger than count only for the last transfer */ - WARN_ON(qc->cursg && count != consumed); - + /* + * There used to be a WARN_ON_ONCE(qc->cursg && count != consumed); + * Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN + * check correctly as it doesn't know if it is the last request being + * made. Somebody should implement a proper sanity check. + */ if (bytes) goto next_sg; return 0; @@ -894,11 +929,11 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc) bytes = (bc_hi << 8) | bc_lo; /* shall be cleared to zero, indicating xfer of data */ - if (unlikely(ireason & (1 << 0))) + if (unlikely(ireason & ATAPI_COD)) goto atapi_check; /* make sure transfer direction matches expected */ - i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0; + i_write = ((ireason & ATAPI_IO) == 0) ? 1 : 0; if (unlikely(do_write != i_write)) goto atapi_check; @@ -909,7 +944,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc) if (unlikely(__atapi_pio_bytes(qc, bytes))) goto err_out; - ata_sff_altstatus(ap); /* flush */ + ata_sff_sync(ap); /* flush */ return; @@ -929,18 +964,19 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc) * RETURNS: * 1 if ok in workqueue, 0 otherwise. 
*/ -static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc) +static inline int ata_hsm_ok_in_wq(struct ata_port *ap, + struct ata_queued_cmd *qc) { if (qc->tf.flags & ATA_TFLAG_POLLING) return 1; if (ap->hsm_task_state == HSM_ST_FIRST) { if (qc->tf.protocol == ATA_PROT_PIO && - (qc->tf.flags & ATA_TFLAG_WRITE)) + (qc->tf.flags & ATA_TFLAG_WRITE)) return 1; if (ata_is_atapi(qc->tf.protocol) && - !(qc->dev->flags & ATA_DFLAG_CDB_INTR)) + !(qc->dev->flags & ATA_DFLAG_CDB_INTR)) return 1; } @@ -973,7 +1009,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) qc = ata_qc_from_tag(ap, qc->tag); if (qc) { if (likely(!(qc->err_mask & AC_ERR_HSM))) { - ap->ops->sff_irq_on(ap); + ata_sff_irq_on(ap); ata_qc_complete(qc); } else ata_port_freeze(ap); @@ -989,7 +1025,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) } else { if (in_wq) { spin_lock_irqsave(ap->lock, flags); - ap->ops->sff_irq_on(ap); + ata_sff_irq_on(ap); ata_qc_complete(qc); spin_unlock_irqrestore(ap->lock, flags); } else @@ -1010,16 +1046,18 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, u8 status, int in_wq) { + struct ata_link *link = qc->dev->link; + struct ata_eh_info *ehi = &link->eh_info; unsigned long flags = 0; int poll_next; - WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0); + WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0); /* Make sure ata_sff_qc_issue() does not throw things * like DMA polling into the workqueue. Notice that * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING). */ - WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc)); + WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc)); fsm_start: DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n", @@ -1041,9 +1079,12 @@ fsm_start: if (likely(status & (ATA_ERR | ATA_DF))) /* device stops HSM for abort/error */ qc->err_mask |= AC_ERR_DEV; - else + else { /* HSM violation. Let EH handle this */ + ata_ehi_push_desc(ehi, + "ST_FIRST: !(DRQ|ERR|DF)"); qc->err_mask |= AC_ERR_HSM; + } ap->hsm_task_state = HSM_ST_ERR; goto fsm_start; @@ -1062,9 +1103,9 @@ fsm_start: * the CDB. */ if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) { - ata_port_printk(ap, KERN_WARNING, - "DRQ=1 with device error, " - "dev_stat 0x%X\n", status); + ata_ehi_push_desc(ehi, "ST_FIRST: " + "DRQ=1 with device error, " + "dev_stat 0x%X", status); qc->err_mask |= AC_ERR_HSM; ap->hsm_task_state = HSM_ST_ERR; goto fsm_start; @@ -1097,7 +1138,7 @@ fsm_start: if (in_wq) spin_unlock_irqrestore(ap->lock, flags); - /* if polling, ata_pio_task() handles the rest. + /* if polling, ata_sff_pio_task() handles the rest. * otherwise, interrupt handler takes over from here. */ break; @@ -1121,9 +1162,9 @@ fsm_start: * let the EH abort the command or reset the device. 
*/ if (unlikely(status & (ATA_ERR | ATA_DF))) { - ata_port_printk(ap, KERN_WARNING, "DRQ=1 with " - "device error, dev_stat 0x%X\n", - status); + ata_ehi_push_desc(ehi, "ST-ATAPI: " + "DRQ=1 with device error, " + "dev_stat 0x%X", status); qc->err_mask |= AC_ERR_HSM; ap->hsm_task_state = HSM_ST_ERR; goto fsm_start; @@ -1139,16 +1180,29 @@ fsm_start: /* ATA PIO protocol */ if (unlikely((status & ATA_DRQ) == 0)) { /* handle BSY=0, DRQ=0 as error */ - if (likely(status & (ATA_ERR | ATA_DF))) + if (likely(status & (ATA_ERR | ATA_DF))) { /* device stops HSM for abort/error */ qc->err_mask |= AC_ERR_DEV; - else + + /* If diagnostic failed and this is + * IDENTIFY, it's likely a phantom + * device. Mark hint. + */ + if (qc->dev->horkage & + ATA_HORKAGE_DIAGNOSTIC) + qc->err_mask |= + AC_ERR_NODEV_HINT; + } else { /* HSM violation. Let EH handle this. * Phantom devices also trigger this * condition. Mark hint. */ + ata_ehi_push_desc(ehi, "ST-ATA: " + "DRQ=0 without device error, " + "dev_stat 0x%X", status); qc->err_mask |= AC_ERR_HSM | AC_ERR_NODEV_HINT; + } ap->hsm_task_state = HSM_ST_ERR; goto fsm_start; @@ -1173,8 +1227,22 @@ fsm_start: status = ata_wait_idle(ap); } - if (status & (ATA_BUSY | ATA_DRQ)) + if (status & (ATA_BUSY | ATA_DRQ)) { + ata_ehi_push_desc(ehi, "ST-ATA: " + "BUSY|DRQ persists on ERR|DF, " + "dev_stat 0x%X", status); qc->err_mask |= AC_ERR_HSM; + } + + /* There are oddball controllers with + * status register stuck at 0x7f and + * lbal/m/h at zero which makes it + * pass all other presence detection + * mechanisms we have. Set NODEV_HINT + * for it. Kernel bz#7241. + */ + if (status == 0x7f) + qc->err_mask |= AC_ERR_NODEV_HINT; /* ata_pio_sectors() might change the * state to HSM_ST_LAST. so, the state @@ -1208,7 +1276,7 @@ fsm_start: DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n", ap->print_id, qc->dev->devno, status); - WARN_ON(qc->err_mask); + WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM)); ap->hsm_task_state = HSM_ST_IDLE; @@ -1219,11 +1287,6 @@ fsm_start: break; case HSM_ST_ERR: - /* make sure qc->err_mask is available to - * know what's wrong and recover - */ - WARN_ON(qc->err_mask == 0); - ap->hsm_task_state = HSM_ST_IDLE; /* complete taskfile transaction */ @@ -1238,17 +1301,64 @@ fsm_start: return poll_next; } +EXPORT_SYMBOL_GPL(ata_sff_hsm_move); + +void ata_sff_queue_work(struct work_struct *work) +{ + queue_work(ata_sff_wq, work); +} +EXPORT_SYMBOL_GPL(ata_sff_queue_work); + +void ata_sff_queue_delayed_work(struct delayed_work *dwork, unsigned long delay) +{ + queue_delayed_work(ata_sff_wq, dwork, delay); +} +EXPORT_SYMBOL_GPL(ata_sff_queue_delayed_work); + +void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay) +{ + struct ata_port *ap = link->ap; + + WARN_ON((ap->sff_pio_task_link != NULL) && + (ap->sff_pio_task_link != link)); + ap->sff_pio_task_link = link; + + /* may fail if ata_sff_flush_pio_task() in progress */ + ata_sff_queue_delayed_work(&ap->sff_pio_task, msecs_to_jiffies(delay)); +} +EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task); -void ata_pio_task(struct work_struct *work) +void ata_sff_flush_pio_task(struct ata_port *ap) +{ + DPRINTK("ENTER\n"); + + cancel_delayed_work_sync(&ap->sff_pio_task); + ap->hsm_task_state = HSM_ST_IDLE; + ap->sff_pio_task_link = NULL; + + if (ata_msg_ctl(ap)) + ata_port_dbg(ap, "%s: EXIT\n", __func__); +} + +static void ata_sff_pio_task(struct work_struct *work) { struct ata_port *ap = - container_of(work, struct ata_port, port_task.work); - struct ata_queued_cmd *qc = ap->port_task_data; + 
container_of(work, struct ata_port, sff_pio_task.work); + struct ata_link *link = ap->sff_pio_task_link; + struct ata_queued_cmd *qc; u8 status; int poll_next; + BUG_ON(ap->sff_pio_task_link == NULL); + /* qc can be NULL if timeout occurred */ + qc = ata_qc_from_tag(ap, link->active_tag); + if (!qc) { + ap->sff_pio_task_link = NULL; + return; + } + fsm_start: - WARN_ON(ap->hsm_task_state == HSM_ST_IDLE); + WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE); /* * This is purely heuristic. This is a fast path. @@ -1259,14 +1369,19 @@ fsm_start: */ status = ata_sff_busy_wait(ap, ATA_BUSY, 5); if (status & ATA_BUSY) { - msleep(2); + ata_msleep(ap, 2); status = ata_sff_busy_wait(ap, ATA_BUSY, 10); if (status & ATA_BUSY) { - ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE); + ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE); return; } } + /* + * hsm_move() may trigger another command to be processed. + * clean the link beforehand. + */ + ap->sff_pio_task_link = NULL; /* move the HSM */ poll_next = ata_sff_hsm_move(ap, qc, status, 1); @@ -1278,15 +1393,11 @@ fsm_start: } /** - * ata_sff_qc_issue - issue taskfile to device in proto-dependent manner + * ata_sff_qc_issue - issue taskfile to a SFF controller * @qc: command to issue to device * - * Using various libata functions and hooks, this function - * starts an ATA command. ATA commands are grouped into - * classes called "protocols", and issuing each type of protocol - * is slightly different. - * - * May be used as the qc_issue() entry in ata_port_operations. + * This function issues a PIO or NODATA command to a SFF + * controller. * * LOCKING: * spin_lock_irqsave(host lock) @@ -1297,27 +1408,13 @@ fsm_start: unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; + struct ata_link *link = qc->dev->link; /* Use polling pio if the LLD doesn't handle * interrupt driven pio and atapi CDB interrupt. */ - if (ap->flags & ATA_FLAG_PIO_POLLING) { - switch (qc->tf.protocol) { - case ATA_PROT_PIO: - case ATA_PROT_NODATA: - case ATAPI_PROT_PIO: - case ATAPI_PROT_NODATA: - qc->tf.flags |= ATA_TFLAG_POLLING; - break; - case ATAPI_PROT_DMA: - if (qc->dev->flags & ATA_DFLAG_CDB_INTR) - /* see ata_dma_blacklisted() */ - BUG(); - break; - default: - break; - } - } + if (ap->flags & ATA_FLAG_PIO_POLLING) + qc->tf.flags |= ATA_TFLAG_POLLING; /* select the device */ ata_dev_select(ap, qc->dev->devno, 1, 0); @@ -1332,19 +1429,10 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) ap->hsm_task_state = HSM_ST_LAST; if (qc->tf.flags & ATA_TFLAG_POLLING) - ata_pio_queue_task(ap, qc, 0); + ata_sff_queue_pio_task(link, 0); break; - case ATA_PROT_DMA: - WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); - - ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ - ap->ops->bmdma_setup(qc); /* set up bmdma */ - ap->ops->bmdma_start(qc); /* initiate bmdma */ - ap->hsm_task_state = HSM_ST_LAST; - break; - case ATA_PROT_PIO: if (qc->tf.flags & ATA_TFLAG_POLLING) ata_qc_set_polling(qc); @@ -1354,20 +1442,21 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) if (qc->tf.flags & ATA_TFLAG_WRITE) { /* PIO data out protocol */ ap->hsm_task_state = HSM_ST_FIRST; - ata_pio_queue_task(ap, qc, 0); + ata_sff_queue_pio_task(link, 0); - /* always send first data block using - * the ata_pio_task() codepath. + /* always send first data block using the + * ata_sff_pio_task() codepath. 
*/ } else { /* PIO data in protocol */ ap->hsm_task_state = HSM_ST; if (qc->tf.flags & ATA_TFLAG_POLLING) - ata_pio_queue_task(ap, qc, 0); + ata_sff_queue_pio_task(link, 0); - /* if polling, ata_pio_task() handles the rest. - * otherwise, interrupt handler takes over from here. + /* if polling, ata_sff_pio_task() handles the + * rest. otherwise, interrupt handler takes + * over from here. */ } @@ -1385,28 +1474,17 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) /* send cdb by polling if no cdb interrupt */ if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || (qc->tf.flags & ATA_TFLAG_POLLING)) - ata_pio_queue_task(ap, qc, 0); - break; - - case ATAPI_PROT_DMA: - WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); - - ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ - ap->ops->bmdma_setup(qc); /* set up bmdma */ - ap->hsm_task_state = HSM_ST_FIRST; - - /* send cdb by polling if no cdb interrupt */ - if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) - ata_pio_queue_task(ap, qc, 0); + ata_sff_queue_pio_task(link, 0); break; default: - WARN_ON(1); + WARN_ON_ONCE(1); return AC_ERR_SYSTEM; } return 0; } +EXPORT_SYMBOL_GPL(ata_sff_qc_issue); /** * ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read @@ -1426,27 +1504,29 @@ bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc) qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf); return true; } +EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf); -/** - * ata_sff_host_intr - Handle host interrupt for given (port, task) - * @ap: Port on which interrupt arrived (possibly...) - * @qc: Taskfile currently active in engine - * - * Handle host interrupt for given queued command. Currently, - * only DMA interrupts are handled. All other commands are - * handled via polling with interrupts disabled (nIEN bit). - * - * LOCKING: - * spin_lock_irqsave(host lock) - * - * RETURNS: - * One if interrupt was handled, zero if not (shared irq). - */ -inline unsigned int ata_sff_host_intr(struct ata_port *ap, - struct ata_queued_cmd *qc) +static unsigned int ata_sff_idle_irq(struct ata_port *ap) { - struct ata_eh_info *ehi = &ap->link.eh_info; - u8 status, host_stat = 0; + ap->stats.idle_irq++; + +#ifdef ATA_IRQ_TRAP + if ((ap->stats.idle_irq % 1000) == 0) { + ap->ops->sff_check_status(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); + ata_port_warn(ap, "irq trap\n"); + return 1; + } +#endif + return 0; /* irq not handled */ +} + +static unsigned int __ata_sff_port_intr(struct ata_port *ap, + struct ata_queued_cmd *qc, + bool hsmv_on_idle) +{ + u8 status; VPRINTK("ata%u: protocol %d task_state %d\n", ap->print_id, qc->tf.protocol, ap->hsm_task_state); @@ -1463,107 +1543,117 @@ inline unsigned int ata_sff_host_intr(struct ata_port *ap, * need to check ata_is_atapi(qc->tf.protocol) again. */ if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) - goto idle_irq; - break; - case HSM_ST_LAST: - if (qc->tf.protocol == ATA_PROT_DMA || - qc->tf.protocol == ATAPI_PROT_DMA) { - /* check status of DMA engine */ - host_stat = ap->ops->bmdma_status(ap); - VPRINTK("ata%u: host_stat 0x%X\n", - ap->print_id, host_stat); - - /* if it's not our irq... 
*/ - if (!(host_stat & ATA_DMA_INTR)) - goto idle_irq; - - /* before we do anything else, clear DMA-Start bit */ - ap->ops->bmdma_stop(qc); - - if (unlikely(host_stat & ATA_DMA_ERR)) { - /* error when transfering data to/from memory */ - qc->err_mask |= AC_ERR_HOST_BUS; - ap->hsm_task_state = HSM_ST_ERR; - } - } - break; - case HSM_ST: + return ata_sff_idle_irq(ap); break; + case HSM_ST_IDLE: + return ata_sff_idle_irq(ap); default: - goto idle_irq; + break; } - /* check altstatus */ - status = ata_sff_altstatus(ap); - if (status & ATA_BUSY) - goto idle_irq; - - /* check main status, clearing INTRQ */ - status = ap->ops->sff_check_status(ap); - if (unlikely(status & ATA_BUSY)) - goto idle_irq; + /* check main status, clearing INTRQ if needed */ + status = ata_sff_irq_status(ap); + if (status & ATA_BUSY) { + if (hsmv_on_idle) { + /* BMDMA engine is already stopped, we're screwed */ + qc->err_mask |= AC_ERR_HSM; + ap->hsm_task_state = HSM_ST_ERR; + } else + return ata_sff_idle_irq(ap); + } - /* ack bmdma irq events */ - ap->ops->sff_irq_clear(ap); + /* clear irq events */ + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); ata_sff_hsm_move(ap, qc, status, 0); - if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA || - qc->tf.protocol == ATAPI_PROT_DMA)) - ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); - return 1; /* irq handled */ - -idle_irq: - ap->stats.idle_irq++; - -#ifdef ATA_IRQ_TRAP - if ((ap->stats.idle_irq % 1000) == 0) { - ap->ops->sff_check_status(ap); - ap->ops->sff_irq_clear(ap); - ata_port_printk(ap, KERN_WARNING, "irq trap\n"); - return 1; - } -#endif - return 0; /* irq not handled */ } /** - * ata_sff_interrupt - Default ATA host interrupt handler - * @irq: irq line (unused) - * @dev_instance: pointer to our ata_host information structure + * ata_sff_port_intr - Handle SFF port interrupt + * @ap: Port on which interrupt arrived (possibly...) + * @qc: Taskfile currently active in engine * - * Default interrupt handler for PCI IDE devices. Calls - * ata_sff_host_intr() for each port that is not disabled. + * Handle port interrupt for given queued command. * * LOCKING: - * Obtains host lock during operation. + * spin_lock_irqsave(host lock) * * RETURNS: - * IRQ_NONE or IRQ_HANDLED. + * One if interrupt was handled, zero if not (shared irq). 
*/ -irqreturn_t ata_sff_interrupt(int irq, void *dev_instance) +unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) +{ + return __ata_sff_port_intr(ap, qc, false); +} +EXPORT_SYMBOL_GPL(ata_sff_port_intr); + +static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance, + unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *)) { struct ata_host *host = dev_instance; + bool retried = false; unsigned int i; - unsigned int handled = 0; + unsigned int handled, idle, polling; unsigned long flags; /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */ spin_lock_irqsave(&host->lock, flags); +retry: + handled = idle = polling = 0; for (i = 0; i < host->n_ports; i++) { - struct ata_port *ap; + struct ata_port *ap = host->ports[i]; + struct ata_queued_cmd *qc; + + qc = ata_qc_from_tag(ap, ap->link.active_tag); + if (qc) { + if (!(qc->tf.flags & ATA_TFLAG_POLLING)) + handled |= port_intr(ap, qc); + else + polling |= 1 << i; + } else + idle |= 1 << i; + } - ap = host->ports[i]; - if (ap && - !(ap->flags & ATA_FLAG_DISABLED)) { - struct ata_queued_cmd *qc; + /* + * If no port was expecting IRQ but the controller is actually + * asserting IRQ line, nobody cared will ensue. Check IRQ + * pending status if available and clear spurious IRQ. + */ + if (!handled && !retried) { + bool retry = false; + + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; + + if (polling & (1 << i)) + continue; + + if (!ap->ops->sff_irq_check || + !ap->ops->sff_irq_check(ap)) + continue; + + if (idle & (1 << i)) { + ap->ops->sff_check_status(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); + } else { + /* clear INTRQ and check if BUSY cleared */ + if (!(ap->ops->sff_check_status(ap) & ATA_BUSY)) + retry |= true; + /* + * With command in flight, we can't do + * sff_irq_clear() w/o racing with completion. + */ + } + } - qc = ata_qc_from_tag(ap, ap->link.active_tag); - if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) && - (qc->flags & ATA_QCFLAG_ACTIVE)) - handled |= ata_sff_host_intr(ap, qc); + if (retry) { + retried = true; + goto retry; } } @@ -1573,23 +1663,80 @@ irqreturn_t ata_sff_interrupt(int irq, void *dev_instance) } /** + * ata_sff_interrupt - Default SFF ATA host interrupt handler + * @irq: irq line (unused) + * @dev_instance: pointer to our ata_host information structure + * + * Default interrupt handler for PCI IDE devices. Calls + * ata_sff_port_intr() for each port that is not disabled. + * + * LOCKING: + * Obtains host lock during operation. + * + * RETURNS: + * IRQ_NONE or IRQ_HANDLED. + */ +irqreturn_t ata_sff_interrupt(int irq, void *dev_instance) +{ + return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr); +} +EXPORT_SYMBOL_GPL(ata_sff_interrupt); + +/** + * ata_sff_lost_interrupt - Check for an apparent lost interrupt + * @ap: port that appears to have timed out + * + * Called from the libata error handlers when the core code suspects + * an interrupt has been lost. If it has complete anything we can and + * then return. Interface must support altstatus for this faster + * recovery to occur. 
+ * + * Locking: + * Caller holds host lock + */ + +void ata_sff_lost_interrupt(struct ata_port *ap) +{ + u8 status; + struct ata_queued_cmd *qc; + + /* Only one outstanding command per SFF channel */ + qc = ata_qc_from_tag(ap, ap->link.active_tag); + /* We cannot lose an interrupt on a non-existent or polled command */ + if (!qc || qc->tf.flags & ATA_TFLAG_POLLING) + return; + /* See if the controller thinks it is still busy - if so the command + isn't a lost IRQ but is still in progress */ + status = ata_sff_altstatus(ap); + if (status & ATA_BUSY) + return; + + /* There was a command running, we are no longer busy and we have + no interrupt. */ + ata_port_warn(ap, "lost interrupt (Status 0x%x)\n", + status); + /* Run the host interrupt logic as if the interrupt had not been + lost */ + ata_sff_port_intr(ap, qc); +} +EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt); + +/** * ata_sff_freeze - Freeze SFF controller port * @ap: port to freeze * - * Freeze BMDMA controller port. + * Freeze SFF controller port. * * LOCKING: * Inherited from caller. */ void ata_sff_freeze(struct ata_port *ap) { - struct ata_ioports *ioaddr = &ap->ioaddr; - ap->ctl |= ATA_NIEN; ap->last_ctl = ap->ctl; - if (ioaddr->ctl_addr) - iowrite8(ap->ctl, ioaddr->ctl_addr); + if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) + ata_sff_set_devctl(ap, ap->ctl); /* Under certain circumstances, some controllers raise IRQ on * ATA_NIEN manipulation. Also, many controllers fail to mask @@ -1597,8 +1744,10 @@ void ata_sff_freeze(struct ata_port *ap) */ ap->ops->sff_check_status(ap); - ap->ops->sff_irq_clear(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); } +EXPORT_SYMBOL_GPL(ata_sff_freeze); /** * ata_sff_thaw - Thaw SFF controller port @@ -1613,9 +1762,11 @@ void ata_sff_thaw(struct ata_port *ap) { /* clear & re-enable interrupts */ ap->ops->sff_check_status(ap); - ap->ops->sff_irq_clear(ap); - ap->ops->sff_irq_on(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); + ata_sff_irq_on(ap); } +EXPORT_SYMBOL_GPL(ata_sff_thaw); /** * ata_sff_prereset - prepare SFF link for reset @@ -1649,14 +1800,16 @@ int ata_sff_prereset(struct ata_link *link, unsigned long deadline) if (!ata_link_offline(link)) { rc = ata_sff_wait_ready(link, deadline); if (rc && rc != -ENODEV) { - ata_link_printk(link, KERN_WARNING, "device not ready " - "(errno=%d), forcing hardreset\n", rc); + ata_link_warn(link, + "device not ready (errno=%d), forcing hardreset\n", + rc); ehc->i.action |= ATA_EH_HARDRESET; } } return 0; } +EXPORT_SYMBOL_GPL(ata_sff_prereset); /** * ata_devchk - PATA device presence detection @@ -1769,6 +1922,7 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present, return class; } +EXPORT_SYMBOL_GPL(ata_sff_dev_classify); /** * ata_sff_wait_after_reset - wait for devices to become ready after reset @@ -1796,7 +1950,7 @@ int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask, unsigned int dev1 = devmask & (1 << 1); int rc, ret = 0; - msleep(ATA_WAIT_AFTER_RESET_MSECS); + ata_msleep(ap, ATA_WAIT_AFTER_RESET); /* always check readiness of the master device */ rc = ata_sff_wait_ready(link, deadline); @@ -1825,7 +1979,7 @@ int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask, lbal = ioread8(ioaddr->lbal_addr); if ((nsect == 1) && (lbal == 1)) break; - msleep(50); /* give drive a breather */ + ata_msleep(ap, 50); /* give drive a breather */ } rc = ata_sff_wait_ready(link, deadline); @@ -1845,6 +1999,7 @@ int ata_sff_wait_after_reset(struct ata_link *link, unsigned int 
devmask, return ret; } +EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset); static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask, unsigned long deadline) @@ -1859,6 +2014,7 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask, iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr); udelay(20); /* FIXME: flush */ iowrite8(ap->ctl, ioaddr->ctl_addr); + ap->last_ctl = ap->ctl; /* wait the port to become ready */ return ata_sff_wait_after_reset(&ap->link, devmask, deadline); @@ -1903,7 +2059,7 @@ int ata_sff_softreset(struct ata_link *link, unsigned int *classes, rc = ata_bus_softreset(ap, devmask, deadline); /* if link is occupied, -ENODEV too is an error */ if (rc && (rc != -ENODEV || sata_scr_valid(link))) { - ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc); + ata_link_err(link, "SRST failed (errno=%d)\n", rc); return rc; } @@ -1917,6 +2073,7 @@ int ata_sff_softreset(struct ata_link *link, unsigned int *classes, DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); return 0; } +EXPORT_SYMBOL_GPL(ata_sff_softreset); /** * sata_sff_hardreset - reset host port via SATA phy reset @@ -1949,6 +2106,7 @@ int sata_sff_hardreset(struct ata_link *link, unsigned int *class, DPRINTK("EXIT, class=%u\n", *class); return rc; } +EXPORT_SYMBOL_GPL(sata_sff_hardreset); /** * ata_sff_postreset - SFF postreset callback @@ -1981,12 +2139,47 @@ void ata_sff_postreset(struct ata_link *link, unsigned int *classes) } /* set up device control */ - if (ap->ioaddr.ctl_addr) - iowrite8(ap->ctl, ap->ioaddr.ctl_addr); + if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) { + ata_sff_set_devctl(ap, ap->ctl); + ap->last_ctl = ap->ctl; + } +} +EXPORT_SYMBOL_GPL(ata_sff_postreset); + +/** + * ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers + * @qc: command + * + * Drain the FIFO and device of any stuck data following a command + * failing to complete. In some cases this is necessary before a + * reset will recover the device. + * + */ + +void ata_sff_drain_fifo(struct ata_queued_cmd *qc) +{ + int count; + struct ata_port *ap; + + /* We only need to flush incoming data when a command was running */ + if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE) + return; + + ap = qc->ap; + /* Drain up to 64K of data before we give up this recovery method */ + for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ) + && count < 65536; count += 2) + ioread16(ap->ioaddr.data_addr); + + /* Can become DEBUG later */ + if (count) + ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count); + } +EXPORT_SYMBOL_GPL(ata_sff_drain_fifo); /** - * ata_sff_error_handler - Stock error handler for BMDMA controller + * ata_sff_error_handler - Stock error handler for SFF controller * @ap: port to handle error for * * Stock error handler for SFF controller. It can handle both @@ -2003,92 +2196,38 @@ void ata_sff_error_handler(struct ata_port *ap) ata_reset_fn_t hardreset = ap->ops->hardreset; struct ata_queued_cmd *qc; unsigned long flags; - int thaw = 0; qc = __ata_qc_from_tag(ap, ap->link.active_tag); if (qc && !(qc->flags & ATA_QCFLAG_FAILED)) qc = NULL; - /* reset PIO HSM and stop DMA engine */ spin_lock_irqsave(ap->lock, flags); - ap->hsm_task_state = HSM_ST_IDLE; - - if (ap->ioaddr.bmdma_addr && - qc && (qc->tf.protocol == ATA_PROT_DMA || - qc->tf.protocol == ATAPI_PROT_DMA)) { - u8 host_stat; - - host_stat = ap->ops->bmdma_status(ap); - - /* BMDMA controllers indicate host bus error by - * setting DMA_ERR bit and timing out. 
As it wasn't - * really a timeout event, adjust error mask and - * cancel frozen state. - */ - if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) { - qc->err_mask = AC_ERR_HOST_BUS; - thaw = 1; - } - - ap->ops->bmdma_stop(qc); - } - - ata_sff_altstatus(ap); - ap->ops->sff_check_status(ap); - ap->ops->sff_irq_clear(ap); + /* + * We *MUST* do FIFO draining before we issue a reset as + * several devices helpfully clear their internal state and + * will lock solid if we touch the data port post reset. Pass + * qc in case anyone wants to do different PIO/DMA recovery or + * has per command fixups + */ + if (ap->ops->sff_drain_fifo) + ap->ops->sff_drain_fifo(qc); spin_unlock_irqrestore(ap->lock, flags); - if (thaw) - ata_eh_thaw_port(ap); - - /* PIO and DMA engines have been stopped, perform recovery */ - - /* Ignore ata_sff_softreset if ctl isn't accessible and - * built-in hardresets if SCR access isn't available. - */ + /* ignore ata_sff_softreset if ctl isn't accessible */ if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr) softreset = NULL; - if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link)) + + /* ignore built-in hardresets if SCR access is not available */ + if ((hardreset == sata_std_hardreset || + hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link)) hardreset = NULL; ata_do_eh(ap, ap->ops->prereset, softreset, hardreset, ap->ops->postreset); } - -/** - * ata_sff_post_internal_cmd - Stock post_internal_cmd for SFF controller - * @qc: internal command to clean up - * - * LOCKING: - * Kernel thread context (may sleep) - */ -void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc) -{ - if (qc->ap->ioaddr.bmdma_addr) - ata_bmdma_stop(qc); -} - -/** - * ata_sff_port_start - Set port up for dma. - * @ap: Port to initialize - * - * Called just after data structures for each port are - * initialized. Allocates space for PRD table if the device - * is DMA capable SFF. - * - * May be used as the port_start() entry in ata_port_operations. - * - * LOCKING: - * Inherited from caller. - */ -int ata_sff_port_start(struct ata_port *ap) -{ - if (ap->ioaddr.bmdma_addr) - return ata_port_start(ap); - return 0; -} +EXPORT_SYMBOL_GPL(ata_sff_error_handler); /** * ata_sff_std_ports - initialize ioaddr with standard port offsets. @@ -2114,300 +2253,17 @@ void ata_sff_std_ports(struct ata_ioports *ioaddr) ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS; ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD; } - -unsigned long ata_bmdma_mode_filter(struct ata_device *adev, - unsigned long xfer_mask) -{ - /* Filter out DMA modes if the device has been configured by - the BIOS as PIO only */ - - if (adev->link->ap->ioaddr.bmdma_addr == NULL) - xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); - return xfer_mask; -} - -/** - * ata_bmdma_setup - Set up PCI IDE BMDMA transaction - * @qc: Info associated with this ATA transaction. - * - * LOCKING: - * spin_lock_irqsave(host lock) - */ -void ata_bmdma_setup(struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); - u8 dmactl; - - /* load PRD table addr. 
*/ - mb(); /* make sure PRD table writes are visible to controller */ - iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); - - /* specify data direction, triple-check start bit is clear */ - dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); - dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); - if (!rw) - dmactl |= ATA_DMA_WR; - iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); - - /* issue r/w command */ - ap->ops->sff_exec_command(ap, &qc->tf); -} - -/** - * ata_bmdma_start - Start a PCI IDE BMDMA transaction - * @qc: Info associated with this ATA transaction. - * - * LOCKING: - * spin_lock_irqsave(host lock) - */ -void ata_bmdma_start(struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - u8 dmactl; - - /* start host DMA transaction */ - dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); - iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); - - /* Strictly, one may wish to issue an ioread8() here, to - * flush the mmio write. However, control also passes - * to the hardware at this point, and it will interrupt - * us when we are to resume control. So, in effect, - * we don't care when the mmio write flushes. - * Further, a read of the DMA status register _immediately_ - * following the write may not be what certain flaky hardware - * is expected, so I think it is best to not add a readb() - * without first all the MMIO ATA cards/mobos. - * Or maybe I'm just being paranoid. - * - * FIXME: The posting of this write means I/O starts are - * unneccessarily delayed for MMIO - */ -} - -/** - * ata_bmdma_stop - Stop PCI IDE BMDMA transfer - * @qc: Command we are ending DMA for - * - * Clears the ATA_DMA_START flag in the dma control register - * - * May be used as the bmdma_stop() entry in ata_port_operations. - * - * LOCKING: - * spin_lock_irqsave(host lock) - */ -void ata_bmdma_stop(struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - void __iomem *mmio = ap->ioaddr.bmdma_addr; - - /* clear start/stop bit */ - iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START, - mmio + ATA_DMA_CMD); - - /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ - ata_sff_altstatus(ap); /* dummy read */ -} - -/** - * ata_bmdma_status - Read PCI IDE BMDMA status - * @ap: Port associated with this ATA transaction. - * - * Read and return BMDMA status register. - * - * May be used as the bmdma_status() entry in ata_port_operations. - * - * LOCKING: - * spin_lock_irqsave(host lock) - */ -u8 ata_bmdma_status(struct ata_port *ap) -{ - return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); -} - -/** - * ata_bus_reset - reset host port and associated ATA channel - * @ap: port to reset - * - * This is typically the first time we actually start issuing - * commands to the ATA channel. We wait for BSY to clear, then - * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its - * result. Determine what devices, if any, are on the channel - * by looking at the device 0/1 error register. Look at the signature - * stored in each device's taskfile registers, to determine if - * the device is ATA or ATAPI. - * - * LOCKING: - * PCI/etc. bus probe sem. - * Obtains host lock. - * - * SIDE EFFECTS: - * Sets ATA_FLAG_DISABLED if bus reset fails. - * - * DEPRECATED: - * This function is only for drivers which still use old EH and - * will be removed soon. 
- */ -void ata_bus_reset(struct ata_port *ap) -{ - struct ata_device *device = ap->link.device; - struct ata_ioports *ioaddr = &ap->ioaddr; - unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; - u8 err; - unsigned int dev0, dev1 = 0, devmask = 0; - int rc; - - DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no); - - /* determine if device 0/1 are present */ - if (ap->flags & ATA_FLAG_SATA_RESET) - dev0 = 1; - else { - dev0 = ata_devchk(ap, 0); - if (slave_possible) - dev1 = ata_devchk(ap, 1); - } - - if (dev0) - devmask |= (1 << 0); - if (dev1) - devmask |= (1 << 1); - - /* select device 0 again */ - ap->ops->sff_dev_select(ap, 0); - - /* issue bus reset */ - if (ap->flags & ATA_FLAG_SRST) { - rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ); - if (rc && rc != -ENODEV) - goto err_out; - } - - /* - * determine by signature whether we have ATA or ATAPI devices - */ - device[0].class = ata_sff_dev_classify(&device[0], dev0, &err); - if ((slave_possible) && (err != 0x81)) - device[1].class = ata_sff_dev_classify(&device[1], dev1, &err); - - /* is double-select really necessary? */ - if (device[1].class != ATA_DEV_NONE) - ap->ops->sff_dev_select(ap, 1); - if (device[0].class != ATA_DEV_NONE) - ap->ops->sff_dev_select(ap, 0); - - /* if no devices were detected, disable this port */ - if ((device[0].class == ATA_DEV_NONE) && - (device[1].class == ATA_DEV_NONE)) - goto err_out; - - if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) { - /* set up device control for ATA_FLAG_SATA_RESET */ - iowrite8(ap->ctl, ioaddr->ctl_addr); - } - - DPRINTK("EXIT\n"); - return; - -err_out: - ata_port_printk(ap, KERN_ERR, "disabling port\n"); - ata_port_disable(ap); - - DPRINTK("EXIT\n"); -} +EXPORT_SYMBOL_GPL(ata_sff_std_ports); #ifdef CONFIG_PCI -/** - * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex - * @pdev: PCI device - * - * Some PCI ATA devices report simplex mode but in fact can be told to - * enter non simplex mode. This implements the necessary logic to - * perform the task on such devices. Calling it on other devices will - * have -undefined- behaviour. - */ -int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev) -{ - unsigned long bmdma = pci_resource_start(pdev, 4); - u8 simplex; - - if (bmdma == 0) - return -ENOENT; - - simplex = inb(bmdma + 0x02); - outb(simplex & 0x60, bmdma + 0x02); - simplex = inb(bmdma + 0x02); - if (simplex & 0x80) - return -EOPNOTSUPP; - return 0; -} - -/** - * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host - * @host: target ATA host - * - * Acquire PCI BMDMA resources and initialize @host accordingly. - * - * LOCKING: - * Inherited from calling layer (may sleep). - * - * RETURNS: - * 0 on success, -errno otherwise. 
- */ -int ata_pci_bmdma_init(struct ata_host *host) -{ - struct device *gdev = host->dev; - struct pci_dev *pdev = to_pci_dev(gdev); - int i, rc; - - /* No BAR4 allocation: No DMA */ - if (pci_resource_start(pdev, 4) == 0) - return 0; - - /* TODO: If we get no DMA mask we should fall back to PIO */ - rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); - if (rc) - return rc; - rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); - if (rc) - return rc; - - /* request and iomap DMA region */ - rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev)); - if (rc) { - dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n"); - return -ENOMEM; - } - host->iomap = pcim_iomap_table(pdev); - - for (i = 0; i < 2; i++) { - struct ata_port *ap = host->ports[i]; - void __iomem *bmdma = host->iomap[4] + 8 * i; - - if (ata_port_is_dummy(ap)) - continue; - - ap->ioaddr.bmdma_addr = bmdma; - if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) && - (ioread8(bmdma + 2) & 0x80)) - host->flags |= ATA_HOST_SIMPLEX; - - ata_port_desc(ap, "bmdma 0x%llx", - (unsigned long long)pci_resource_start(pdev, 4) + 8 * i); - } - - return 0; -} - static int ata_resources_present(struct pci_dev *pdev, int port) { int i; /* Check the PCI resources for this channel are enabled */ port = port * 2; - for (i = 0; i < 2; i ++) { + for (i = 0; i < 2; i++) { if (pci_resource_start(pdev, port + i) == 0 || pci_resource_len(pdev, port + i) == 0) return 0; @@ -2462,9 +2318,9 @@ int ata_pci_sff_init_host(struct ata_host *host) rc = pcim_iomap_regions(pdev, 0x3 << base, dev_driver_string(gdev)); if (rc) { - dev_printk(KERN_WARNING, gdev, - "failed to request/iomap BARs for port %d " - "(errno=%d)\n", i, rc); + dev_warn(gdev, + "failed to request/iomap BARs for port %d (errno=%d)\n", + i, rc); if (rc == -EBUSY) pcim_pin_device(pdev); ap->ops = &ata_dummy_port_ops; @@ -2486,21 +2342,22 @@ int ata_pci_sff_init_host(struct ata_host *host) } if (!mask) { - dev_printk(KERN_ERR, gdev, "no available native port\n"); + dev_err(gdev, "no available native port\n"); return -ENODEV; } return 0; } +EXPORT_SYMBOL_GPL(ata_pci_sff_init_host); /** - * ata_pci_sff_prepare_host - helper to prepare native PCI ATA host + * ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host * @pdev: target PCI device * @ppi: array of port_info, must be enough for two ports * @r_host: out argument for the initialized ATA host * - * Helper to allocate ATA host for @pdev, acquire all native PCI - * resources and initialize it accordingly in one go. + * Helper to allocate PIO-only SFF ATA host for @pdev, acquire + * all PCI resources and initialize it accordingly in one go. * * LOCKING: * Inherited from calling layer (may sleep). @@ -2509,7 +2366,7 @@ int ata_pci_sff_init_host(struct ata_host *host) * 0 on success, -errno otherwise. 
*/ int ata_pci_sff_prepare_host(struct pci_dev *pdev, - const struct ata_port_info * const * ppi, + const struct ata_port_info * const *ppi, struct ata_host **r_host) { struct ata_host *host; @@ -2520,8 +2377,7 @@ int ata_pci_sff_prepare_host(struct pci_dev *pdev, host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2); if (!host) { - dev_printk(KERN_ERR, &pdev->dev, - "failed to allocate ATA host\n"); + dev_err(&pdev->dev, "failed to allocate ATA host\n"); rc = -ENOMEM; goto err_out; } @@ -2530,26 +2386,15 @@ int ata_pci_sff_prepare_host(struct pci_dev *pdev, if (rc) goto err_out; - /* init DMA related stuff */ - rc = ata_pci_bmdma_init(host); - if (rc) - goto err_bmdma; - devres_remove_group(&pdev->dev, NULL); *r_host = host; return 0; - err_bmdma: - /* This is necessary because PCI and iomap resources are - * merged and releasing the top group won't release the - * acquired resources if some of those have been acquired - * before entering this function. - */ - pcim_iounmap_regions(pdev, 0xf); - err_out: +err_out: devres_release_group(&pdev->dev, NULL); return rc; } +EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host); /** * ata_pci_sff_activate_host - start SFF host, request IRQ and register it @@ -2588,28 +2433,24 @@ int ata_pci_sff_activate_host(struct ata_host *host, mask = (1 << 2) | (1 << 0); if ((tmp8 & mask) != mask) legacy_mode = 1; -#if defined(CONFIG_NO_ATA_LEGACY) - /* Some platforms with PCI limits cannot address compat - port space. In that case we punt if their firmware has - left a device in compatibility mode */ - if (legacy_mode) { - printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n"); - return -EOPNOTSUPP; - } -#endif } if (!devres_open_group(dev, NULL, GFP_KERNEL)) return -ENOMEM; if (!legacy_mode && pdev->irq) { + int i; + rc = devm_request_irq(dev, pdev->irq, irq_handler, IRQF_SHARED, drv_name, host); if (rc) goto out; - ata_port_desc(host->ports[0], "irq %d", pdev->irq); - ata_port_desc(host->ports[1], "irq %d", pdev->irq); + for (i = 0; i < 2; i++) { + if (ata_port_is_dummy(host->ports[i])) + continue; + ata_port_desc(host->ports[i], "irq %d", pdev->irq); + } } else if (legacy_mode) { if (!ata_port_is_dummy(host->ports[0])) { rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev), @@ -2635,7 +2476,7 @@ int ata_pci_sff_activate_host(struct ata_host *host, } rc = ata_host_register(host, sht); - out: +out: if (rc == 0) devres_remove_group(dev, NULL); else @@ -2643,21 +2484,86 @@ int ata_pci_sff_activate_host(struct ata_host *host, return rc; } +EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host); + +static const struct ata_port_info *ata_sff_find_valid_pi( + const struct ata_port_info * const *ppi) +{ + int i; + + /* look up the first valid port_info */ + for (i = 0; i < 2 && ppi[i]; i++) + if (ppi[i]->port_ops != &ata_dummy_port_ops) + return ppi[i]; + + return NULL; +} + +static int ata_pci_init_one(struct pci_dev *pdev, + const struct ata_port_info * const *ppi, + struct scsi_host_template *sht, void *host_priv, + int hflags, bool bmdma) +{ + struct device *dev = &pdev->dev; + const struct ata_port_info *pi; + struct ata_host *host = NULL; + int rc; + + DPRINTK("ENTER\n"); + + pi = ata_sff_find_valid_pi(ppi); + if (!pi) { + dev_err(&pdev->dev, "no valid port_info specified\n"); + return -EINVAL; + } + + if (!devres_open_group(dev, NULL, GFP_KERNEL)) + return -ENOMEM; + + rc = pcim_enable_device(pdev); + if (rc) + goto out; + +#ifdef CONFIG_ATA_BMDMA + if (bmdma) + /* prepare and activate BMDMA host */ + rc = ata_pci_bmdma_prepare_host(pdev, ppi, 
&host); + else +#endif + /* prepare and activate SFF host */ + rc = ata_pci_sff_prepare_host(pdev, ppi, &host); + if (rc) + goto out; + host->private_data = host_priv; + host->flags |= hflags; + +#ifdef CONFIG_ATA_BMDMA + if (bmdma) { + pci_set_master(pdev); + rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht); + } else +#endif + rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); +out: + if (rc == 0) + devres_remove_group(&pdev->dev, NULL); + else + devres_release_group(&pdev->dev, NULL); + + return rc; +} /** - * ata_pci_sff_init_one - Initialize/register PCI IDE host controller + * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller * @pdev: Controller to be initialized * @ppi: array of port_info, must be enough for two ports * @sht: scsi_host_template to use when registering the host * @host_priv: host private_data + * @hflag: host flags * * This is a helper function which can be called from a driver's * xxx_init_one() probe function if the hardware uses traditional - * IDE taskfile registers. - * - * This function calls pci_enable_device(), reserves its register - * regions, sets the dma mask, enables bus master mode, and calls - * ata_device_add() + * IDE taskfile registers and is PIO only. * * ASSUMPTION: * Nobody makes a single channel controller that appears solely as @@ -2670,100 +2576,762 @@ int ata_pci_sff_activate_host(struct ata_host *host, * Zero on success, negative on errno-based value on error. */ int ata_pci_sff_init_one(struct pci_dev *pdev, - const struct ata_port_info * const * ppi, - struct scsi_host_template *sht, void *host_priv) + const struct ata_port_info * const *ppi, + struct scsi_host_template *sht, void *host_priv, int hflag) { - struct device *dev = &pdev->dev; - const struct ata_port_info *pi = NULL; - struct ata_host *host = NULL; - int i, rc; + return ata_pci_init_one(pdev, ppi, sht, host_priv, hflag, 0); +} +EXPORT_SYMBOL_GPL(ata_pci_sff_init_one); - DPRINTK("ENTER\n"); +#endif /* CONFIG_PCI */ - /* look up the first valid port_info */ - for (i = 0; i < 2 && ppi[i]; i++) { - if (ppi[i]->port_ops != &ata_dummy_port_ops) { - pi = ppi[i]; - break; +/* + * BMDMA support + */ + +#ifdef CONFIG_ATA_BMDMA + +const struct ata_port_operations ata_bmdma_port_ops = { + .inherits = &ata_sff_port_ops, + + .error_handler = ata_bmdma_error_handler, + .post_internal_cmd = ata_bmdma_post_internal_cmd, + + .qc_prep = ata_bmdma_qc_prep, + .qc_issue = ata_bmdma_qc_issue, + + .sff_irq_clear = ata_bmdma_irq_clear, + .bmdma_setup = ata_bmdma_setup, + .bmdma_start = ata_bmdma_start, + .bmdma_stop = ata_bmdma_stop, + .bmdma_status = ata_bmdma_status, + + .port_start = ata_bmdma_port_start, +}; +EXPORT_SYMBOL_GPL(ata_bmdma_port_ops); + +const struct ata_port_operations ata_bmdma32_port_ops = { + .inherits = &ata_bmdma_port_ops, + + .sff_data_xfer = ata_sff_data_xfer32, + .port_start = ata_bmdma_port_start32, +}; +EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops); + +/** + * ata_bmdma_fill_sg - Fill PCI IDE PRD table + * @qc: Metadata associated with taskfile to be transferred + * + * Fill PCI IDE PRD (scatter-gather) table with segments + * associated with the current disk command. 
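
The ops tables above are meant to be inherited rather than copied: ata_bmdma_port_ops layers DMA handling onto ata_sff_port_ops, and ata_bmdma32_port_ops swaps in 32-bit PIO data transfers on top of that. A low-level driver normally inherits one of them and overrides only what its hardware needs; a hedged sketch, where the my_* identifiers are invented for illustration:

    /* Hypothetical driver fragment: inherit the stock BMDMA ops and
     * override only the timing hooks this controller actually needs. */
    static struct ata_port_operations my_port_ops = {
        .inherits       = &ata_bmdma_port_ops,
        .set_piomode    = my_set_piomode,       /* hypothetical helpers */
        .set_dmamode    = my_set_dmamode,
    };
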
+ * + * LOCKING: + * spin_lock_irqsave(host lock) + * + */ +static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct ata_bmdma_prd *prd = ap->bmdma_prd; + struct scatterlist *sg; + unsigned int si, pi; + + pi = 0; + for_each_sg(qc->sg, sg, qc->n_elem, si) { + u32 addr, offset; + u32 sg_len, len; + + /* determine if physical DMA addr spans 64K boundary. + * Note h/w doesn't support 64-bit, so we unconditionally + * truncate dma_addr_t to u32. + */ + addr = (u32) sg_dma_address(sg); + sg_len = sg_dma_len(sg); + + while (sg_len) { + offset = addr & 0xffff; + len = sg_len; + if ((offset + sg_len) > 0x10000) + len = 0x10000 - offset; + + prd[pi].addr = cpu_to_le32(addr); + prd[pi].flags_len = cpu_to_le32(len & 0xffff); + VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len); + + pi++; + sg_len -= len; + addr += len; } } - if (!pi) { - dev_printk(KERN_ERR, &pdev->dev, - "no valid port_info specified\n"); - return -EINVAL; + prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); +} + +/** + * ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table + * @qc: Metadata associated with taskfile to be transferred + * + * Fill PCI IDE PRD (scatter-gather) table with segments + * associated with the current disk command. Perform the fill + * so that we avoid writing any length 64K records for + * controllers that don't follow the spec. + * + * LOCKING: + * spin_lock_irqsave(host lock) + * + */ +static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct ata_bmdma_prd *prd = ap->bmdma_prd; + struct scatterlist *sg; + unsigned int si, pi; + + pi = 0; + for_each_sg(qc->sg, sg, qc->n_elem, si) { + u32 addr, offset; + u32 sg_len, len, blen; + + /* determine if physical DMA addr spans 64K boundary. + * Note h/w doesn't support 64-bit, so we unconditionally + * truncate dma_addr_t to u32. + */ + addr = (u32) sg_dma_address(sg); + sg_len = sg_dma_len(sg); + + while (sg_len) { + offset = addr & 0xffff; + len = sg_len; + if ((offset + sg_len) > 0x10000) + len = 0x10000 - offset; + + blen = len & 0xffff; + prd[pi].addr = cpu_to_le32(addr); + if (blen == 0) { + /* Some PATA chipsets like the CS5530 can't + cope with 0x0000 meaning 64K as the spec + says */ + prd[pi].flags_len = cpu_to_le32(0x8000); + blen = 0x8000; + prd[++pi].addr = cpu_to_le32(addr + 0x8000); + } + prd[pi].flags_len = cpu_to_le32(blen); + VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len); + + pi++; + sg_len -= len; + addr += len; + } } - if (!devres_open_group(dev, NULL, GFP_KERNEL)) - return -ENOMEM; + prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); +} - rc = pcim_enable_device(pdev); - if (rc) - goto out; +/** + * ata_bmdma_qc_prep - Prepare taskfile for submission + * @qc: Metadata associated with taskfile to be prepared + * + * Prepare ATA taskfile for submission. 
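
The boundary handling in ata_bmdma_fill_sg() above is easy to verify with plain arithmetic: a segment starting at 0x1f000 with length 0x3000 crosses the 64K boundary at 0x20000, so it must become two PRD entries of 0x1000 and 0x2000 bytes. A standalone sketch of the inner splitting loop:

    #include <stdio.h>

    /* Sketch of the 64K-boundary split: a PRD entry may not cross a
     * 64K physical boundary, so each segment is chopped at the next one. */
    static void fill_prd(unsigned int addr, unsigned int sg_len)
    {
        unsigned int pi = 0;

        while (sg_len) {
            unsigned int offset = addr & 0xffff;
            unsigned int len = sg_len;

            if (offset + sg_len > 0x10000)
                len = 0x10000 - offset;

            printf("PRD[%u] = (0x%X, 0x%X)\n", pi++, addr, len);
            sg_len -= len;
            addr += len;
        }
    }

    int main(void)
    {
        fill_prd(0x1f000, 0x3000);  /* prints two entries: 0x1000 + 0x2000 */
        return 0;
    }
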
+ * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +void ata_bmdma_qc_prep(struct ata_queued_cmd *qc) +{ + if (!(qc->flags & ATA_QCFLAG_DMAMAP)) + return; - /* prepare and activate SFF host */ - rc = ata_pci_sff_prepare_host(pdev, ppi, &host); - if (rc) - goto out; - host->private_data = host_priv; + ata_bmdma_fill_sg(qc); +} +EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep); - pci_set_master(pdev); - rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); - out: - if (rc == 0) - devres_remove_group(&pdev->dev, NULL); - else - devres_release_group(&pdev->dev, NULL); +/** + * ata_bmdma_dumb_qc_prep - Prepare taskfile for submission + * @qc: Metadata associated with taskfile to be prepared + * + * Prepare ATA taskfile for submission. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc) +{ + if (!(qc->flags & ATA_QCFLAG_DMAMAP)) + return; - return rc; + ata_bmdma_fill_sg_dumb(qc); } +EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep); -#endif /* CONFIG_PCI */ +/** + * ata_bmdma_qc_issue - issue taskfile to a BMDMA controller + * @qc: command to issue to device + * + * This function issues a PIO, NODATA or DMA command to a + * SFF/BMDMA controller. PIO and NODATA are handled by + * ata_sff_qc_issue(). + * + * LOCKING: + * spin_lock_irqsave(host lock) + * + * RETURNS: + * Zero on success, AC_ERR_* mask on failure + */ +unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct ata_link *link = qc->dev->link; -EXPORT_SYMBOL_GPL(ata_sff_port_ops); -EXPORT_SYMBOL_GPL(ata_bmdma_port_ops); -EXPORT_SYMBOL_GPL(ata_sff_qc_prep); -EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep); -EXPORT_SYMBOL_GPL(ata_sff_dev_select); -EXPORT_SYMBOL_GPL(ata_sff_check_status); -EXPORT_SYMBOL_GPL(ata_sff_altstatus); -EXPORT_SYMBOL_GPL(ata_sff_busy_sleep); -EXPORT_SYMBOL_GPL(ata_sff_wait_ready); -EXPORT_SYMBOL_GPL(ata_sff_tf_load); -EXPORT_SYMBOL_GPL(ata_sff_tf_read); -EXPORT_SYMBOL_GPL(ata_sff_exec_command); -EXPORT_SYMBOL_GPL(ata_sff_data_xfer); -EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq); -EXPORT_SYMBOL_GPL(ata_sff_irq_on); -EXPORT_SYMBOL_GPL(ata_sff_irq_clear); -EXPORT_SYMBOL_GPL(ata_sff_hsm_move); -EXPORT_SYMBOL_GPL(ata_sff_qc_issue); -EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf); -EXPORT_SYMBOL_GPL(ata_sff_host_intr); -EXPORT_SYMBOL_GPL(ata_sff_interrupt); -EXPORT_SYMBOL_GPL(ata_sff_freeze); -EXPORT_SYMBOL_GPL(ata_sff_thaw); -EXPORT_SYMBOL_GPL(ata_sff_prereset); -EXPORT_SYMBOL_GPL(ata_sff_dev_classify); -EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset); -EXPORT_SYMBOL_GPL(ata_sff_softreset); -EXPORT_SYMBOL_GPL(sata_sff_hardreset); -EXPORT_SYMBOL_GPL(ata_sff_postreset); -EXPORT_SYMBOL_GPL(ata_sff_error_handler); -EXPORT_SYMBOL_GPL(ata_sff_post_internal_cmd); -EXPORT_SYMBOL_GPL(ata_sff_port_start); -EXPORT_SYMBOL_GPL(ata_sff_std_ports); -EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter); + /* defer PIO handling to sff_qc_issue */ + if (!ata_is_dma(qc->tf.protocol)) + return ata_sff_qc_issue(qc); + + /* select the device */ + ata_dev_select(ap, qc->dev->devno, 1, 0); + + /* start the command */ + switch (qc->tf.protocol) { + case ATA_PROT_DMA: + WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); + + ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ + ap->ops->bmdma_setup(qc); /* set up bmdma */ + ap->ops->bmdma_start(qc); /* initiate bmdma */ + ap->hsm_task_state = HSM_ST_LAST; + break; + + case ATAPI_PROT_DMA: + WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); + + ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ + ap->ops->bmdma_setup(qc); /* 
set up bmdma */ + ap->hsm_task_state = HSM_ST_FIRST; + + /* send cdb by polling if no cdb interrupt */ + if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) + ata_sff_queue_pio_task(link, 0); + break; + + default: + WARN_ON(1); + return AC_ERR_SYSTEM; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue); + +/** + * ata_bmdma_port_intr - Handle BMDMA port interrupt + * @ap: Port on which interrupt arrived (possibly...) + * @qc: Taskfile currently active in engine + * + * Handle port interrupt for given queued command. + * + * LOCKING: + * spin_lock_irqsave(host lock) + * + * RETURNS: + * One if interrupt was handled, zero if not (shared irq). + */ +unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) +{ + struct ata_eh_info *ehi = &ap->link.eh_info; + u8 host_stat = 0; + bool bmdma_stopped = false; + unsigned int handled; + + if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) { + /* check status of DMA engine */ + host_stat = ap->ops->bmdma_status(ap); + VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat); + + /* if it's not our irq... */ + if (!(host_stat & ATA_DMA_INTR)) + return ata_sff_idle_irq(ap); + + /* before we do anything else, clear DMA-Start bit */ + ap->ops->bmdma_stop(qc); + bmdma_stopped = true; + + if (unlikely(host_stat & ATA_DMA_ERR)) { + /* error when transferring data to/from memory */ + qc->err_mask |= AC_ERR_HOST_BUS; + ap->hsm_task_state = HSM_ST_ERR; + } + } + + handled = __ata_sff_port_intr(ap, qc, bmdma_stopped); + + if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol)) + ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); + + return handled; +} +EXPORT_SYMBOL_GPL(ata_bmdma_port_intr); + +/** + * ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler + * @irq: irq line (unused) + * @dev_instance: pointer to our ata_host information structure + * + * Default interrupt handler for PCI IDE devices. Calls + * ata_bmdma_port_intr() for each port that is not disabled. + * + * LOCKING: + * Obtains host lock during operation. + * + * RETURNS: + * IRQ_NONE or IRQ_HANDLED. + */ +irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance) +{ + return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr); +} +EXPORT_SYMBOL_GPL(ata_bmdma_interrupt); + +/** + * ata_bmdma_error_handler - Stock error handler for BMDMA controller + * @ap: port to handle error for + * + * Stock error handler for BMDMA controller. It can handle both + * PATA and SATA controllers. Most BMDMA controllers should be + * able to use this EH as-is or with some added handling before + * and after. + * + * LOCKING: + * Kernel thread context (may sleep) + */ +void ata_bmdma_error_handler(struct ata_port *ap) +{ + struct ata_queued_cmd *qc; + unsigned long flags; + bool thaw = false; + + qc = __ata_qc_from_tag(ap, ap->link.active_tag); + if (qc && !(qc->flags & ATA_QCFLAG_FAILED)) + qc = NULL; + + /* reset PIO HSM and stop DMA engine */ + spin_lock_irqsave(ap->lock, flags); + + if (qc && ata_is_dma(qc->tf.protocol)) { + u8 host_stat; + + host_stat = ap->ops->bmdma_status(ap); + + /* BMDMA controllers indicate host bus error by + * setting DMA_ERR bit and timing out. As it wasn't + * really a timeout event, adjust error mask and + * cancel frozen state. 
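
The triage at the top of ata_bmdma_port_intr() above comes down to two bits of the BMDMA status register: ATA_DMA_INTR (bit 2) says this channel raised the interrupt and ATA_DMA_ERR (bit 1) flags a host-bus transfer error. A standalone sketch of the resulting decision table, with the bit values spelled out locally:

    #include <stdio.h>

    #define ATA_DMA_ERR  (1 << 1)   /* host bus transfer error */
    #define ATA_DMA_INTR (1 << 2)   /* interrupt raised by this channel */

    /* Sketch of the interrupt triage in ata_bmdma_port_intr(). */
    static const char *triage(unsigned char host_stat)
    {
        if (!(host_stat & ATA_DMA_INTR))
            return "not ours, hand back to shared-IRQ logic";
        if (host_stat & ATA_DMA_ERR)
            return "ours, host bus error (HSM_ST_ERR)";
        return "ours, clean completion";
    }

    int main(void)
    {
        const unsigned char samples[] = { 0x00, 0x04, 0x06 };

        for (unsigned int i = 0; i < sizeof(samples); i++)
            printf("host_stat 0x%02x: %s\n", samples[i], triage(samples[i]));
        return 0;
    }
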
+ */
+		if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
+			qc->err_mask = AC_ERR_HOST_BUS;
+			thaw = true;
+		}
+
+		ap->ops->bmdma_stop(qc);
+
+		/* if we're gonna thaw, make sure IRQ is clear */
+		if (thaw) {
+			ap->ops->sff_check_status(ap);
+			if (ap->ops->sff_irq_clear)
+				ap->ops->sff_irq_clear(ap);
+		}
+	}
+
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	if (thaw)
+		ata_eh_thaw_port(ap);
+
+	ata_sff_error_handler(ap);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
+
+/**
+ * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA
+ * @qc: internal command to clean up
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ */
+void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned long flags;
+
+	if (ata_is_dma(qc->tf.protocol)) {
+		spin_lock_irqsave(ap->lock, flags);
+		ap->ops->bmdma_stop(qc);
+		spin_unlock_irqrestore(ap->lock, flags);
+	}
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
+
+/**
+ * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
+ * @ap: Port associated with this ATA transaction.
+ *
+ * Clear interrupt and error flags in DMA status register.
+ *
+ * May be used as the irq_clear() entry in ata_port_operations.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_irq_clear(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+	if (!mmio)
+		return;
+
+	iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
+
+/**
+ * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
+ * @qc: Info associated with this ATA transaction.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+	u8 dmactl;
+
+	/* load PRD table addr. */
+	mb();	/* make sure PRD table writes are visible to controller */
+	iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
+
+	/* specify data direction, triple-check start bit is clear */
+	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
+	if (!rw)
+		dmactl |= ATA_DMA_WR;
+	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+	/* issue r/w command */
+	ap->ops->sff_exec_command(ap, &qc->tf);
+}
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
+
+/**
+ * ata_bmdma_start - Start a PCI IDE BMDMA transaction
+ * @qc: Info associated with this ATA transaction.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	u8 dmactl;
+
+	/* start host DMA transaction */
+	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+	/* Strictly, one may wish to issue an ioread8() here, to
+	 * flush the mmio write.  However, control also passes
+	 * to the hardware at this point, and it will interrupt
+	 * us when we are to resume control.  So, in effect,
+	 * we don't care when the mmio write flushes.
+	 * Further, a read of the DMA status register _immediately_
+	 * following the write may not be what certain flaky hardware
+	 * is expecting, so I think it is best not to add a readb()
+	 * without first checking all the MMIO ATA cards/mobos.
+	 * Or maybe I'm just being paranoid.
+ * + * FIXME: The posting of this write means I/O starts are + * unnecessarily delayed for MMIO + */ +} EXPORT_SYMBOL_GPL(ata_bmdma_start); + +/** + * ata_bmdma_stop - Stop PCI IDE BMDMA transfer + * @qc: Command we are ending DMA for + * + * Clears the ATA_DMA_START flag in the dma control register + * + * May be used as the bmdma_stop() entry in ata_port_operations. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +void ata_bmdma_stop(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + void __iomem *mmio = ap->ioaddr.bmdma_addr; + + /* clear start/stop bit */ + iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START, + mmio + ATA_DMA_CMD); + + /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ + ata_sff_dma_pause(ap); +} EXPORT_SYMBOL_GPL(ata_bmdma_stop); + +/** + * ata_bmdma_status - Read PCI IDE BMDMA status + * @ap: Port associated with this ATA transaction. + * + * Read and return BMDMA status register. + * + * May be used as the bmdma_status() entry in ata_port_operations. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +u8 ata_bmdma_status(struct ata_port *ap) +{ + return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); +} EXPORT_SYMBOL_GPL(ata_bmdma_status); -EXPORT_SYMBOL_GPL(ata_bus_reset); + + +/** + * ata_bmdma_port_start - Set port up for bmdma. + * @ap: Port to initialize + * + * Called just after data structures for each port are + * initialized. Allocates space for PRD table. + * + * May be used as the port_start() entry in ata_port_operations. + * + * LOCKING: + * Inherited from caller. + */ +int ata_bmdma_port_start(struct ata_port *ap) +{ + if (ap->mwdma_mask || ap->udma_mask) { + ap->bmdma_prd = + dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ, + &ap->bmdma_prd_dma, GFP_KERNEL); + if (!ap->bmdma_prd) + return -ENOMEM; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ata_bmdma_port_start); + +/** + * ata_bmdma_port_start32 - Set port up for dma. + * @ap: Port to initialize + * + * Called just after data structures for each port are + * initialized. Enables 32bit PIO and allocates space for PRD + * table. + * + * May be used as the port_start() entry in ata_port_operations for + * devices that are capable of 32bit PIO. + * + * LOCKING: + * Inherited from caller. + */ +int ata_bmdma_port_start32(struct ata_port *ap) +{ + ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE; + return ata_bmdma_port_start(ap); +} +EXPORT_SYMBOL_GPL(ata_bmdma_port_start32); + #ifdef CONFIG_PCI + +/** + * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex + * @pdev: PCI device + * + * Some PCI ATA devices report simplex mode but in fact can be told to + * enter non simplex mode. This implements the necessary logic to + * perform the task on such devices. Calling it on other devices will + * have -undefined- behaviour. 
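
The helpers above poke a fixed per-channel register block from the SFF-8038i convention: the DMA command register at offset 0, the status register at offset 2, and the PRD table pointer at offset 4. The status register's interrupt and error bits are write-one-to-clear, which is why ata_bmdma_irq_clear() simply writes back the value it just read. A standalone sketch of that effect:

    #include <stdio.h>

    /* SFF-8038i per-channel BMDMA register offsets. */
    enum { DMA_CMD = 0, DMA_STATUS = 2, DMA_TABLE_OFS = 4 };

    /* Status bits 1 (error) and 2 (interrupt) are write-one-to-clear:
     * writing back the value just read clears exactly the bits that
     * were set and leaves the read-only capability bits alone. */
    static unsigned char w1c_write(unsigned char status, unsigned char written)
    {
        return status & ~(written & 0x06);
    }

    int main(void)
    {
        unsigned char status = 0x66;    /* intr + err + drive-capable bits */

        printf("status 0x%02x -> 0x%02x after write-back\n",
               status, w1c_write(status, status));
        return 0;
    }
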
+ */ +int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev) +{ + unsigned long bmdma = pci_resource_start(pdev, 4); + u8 simplex; + + if (bmdma == 0) + return -ENOENT; + + simplex = inb(bmdma + 0x02); + outb(simplex & 0x60, bmdma + 0x02); + simplex = inb(bmdma + 0x02); + if (simplex & 0x80) + return -EOPNOTSUPP; + return 0; +} EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex); + +static void ata_bmdma_nodma(struct ata_host *host, const char *reason) +{ + int i; + + dev_err(host->dev, "BMDMA: %s, falling back to PIO\n", reason); + + for (i = 0; i < 2; i++) { + host->ports[i]->mwdma_mask = 0; + host->ports[i]->udma_mask = 0; + } +} + +/** + * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host + * @host: target ATA host + * + * Acquire PCI BMDMA resources and initialize @host accordingly. + * + * LOCKING: + * Inherited from calling layer (may sleep). + */ +void ata_pci_bmdma_init(struct ata_host *host) +{ + struct device *gdev = host->dev; + struct pci_dev *pdev = to_pci_dev(gdev); + int i, rc; + + /* No BAR4 allocation: No DMA */ + if (pci_resource_start(pdev, 4) == 0) { + ata_bmdma_nodma(host, "BAR4 is zero"); + return; + } + + /* + * Some controllers require BMDMA region to be initialized + * even if DMA is not in use to clear IRQ status via + * ->sff_irq_clear method. Try to initialize bmdma_addr + * regardless of dma masks. + */ + rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + ata_bmdma_nodma(host, "failed to set dma mask"); + if (!rc) { + rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + ata_bmdma_nodma(host, + "failed to set consistent dma mask"); + } + + /* request and iomap DMA region */ + rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev)); + if (rc) { + ata_bmdma_nodma(host, "failed to request/iomap BAR4"); + return; + } + host->iomap = pcim_iomap_table(pdev); + + for (i = 0; i < 2; i++) { + struct ata_port *ap = host->ports[i]; + void __iomem *bmdma = host->iomap[4] + 8 * i; + + if (ata_port_is_dummy(ap)) + continue; + + ap->ioaddr.bmdma_addr = bmdma; + if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) && + (ioread8(bmdma + 2) & 0x80)) + host->flags |= ATA_HOST_SIMPLEX; + + ata_port_desc(ap, "bmdma 0x%llx", + (unsigned long long)pci_resource_start(pdev, 4) + 8 * i); + } +} EXPORT_SYMBOL_GPL(ata_pci_bmdma_init); -EXPORT_SYMBOL_GPL(ata_pci_sff_init_host); -EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host); -EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host); -EXPORT_SYMBOL_GPL(ata_pci_sff_init_one); + +/** + * ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host + * @pdev: target PCI device + * @ppi: array of port_info, must be enough for two ports + * @r_host: out argument for the initialized ATA host + * + * Helper to allocate BMDMA ATA host for @pdev, acquire all PCI + * resources and initialize it accordingly in one go. + * + * LOCKING: + * Inherited from calling layer (may sleep). + * + * RETURNS: + * 0 on success, -errno otherwise. 
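
The simplex dance above targets bit 7 of the BMDMA status register: on a simplex controller both channels share one DMA engine, but some parts merely power up with the bit set and drop it when written. The helper writes the status back with only the drive-capable bits (0x60) kept, then rereads; if bit 7 is still set, the hardware is genuinely simplex and -EOPNOTSUPP is returned. A standalone sketch, with 'latched' modelling the hardware's behaviour:

    #include <stdio.h>

    /* Sketch of ata_pci_bmdma_clear_simplex(): try to write the simplex
     * bit (0x80) away; real simplex hardware keeps it latched. */
    static int clear_simplex(int latched)
    {
        unsigned char status = 0xe0;    /* simplex + both drives capable */

        status &= 0x60;                 /* the clearing write */
        if (latched)
            status |= 0x80;             /* reread: the bit came back */
        return (status & 0x80) ? -1 : 0;
    }

    int main(void)
    {
        printf("fake simplex: %d\n", clear_simplex(0));  /* 0: cleared */
        printf("real simplex: %d\n", clear_simplex(1));  /* -1: stuck */
        return 0;
    }
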
+ */
+int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
+			       const struct ata_port_info * const * ppi,
+			       struct ata_host **r_host)
+{
+	int rc;
+
+	rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
+	if (rc)
+		return rc;
+
+	ata_pci_bmdma_init(*r_host);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);
+
+/**
+ * ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller
+ * @pdev: Controller to be initialized
+ * @ppi: array of port_info, must be enough for two ports
+ * @sht: scsi_host_template to use when registering the host
+ * @host_priv: host private_data
+ * @hflags: host flags
+ *
+ * This function is similar to ata_pci_sff_init_one() but also
+ * takes care of BMDMA initialization.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, negative errno value on error.
+ */
+int ata_pci_bmdma_init_one(struct pci_dev *pdev,
+			   const struct ata_port_info * const * ppi,
+			   struct scsi_host_template *sht, void *host_priv,
+			   int hflags)
+{
+	return ata_pci_init_one(pdev, ppi, sht, host_priv, hflags, 1);
+}
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);
+
 #endif /* CONFIG_PCI */
+#endif /* CONFIG_ATA_BMDMA */
+
+/**
+ * ata_sff_port_init - Initialize SFF/BMDMA ATA port
+ * @ap: Port to initialize
+ *
+ * Called on port allocation to initialize SFF/BMDMA specific
+ * fields.
+ *
+ * LOCKING:
+ * None.
+ */
+void ata_sff_port_init(struct ata_port *ap)
+{
+	INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);
+	ap->ctl = ATA_DEVCTL_OBS;
+	ap->last_ctl = 0xFF;
+}
+
+int __init ata_sff_init(void)
+{
+	ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
+	if (!ata_sff_wq)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void ata_sff_exit(void)
+{
+	destroy_workqueue(ata_sff_wq);
+}
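
Taken together, these helpers shrink a typical single-function PATA driver's probe routine to a port_info table and one call. A hedged sketch of such a probe, where the my_* identifiers and the chosen transfer-mode masks are invented for illustration:

    /* Hypothetical probe: everything between pci_enable_device() and
     * ata_host_register() is handled by ata_pci_bmdma_init_one(). */
    static int my_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
    {
        static const struct ata_port_info info = {
            .flags          = ATA_FLAG_SLAVE_POSS,
            .pio_mask       = ATA_PIO4,
            .mwdma_mask     = ATA_MWDMA2,
            .udma_mask      = ATA_UDMA5,
            .port_ops       = &my_port_ops,     /* see the sketch above */
        };
        const struct ata_port_info *ppi[] = { &info, NULL };

        return ata_pci_bmdma_init_one(pdev, ppi, &my_sht, NULL, 0);
    }
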
