Diffstat (limited to 'drivers/scsi/libata-core.c')
-rw-r--r-- | drivers/scsi/libata-core.c | 4024
1 file changed, 4024 insertions, 0 deletions
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c new file mode 100644 index 00000000000..0b5d3a5b7ed --- /dev/null +++ b/drivers/scsi/libata-core.c @@ -0,0 +1,4024 @@ +/* + libata-core.c - helper library for ATA + + Copyright 2003-2004 Red Hat, Inc. All rights reserved. + Copyright 2003-2004 Jeff Garzik + + The contents of this file are subject to the Open + Software License version 1.1 that can be found at + http://www.opensource.org/licenses/osl-1.1.txt and is included herein + by reference. + + Alternatively, the contents of this file may be used under the terms + of the GNU General Public License version 2 (the "GPL") as distributed + in the kernel source COPYING file, in which case the provisions of + the GPL are applicable instead of the above. If you wish to allow + the use of your version of this file only under the terms of the + GPL and not to allow others to use your version of this file under + the OSL, indicate your decision by deleting the provisions above and + replace them with the notice and other provisions required by the GPL. + If you do not delete the provisions above, a recipient may use your + version of this file under either the OSL or the GPL. + + */ + +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/list.h> +#include <linux/mm.h> +#include <linux/highmem.h> +#include <linux/spinlock.h> +#include <linux/blkdev.h> +#include <linux/delay.h> +#include <linux/timer.h> +#include <linux/interrupt.h> +#include <linux/completion.h> +#include <linux/suspend.h> +#include <linux/workqueue.h> +#include <scsi/scsi.h> +#include "scsi.h" +#include "scsi_priv.h" +#include <scsi/scsi_host.h> +#include <linux/libata.h> +#include <asm/io.h> +#include <asm/semaphore.h> +#include <asm/byteorder.h> + +#include "libata.h" + +static unsigned int ata_busy_sleep (struct ata_port *ap, + unsigned long tmout_pat, + unsigned long tmout); +static void ata_set_mode(struct ata_port *ap); +static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev); +static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift); +static int fgb(u32 bitmap); +static int ata_choose_xfer_mode(struct ata_port *ap, + u8 *xfer_mode_out, + unsigned int *xfer_shift_out); +static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat); +static void __ata_qc_complete(struct ata_queued_cmd *qc); + +static unsigned int ata_unique_id = 1; +static struct workqueue_struct *ata_wq; + +MODULE_AUTHOR("Jeff Garzik"); +MODULE_DESCRIPTION("Library module for ATA devices"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +/** + * ata_tf_load - send taskfile registers to host controller + * @ap: Port to which output is sent + * @tf: ATA taskfile register set + * + * Outputs ATA taskfile to standard ATA host controller. + * + * LOCKING: + * Inherited from caller. 
+ */ + +static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; + + if (tf->ctl != ap->last_ctl) { + outb(tf->ctl, ioaddr->ctl_addr); + ap->last_ctl = tf->ctl; + ata_wait_idle(ap); + } + + if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { + outb(tf->hob_feature, ioaddr->feature_addr); + outb(tf->hob_nsect, ioaddr->nsect_addr); + outb(tf->hob_lbal, ioaddr->lbal_addr); + outb(tf->hob_lbam, ioaddr->lbam_addr); + outb(tf->hob_lbah, ioaddr->lbah_addr); + VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", + tf->hob_feature, + tf->hob_nsect, + tf->hob_lbal, + tf->hob_lbam, + tf->hob_lbah); + } + + if (is_addr) { + outb(tf->feature, ioaddr->feature_addr); + outb(tf->nsect, ioaddr->nsect_addr); + outb(tf->lbal, ioaddr->lbal_addr); + outb(tf->lbam, ioaddr->lbam_addr); + outb(tf->lbah, ioaddr->lbah_addr); + VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", + tf->feature, + tf->nsect, + tf->lbal, + tf->lbam, + tf->lbah); + } + + if (tf->flags & ATA_TFLAG_DEVICE) { + outb(tf->device, ioaddr->device_addr); + VPRINTK("device 0x%X\n", tf->device); + } + + ata_wait_idle(ap); +} + +/** + * ata_tf_load_mmio - send taskfile registers to host controller + * @ap: Port to which output is sent + * @tf: ATA taskfile register set + * + * Outputs ATA taskfile to standard ATA host controller using MMIO. + * + * LOCKING: + * Inherited from caller. + */ + +static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; + + if (tf->ctl != ap->last_ctl) { + writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr); + ap->last_ctl = tf->ctl; + ata_wait_idle(ap); + } + + if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { + writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr); + writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr); + writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr); + writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr); + writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr); + VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", + tf->hob_feature, + tf->hob_nsect, + tf->hob_lbal, + tf->hob_lbam, + tf->hob_lbah); + } + + if (is_addr) { + writeb(tf->feature, (void __iomem *) ioaddr->feature_addr); + writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr); + writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr); + writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr); + writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr); + VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", + tf->feature, + tf->nsect, + tf->lbal, + tf->lbam, + tf->lbah); + } + + if (tf->flags & ATA_TFLAG_DEVICE) { + writeb(tf->device, (void __iomem *) ioaddr->device_addr); + VPRINTK("device 0x%X\n", tf->device); + } + + ata_wait_idle(ap); +} + +void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf) +{ + if (ap->flags & ATA_FLAG_MMIO) + ata_tf_load_mmio(ap, tf); + else + ata_tf_load_pio(ap, tf); +} + +/** + * ata_exec_command - issue ATA command to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Issues PIO/MMIO write to ATA command register, with proper + * synchronization with interrupt handler / other threads. 
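 *
 * Caller-side sketch (a hedged illustration, not a quote from a driver;
 * ata_tf_init() and these flag/command names all appear elsewhere in
 * this file):
 *
 *	struct ata_taskfile tf;
 *
 *	ata_tf_init(ap, &tf, 0);
 *	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 *	tf.command = ATA_CMD_ID_ATA;
 *	ap->ops->tf_load(ap, &tf);
 *	ap->ops->exec_command(ap, &tf);
 *
 * tf_load() only latches the shadow registers; nothing starts on the
 * wire until exec_command() writes the command register.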
+ * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf) +{ + DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); + + outb(tf->command, ap->ioaddr.command_addr); + ata_pause(ap); +} + + +/** + * ata_exec_command_mmio - issue ATA command to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Issues MMIO write to ATA command register, with proper + * synchronization with interrupt handler / other threads. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf) +{ + DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); + + writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr); + ata_pause(ap); +} + +void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf) +{ + if (ap->flags & ATA_FLAG_MMIO) + ata_exec_command_mmio(ap, tf); + else + ata_exec_command_pio(ap, tf); +} + +/** + * ata_exec - issue ATA command to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Issues PIO/MMIO write to ATA command register, with proper + * synchronization with interrupt handler / other threads. + * + * LOCKING: + * Obtains host_set lock. + */ + +static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf) +{ + unsigned long flags; + + DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); + spin_lock_irqsave(&ap->host_set->lock, flags); + ap->ops->exec_command(ap, tf); + spin_unlock_irqrestore(&ap->host_set->lock, flags); +} + +/** + * ata_tf_to_host - issue ATA taskfile to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Issues ATA taskfile register set to ATA host controller, + * with proper synchronization with interrupt handler and + * other threads. + * + * LOCKING: + * Obtains host_set lock. + */ + +static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf) +{ + ap->ops->tf_load(ap, tf); + + ata_exec(ap, tf); +} + +/** + * ata_tf_to_host_nolock - issue ATA taskfile to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Issues ATA taskfile register set to ATA host controller, + * with proper synchronization with interrupt handler and + * other threads. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf) +{ + ap->ops->tf_load(ap, tf); + ap->ops->exec_command(ap, tf); +} + +/** + * ata_tf_read - input device's ATA taskfile shadow registers + * @ap: Port from which input is read + * @tf: ATA taskfile register set for storing input + * + * Reads ATA taskfile registers for currently-selected device + * into @tf. + * + * LOCKING: + * Inherited from caller. 
+ */ + +static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + + tf->nsect = inb(ioaddr->nsect_addr); + tf->lbal = inb(ioaddr->lbal_addr); + tf->lbam = inb(ioaddr->lbam_addr); + tf->lbah = inb(ioaddr->lbah_addr); + tf->device = inb(ioaddr->device_addr); + + if (tf->flags & ATA_TFLAG_LBA48) { + outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr); + tf->hob_feature = inb(ioaddr->error_addr); + tf->hob_nsect = inb(ioaddr->nsect_addr); + tf->hob_lbal = inb(ioaddr->lbal_addr); + tf->hob_lbam = inb(ioaddr->lbam_addr); + tf->hob_lbah = inb(ioaddr->lbah_addr); + } +} + +/** + * ata_tf_read_mmio - input device's ATA taskfile shadow registers + * @ap: Port from which input is read + * @tf: ATA taskfile register set for storing input + * + * Reads ATA taskfile registers for currently-selected device + * into @tf via MMIO. + * + * LOCKING: + * Inherited from caller. + */ + +static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + + tf->nsect = readb((void __iomem *)ioaddr->nsect_addr); + tf->lbal = readb((void __iomem *)ioaddr->lbal_addr); + tf->lbam = readb((void __iomem *)ioaddr->lbam_addr); + tf->lbah = readb((void __iomem *)ioaddr->lbah_addr); + tf->device = readb((void __iomem *)ioaddr->device_addr); + + if (tf->flags & ATA_TFLAG_LBA48) { + writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr); + tf->hob_feature = readb((void __iomem *)ioaddr->error_addr); + tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr); + tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr); + tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr); + tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr); + } +} + +void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) +{ + if (ap->flags & ATA_FLAG_MMIO) + ata_tf_read_mmio(ap, tf); + else + ata_tf_read_pio(ap, tf); +} + +/** + * ata_check_status_pio - Read device status reg & clear interrupt + * @ap: port where the device is + * + * Reads ATA taskfile status register for currently-selected device + * and return it's value. This also clears pending interrupts + * from this device + * + * LOCKING: + * Inherited from caller. + */ +static u8 ata_check_status_pio(struct ata_port *ap) +{ + return inb(ap->ioaddr.status_addr); +} + +/** + * ata_check_status_mmio - Read device status reg & clear interrupt + * @ap: port where the device is + * + * Reads ATA taskfile status register for currently-selected device + * via MMIO and return it's value. This also clears pending interrupts + * from this device + * + * LOCKING: + * Inherited from caller. 
+ */ +static u8 ata_check_status_mmio(struct ata_port *ap) +{ + return readb((void __iomem *) ap->ioaddr.status_addr); +} + +u8 ata_check_status(struct ata_port *ap) +{ + if (ap->flags & ATA_FLAG_MMIO) + return ata_check_status_mmio(ap); + return ata_check_status_pio(ap); +} + +u8 ata_altstatus(struct ata_port *ap) +{ + if (ap->ops->check_altstatus) + return ap->ops->check_altstatus(ap); + + if (ap->flags & ATA_FLAG_MMIO) + return readb((void __iomem *)ap->ioaddr.altstatus_addr); + return inb(ap->ioaddr.altstatus_addr); +} + +u8 ata_chk_err(struct ata_port *ap) +{ + if (ap->ops->check_err) + return ap->ops->check_err(ap); + + if (ap->flags & ATA_FLAG_MMIO) { + return readb((void __iomem *) ap->ioaddr.error_addr); + } + return inb(ap->ioaddr.error_addr); +} + +/** + * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure + * @tf: Taskfile to convert + * @fis: Buffer into which data will output + * @pmp: Port multiplier port + * + * Converts a standard ATA taskfile to a Serial ATA + * FIS structure (Register - Host to Device). + * + * LOCKING: + * Inherited from caller. + */ + +void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp) +{ + fis[0] = 0x27; /* Register - Host to Device FIS */ + fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number, + bit 7 indicates Command FIS */ + fis[2] = tf->command; + fis[3] = tf->feature; + + fis[4] = tf->lbal; + fis[5] = tf->lbam; + fis[6] = tf->lbah; + fis[7] = tf->device; + + fis[8] = tf->hob_lbal; + fis[9] = tf->hob_lbam; + fis[10] = tf->hob_lbah; + fis[11] = tf->hob_feature; + + fis[12] = tf->nsect; + fis[13] = tf->hob_nsect; + fis[14] = 0; + fis[15] = tf->ctl; + + fis[16] = 0; + fis[17] = 0; + fis[18] = 0; + fis[19] = 0; +} + +/** + * ata_tf_from_fis - Convert SATA FIS to ATA taskfile + * @fis: Buffer from which data will be input + * @tf: Taskfile to output + * + * Converts a standard ATA taskfile to a Serial ATA + * FIS structure (Register - Host to Device). + * + * LOCKING: + * Inherited from caller. + */ + +void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf) +{ + tf->command = fis[2]; /* status */ + tf->feature = fis[3]; /* error */ + + tf->lbal = fis[4]; + tf->lbam = fis[5]; + tf->lbah = fis[6]; + tf->device = fis[7]; + + tf->hob_lbal = fis[8]; + tf->hob_lbam = fis[9]; + tf->hob_lbah = fis[10]; + + tf->nsect = fis[12]; + tf->hob_nsect = fis[13]; +} + +/** + * ata_prot_to_cmd - determine which read/write opcodes to use + * @protocol: ATA_PROT_xxx taskfile protocol + * @lba48: true is lba48 is present + * + * Given necessary input, determine which read/write commands + * to use to transfer data. + * + * LOCKING: + * None. + */ +static int ata_prot_to_cmd(int protocol, int lba48) +{ + int rcmd = 0, wcmd = 0; + + switch (protocol) { + case ATA_PROT_PIO: + if (lba48) { + rcmd = ATA_CMD_PIO_READ_EXT; + wcmd = ATA_CMD_PIO_WRITE_EXT; + } else { + rcmd = ATA_CMD_PIO_READ; + wcmd = ATA_CMD_PIO_WRITE; + } + break; + + case ATA_PROT_DMA: + if (lba48) { + rcmd = ATA_CMD_READ_EXT; + wcmd = ATA_CMD_WRITE_EXT; + } else { + rcmd = ATA_CMD_READ; + wcmd = ATA_CMD_WRITE; + } + break; + + default: + return -1; + } + + return rcmd | (wcmd << 8); +} + +/** + * ata_dev_set_protocol - set taskfile protocol and r/w commands + * @dev: device to examine and configure + * + * Examine the device configuration, after we have + * read the identify-device page and configured the + * data transfer mode. Set internal state related to + * the ATA taskfile protocol (pio, pio mult, dma, etc.) + * and calculate the proper read/write commands to use. 
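 *
 * For example, with the table in ata_prot_to_cmd() above: a DMA-capable
 * LBA48 disk ends up with xfer_protocol == ATA_PROT_DMA and
 * read_cmd/write_cmd == ATA_CMD_READ_EXT/ATA_CMD_WRITE_EXT, while a
 * PIO-only, non-LBA48 device gets ATA_PROT_PIO with
 * ATA_CMD_PIO_READ/ATA_CMD_PIO_WRITE.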
+ * + * LOCKING: + * caller. + */ +static void ata_dev_set_protocol(struct ata_device *dev) +{ + int pio = (dev->flags & ATA_DFLAG_PIO); + int lba48 = (dev->flags & ATA_DFLAG_LBA48); + int proto, cmd; + + if (pio) + proto = dev->xfer_protocol = ATA_PROT_PIO; + else + proto = dev->xfer_protocol = ATA_PROT_DMA; + + cmd = ata_prot_to_cmd(proto, lba48); + if (cmd < 0) + BUG(); + + dev->read_cmd = cmd & 0xff; + dev->write_cmd = (cmd >> 8) & 0xff; +} + +static const char * xfer_mode_str[] = { + "UDMA/16", + "UDMA/25", + "UDMA/33", + "UDMA/44", + "UDMA/66", + "UDMA/100", + "UDMA/133", + "UDMA7", + "MWDMA0", + "MWDMA1", + "MWDMA2", + "PIO0", + "PIO1", + "PIO2", + "PIO3", + "PIO4", +}; + +/** + * ata_udma_string - convert UDMA bit offset to string + * @mask: mask of bits supported; only highest bit counts. + * + * Determine string which represents the highest speed + * (highest bit in @udma_mask). + * + * LOCKING: + * None. + * + * RETURNS: + * Constant C string representing highest speed listed in + * @udma_mask, or the constant C string "<n/a>". + */ + +static const char *ata_mode_string(unsigned int mask) +{ + int i; + + for (i = 7; i >= 0; i--) + if (mask & (1 << i)) + goto out; + for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--) + if (mask & (1 << i)) + goto out; + for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--) + if (mask & (1 << i)) + goto out; + + return "<n/a>"; + +out: + return xfer_mode_str[i]; +} + +/** + * ata_pio_devchk - PATA device presence detection + * @ap: ATA channel to examine + * @device: Device to examine (starting at zero) + * + * This technique was originally described in + * Hale Landis's ATADRVR (www.ata-atapi.com), and + * later found its way into the ATA/ATAPI spec. + * + * Write a pattern to the ATA shadow registers, + * and if a device is present, it will respond by + * correctly storing and echoing back the + * ATA shadow register contents. + * + * LOCKING: + * caller. + */ + +static unsigned int ata_pio_devchk(struct ata_port *ap, + unsigned int device) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + u8 nsect, lbal; + + ap->ops->dev_select(ap, device); + + outb(0x55, ioaddr->nsect_addr); + outb(0xaa, ioaddr->lbal_addr); + + outb(0xaa, ioaddr->nsect_addr); + outb(0x55, ioaddr->lbal_addr); + + outb(0x55, ioaddr->nsect_addr); + outb(0xaa, ioaddr->lbal_addr); + + nsect = inb(ioaddr->nsect_addr); + lbal = inb(ioaddr->lbal_addr); + + if ((nsect == 0x55) && (lbal == 0xaa)) + return 1; /* we found a device */ + + return 0; /* nothing found */ +} + +/** + * ata_mmio_devchk - PATA device presence detection + * @ap: ATA channel to examine + * @device: Device to examine (starting at zero) + * + * This technique was originally described in + * Hale Landis's ATADRVR (www.ata-atapi.com), and + * later found its way into the ATA/ATAPI spec. + * + * Write a pattern to the ATA shadow registers, + * and if a device is present, it will respond by + * correctly storing and echoing back the + * ATA shadow register contents. + * + * LOCKING: + * caller. 
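 *
 * (Identical 0x55/0xaa handshake to ata_pio_devchk() above, just via
 * writeb()/readb(); with nothing attached, the echoed nsect/lbal bytes
 * will not read back as 0x55/0xaa and the probe reports no device.)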
+ */ + +static unsigned int ata_mmio_devchk(struct ata_port *ap, + unsigned int device) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + u8 nsect, lbal; + + ap->ops->dev_select(ap, device); + + writeb(0x55, (void __iomem *) ioaddr->nsect_addr); + writeb(0xaa, (void __iomem *) ioaddr->lbal_addr); + + writeb(0xaa, (void __iomem *) ioaddr->nsect_addr); + writeb(0x55, (void __iomem *) ioaddr->lbal_addr); + + writeb(0x55, (void __iomem *) ioaddr->nsect_addr); + writeb(0xaa, (void __iomem *) ioaddr->lbal_addr); + + nsect = readb((void __iomem *) ioaddr->nsect_addr); + lbal = readb((void __iomem *) ioaddr->lbal_addr); + + if ((nsect == 0x55) && (lbal == 0xaa)) + return 1; /* we found a device */ + + return 0; /* nothing found */ +} + +/** + * ata_devchk - PATA device presence detection + * @ap: ATA channel to examine + * @device: Device to examine (starting at zero) + * + * Dispatch ATA device presence detection, depending + * on whether we are using PIO or MMIO to talk to the + * ATA shadow registers. + * + * LOCKING: + * caller. + */ + +static unsigned int ata_devchk(struct ata_port *ap, + unsigned int device) +{ + if (ap->flags & ATA_FLAG_MMIO) + return ata_mmio_devchk(ap, device); + return ata_pio_devchk(ap, device); +} + +/** + * ata_dev_classify - determine device type based on ATA-spec signature + * @tf: ATA taskfile register set for device to be identified + * + * Determine from taskfile register contents whether a device is + * ATA or ATAPI, as per "Signature and persistence" section + * of ATA/PI spec (volume 1, sect 5.14). + * + * LOCKING: + * None. + * + * RETURNS: + * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN + * the event of failure. + */ + +unsigned int ata_dev_classify(struct ata_taskfile *tf) +{ + /* Apple's open source Darwin code hints that some devices only + * put a proper signature into the LBA mid/high registers, + * So, we only check those. It's sufficient for uniqueness. + */ + + if (((tf->lbam == 0) && (tf->lbah == 0)) || + ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) { + DPRINTK("found ATA device by sig\n"); + return ATA_DEV_ATA; + } + + if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) || + ((tf->lbam == 0x69) && (tf->lbah == 0x96))) { + DPRINTK("found ATAPI device by sig\n"); + return ATA_DEV_ATAPI; + } + + DPRINTK("unknown device\n"); + return ATA_DEV_UNKNOWN; +} + +/** + * ata_dev_try_classify - Parse returned ATA device signature + * @ap: ATA channel to examine + * @device: Device to examine (starting at zero) + * + * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs, + * an ATA/ATAPI-defined set of values is placed in the ATA + * shadow registers, indicating the results of device detection + * and diagnostics. + * + * Select the ATA device, and read the values from the ATA shadow + * registers. Then parse according to the Error register value, + * and the spec-defined values examined by ata_dev_classify(). + * + * LOCKING: + * caller. 
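 *
 * The signature tuples ata_dev_classify() tests are, concretely:
 *
 *	lbam 0x00 lbah 0x00, or lbam 0x3c lbah 0xc3  =>  ATA_DEV_ATA
 *	lbam 0x14 lbah 0xeb, or lbam 0x69 lbah 0x96  =>  ATA_DEV_ATAPI
 *
 * anything else yields ATA_DEV_UNKNOWN.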
+ */ + +static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device) +{ + struct ata_device *dev = &ap->device[device]; + struct ata_taskfile tf; + unsigned int class; + u8 err; + + ap->ops->dev_select(ap, device); + + memset(&tf, 0, sizeof(tf)); + + err = ata_chk_err(ap); + ap->ops->tf_read(ap, &tf); + + dev->class = ATA_DEV_NONE; + + /* see if device passed diags */ + if (err == 1) + /* do nothing */ ; + else if ((device == 0) && (err == 0x81)) + /* do nothing */ ; + else + return err; + + /* determine if device if ATA or ATAPI */ + class = ata_dev_classify(&tf); + if (class == ATA_DEV_UNKNOWN) + return err; + if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0)) + return err; + + dev->class = class; + + return err; +} + +/** + * ata_dev_id_string - Convert IDENTIFY DEVICE page into string + * @id: IDENTIFY DEVICE results we will examine + * @s: string into which data is output + * @ofs: offset into identify device page + * @len: length of string to return. must be an even number. + * + * The strings in the IDENTIFY DEVICE page are broken up into + * 16-bit chunks. Run through the string, and output each + * 8-bit chunk linearly, regardless of platform. + * + * LOCKING: + * caller. + */ + +void ata_dev_id_string(u16 *id, unsigned char *s, + unsigned int ofs, unsigned int len) +{ + unsigned int c; + + while (len > 0) { + c = id[ofs] >> 8; + *s = c; + s++; + + c = id[ofs] & 0xff; + *s = c; + s++; + + ofs++; + len -= 2; + } +} + +void ata_noop_dev_select (struct ata_port *ap, unsigned int device) +{ +} + +/** + * ata_std_dev_select - Select device 0/1 on ATA bus + * @ap: ATA channel to manipulate + * @device: ATA device (numbered from zero) to select + * + * Use the method defined in the ATA specification to + * make either device 0, or device 1, active on the + * ATA channel. + * + * LOCKING: + * caller. + */ + +void ata_std_dev_select (struct ata_port *ap, unsigned int device) +{ + u8 tmp; + + if (device == 0) + tmp = ATA_DEVICE_OBS; + else + tmp = ATA_DEVICE_OBS | ATA_DEV1; + + if (ap->flags & ATA_FLAG_MMIO) { + writeb(tmp, (void __iomem *) ap->ioaddr.device_addr); + } else { + outb(tmp, ap->ioaddr.device_addr); + } + ata_pause(ap); /* needed; also flushes, for mmio */ +} + +/** + * ata_dev_select - Select device 0/1 on ATA bus + * @ap: ATA channel to manipulate + * @device: ATA device (numbered from zero) to select + * @wait: non-zero to wait for Status register BSY bit to clear + * @can_sleep: non-zero if context allows sleeping + * + * Use the method defined in the ATA specification to + * make either device 0, or device 1, active on the + * ATA channel. + * + * This is a high-level version of ata_std_dev_select(), + * which additionally provides the services of inserting + * the proper pauses and status polling, where needed. + * + * LOCKING: + * caller. + */ + +void ata_dev_select(struct ata_port *ap, unsigned int device, + unsigned int wait, unsigned int can_sleep) +{ + VPRINTK("ENTER, ata%u: device %u, wait %u\n", + ap->id, device, wait); + + if (wait) + ata_wait_idle(ap); + + ap->ops->dev_select(ap, device); + + if (wait) { + if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI) + msleep(150); + ata_wait_idle(ap); + } +} + +/** + * ata_dump_id - IDENTIFY DEVICE info debugging output + * @dev: Device whose IDENTIFY DEVICE page we will dump + * + * Dump selected 16-bit words from a detected device's + * IDENTIFY PAGE page. + * + * LOCKING: + * caller. 
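 *
 * (Decoder ring for the words dumped below, per the ATA spec: 49 =
 * capabilities, 53 = field validity, 63 = MWDMA modes, 64 = advanced
 * PIO modes, 75 = queue depth, 80/81 = major/minor version, 82-84 =
 * command sets supported, 88 = UDMA modes, 93 = hardware reset result.)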
+ */ + +static inline void ata_dump_id(struct ata_device *dev) +{ + DPRINTK("49==0x%04x " + "53==0x%04x " + "63==0x%04x " + "64==0x%04x " + "75==0x%04x \n", + dev->id[49], + dev->id[53], + dev->id[63], + dev->id[64], + dev->id[75]); + DPRINTK("80==0x%04x " + "81==0x%04x " + "82==0x%04x " + "83==0x%04x " + "84==0x%04x \n", + dev->id[80], + dev->id[81], + dev->id[82], + dev->id[83], + dev->id[84]); + DPRINTK("88==0x%04x " + "93==0x%04x\n", + dev->id[88], + dev->id[93]); +} + +/** + * ata_dev_identify - obtain IDENTIFY x DEVICE page + * @ap: port on which device we wish to probe resides + * @device: device bus address, starting at zero + * + * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE + * command, and read back the 512-byte device information page. + * The device information page is fed to us via the standard + * PIO-IN protocol, but we hand-code it here. (TODO: investigate + * using standard PIO-IN paths) + * + * After reading the device information page, we use several + * bits of information from it to initialize data structures + * that will be used during the lifetime of the ata_device. + * Other data from the info page is used to disqualify certain + * older ATA devices we do not wish to support. + * + * LOCKING: + * Inherited from caller. Some functions called by this function + * obtain the host_set lock. + */ + +static void ata_dev_identify(struct ata_port *ap, unsigned int device) +{ + struct ata_device *dev = &ap->device[device]; + unsigned int i; + u16 tmp; + unsigned long xfer_modes; + u8 status; + unsigned int using_edd; + DECLARE_COMPLETION(wait); + struct ata_queued_cmd *qc; + unsigned long flags; + int rc; + + if (!ata_dev_present(dev)) { + DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n", + ap->id, device); + return; + } + + if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET)) + using_edd = 0; + else + using_edd = 1; + + DPRINTK("ENTER, host %u, dev %u\n", ap->id, device); + + assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI || + dev->class == ATA_DEV_NONE); + + ata_dev_select(ap, device, 1, 1); /* select device 0/1 */ + + qc = ata_qc_new_init(ap, dev); + BUG_ON(qc == NULL); + + ata_sg_init_one(qc, dev->id, sizeof(dev->id)); + qc->dma_dir = DMA_FROM_DEVICE; + qc->tf.protocol = ATA_PROT_PIO; + qc->nsect = 1; + +retry: + if (dev->class == ATA_DEV_ATA) { + qc->tf.command = ATA_CMD_ID_ATA; + DPRINTK("do ATA identify\n"); + } else { + qc->tf.command = ATA_CMD_ID_ATAPI; + DPRINTK("do ATAPI identify\n"); + } + + qc->waiting = &wait; + qc->complete_fn = ata_qc_complete_noop; + + spin_lock_irqsave(&ap->host_set->lock, flags); + rc = ata_qc_issue(qc); + spin_unlock_irqrestore(&ap->host_set->lock, flags); + + if (rc) + goto err_out; + else + wait_for_completion(&wait); + + status = ata_chk_status(ap); + if (status & ATA_ERR) { + /* + * arg! EDD works for all test cases, but seems to return + * the ATA signature for some ATAPI devices. Until the + * reason for this is found and fixed, we fix up the mess + * here. If IDENTIFY DEVICE returns command aborted + * (as ATAPI devices do), then we issue an + * IDENTIFY PACKET DEVICE. + * + * ATA software reset (SRST, the default) does not appear + * to have this problem. 
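 *
 * (The retry below reuses the same queued command: dev->class is
 * flipped to ATA_DEV_ATAPI, the scatter/gather cursor fields are
 * rewound to zero, and IDENTIFY PACKET DEVICE is issued in place of
 * IDENTIFY DEVICE.)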
+ */ + if ((using_edd) && (qc->tf.command == ATA_CMD_ID_ATA)) { + u8 err = ata_chk_err(ap); + if (err & ATA_ABORTED) { + dev->class = ATA_DEV_ATAPI; + qc->cursg = 0; + qc->cursg_ofs = 0; + qc->cursect = 0; + qc->nsect = 1; + goto retry; + } + } + goto err_out; + } + + swap_buf_le16(dev->id, ATA_ID_WORDS); + + /* print device capabilities */ + printk(KERN_DEBUG "ata%u: dev %u cfg " + "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n", + ap->id, device, dev->id[49], + dev->id[82], dev->id[83], dev->id[84], + dev->id[85], dev->id[86], dev->id[87], + dev->id[88]); + + /* + * common ATA, ATAPI feature tests + */ + + /* we require LBA and DMA support (bits 8 & 9 of word 49) */ + if (!ata_id_has_dma(dev->id) || !ata_id_has_lba(dev->id)) { + printk(KERN_DEBUG "ata%u: no dma/lba\n", ap->id); + goto err_out_nosup; + } + + /* quick-n-dirty find max transfer mode; for printk only */ + xfer_modes = dev->id[ATA_ID_UDMA_MODES]; + if (!xfer_modes) + xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA; + if (!xfer_modes) { + xfer_modes = (dev->id[ATA_ID_PIO_MODES]) << (ATA_SHIFT_PIO + 3); + xfer_modes |= (0x7 << ATA_SHIFT_PIO); + } + + ata_dump_id(dev); + + /* ATA-specific feature tests */ + if (dev->class == ATA_DEV_ATA) { + if (!ata_id_is_ata(dev->id)) /* sanity check */ + goto err_out_nosup; + + tmp = dev->id[ATA_ID_MAJOR_VER]; + for (i = 14; i >= 1; i--) + if (tmp & (1 << i)) + break; + + /* we require at least ATA-3 */ + if (i < 3) { + printk(KERN_DEBUG "ata%u: no ATA-3\n", ap->id); + goto err_out_nosup; + } + + if (ata_id_has_lba48(dev->id)) { + dev->flags |= ATA_DFLAG_LBA48; + dev->n_sectors = ata_id_u64(dev->id, 100); + } else { + dev->n_sectors = ata_id_u32(dev->id, 60); + } + + ap->host->max_cmd_len = 16; + + /* print device info to dmesg */ + printk(KERN_INFO "ata%u: dev %u ATA, max %s, %Lu sectors:%s\n", + ap->id, device, + ata_mode_string(xfer_modes), + (unsigned long long)dev->n_sectors, + dev->flags & ATA_DFLAG_LBA48 ? " lba48" : ""); + } + + /* ATAPI-specific feature tests */ + else { + if (ata_id_is_ata(dev->id)) /* sanity check */ + goto err_out_nosup; + + rc = atapi_cdb_len(dev->id); + if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { + printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id); + goto err_out_nosup; + } + ap->cdb_len = (unsigned int) rc; + ap->host->max_cmd_len = (unsigned char) ap->cdb_len; + + /* print device info to dmesg */ + printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n", + ap->id, device, + ata_mode_string(xfer_modes)); + } + + DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap)); + return; + +err_out_nosup: + printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n", + ap->id, device); +err_out: + dev->class++; /* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */ + DPRINTK("EXIT, err\n"); +} + +/** + * ata_bus_probe - Reset and probe ATA bus + * @ap: Bus to probe + * + * LOCKING: + * + * RETURNS: + * Zero on success, non-zero on error. 
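 *
 * The sequence below is: ->phy_reset(), ata_dev_identify() for each
 * device, the optional ->dev_config() hook, then ata_set_mode(); if
 * any step leaves ATA_FLAG_PORT_DISABLED set, the probe bails out
 * through ->port_disable().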
+ */ + +static int ata_bus_probe(struct ata_port *ap) +{ + unsigned int i, found = 0; + + ap->ops->phy_reset(ap); + if (ap->flags & ATA_FLAG_PORT_DISABLED) + goto err_out; + + for (i = 0; i < ATA_MAX_DEVICES; i++) { + ata_dev_identify(ap, i); + if (ata_dev_present(&ap->device[i])) { + found = 1; + if (ap->ops->dev_config) + ap->ops->dev_config(ap, &ap->device[i]); + } + } + + if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED)) + goto err_out_disable; + + ata_set_mode(ap); + if (ap->flags & ATA_FLAG_PORT_DISABLED) + goto err_out_disable; + + return 0; + +err_out_disable: + ap->ops->port_disable(ap); +err_out: + return -1; +} + +/** + * ata_port_probe - + * @ap: + * + * LOCKING: + */ + +void ata_port_probe(struct ata_port *ap) +{ + ap->flags &= ~ATA_FLAG_PORT_DISABLED; +} + +/** + * __sata_phy_reset - + * @ap: + * + * LOCKING: + * + */ +void __sata_phy_reset(struct ata_port *ap) +{ + u32 sstatus; + unsigned long timeout = jiffies + (HZ * 5); + + if (ap->flags & ATA_FLAG_SATA_RESET) { + scr_write(ap, SCR_CONTROL, 0x301); /* issue phy wake/reset */ + scr_read(ap, SCR_STATUS); /* dummy read; flush */ + udelay(400); /* FIXME: a guess */ + } + scr_write(ap, SCR_CONTROL, 0x300); /* issue phy wake/clear reset */ + + /* wait for phy to become ready, if necessary */ + do { + msleep(200); + sstatus = scr_read(ap, SCR_STATUS); + if ((sstatus & 0xf) != 1) + break; + } while (time_before(jiffies, timeout)); + + /* TODO: phy layer with polling, timeouts, etc. */ + if (sata_dev_present(ap)) + ata_port_probe(ap); + else { + sstatus = scr_read(ap, SCR_STATUS); + printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n", + ap->id, sstatus); + ata_port_disable(ap); + } + + if (ap->flags & ATA_FLAG_PORT_DISABLED) + return; + + if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { + ata_port_disable(ap); + return; + } + + ap->cbl = ATA_CBL_SATA; +} + +/** + * __sata_phy_reset - + * @ap: + * + * LOCKING: + * + */ +void sata_phy_reset(struct ata_port *ap) +{ + __sata_phy_reset(ap); + if (ap->flags & ATA_FLAG_PORT_DISABLED) + return; + ata_bus_reset(ap); +} + +/** + * ata_port_disable - + * @ap: + * + * LOCKING: + */ + +void ata_port_disable(struct ata_port *ap) +{ + ap->device[0].class = ATA_DEV_NONE; + ap->device[1].class = ATA_DEV_NONE; + ap->flags |= ATA_FLAG_PORT_DISABLED; +} + +static struct { + unsigned int shift; + u8 base; +} xfer_mode_classes[] = { + { ATA_SHIFT_UDMA, XFER_UDMA_0 }, + { ATA_SHIFT_MWDMA, XFER_MW_DMA_0 }, + { ATA_SHIFT_PIO, XFER_PIO_0 }, +}; + +static inline u8 base_from_shift(unsigned int shift) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) + if (xfer_mode_classes[i].shift == shift) + return xfer_mode_classes[i].base; + + return 0xff; +} + +static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev) +{ + int ofs, idx; + u8 base; + + if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED)) + return; + + if (dev->xfer_shift == ATA_SHIFT_PIO) + dev->flags |= ATA_DFLAG_PIO; + + ata_dev_set_xfermode(ap, dev); + + base = base_from_shift(dev->xfer_shift); + ofs = dev->xfer_mode - base; + idx = ofs + dev->xfer_shift; + WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str)); + + DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n", + idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs); + + printk(KERN_INFO "ata%u: dev %u configured for %s\n", + ap->id, dev->devno, xfer_mode_str[idx]); +} + +static int ata_host_set_pio(struct ata_port *ap) +{ + unsigned int mask; + int x, i; + u8 base, xfer_mode; + + mask = ata_get_mode_mask(ap, 
ATA_SHIFT_PIO); + x = fgb(mask); + if (x < 0) { + printk(KERN_WARNING "ata%u: no PIO support\n", ap->id); + return -1; + } + + base = base_from_shift(ATA_SHIFT_PIO); + xfer_mode = base + x; + + DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n", + (int)base, (int)xfer_mode, mask, x); + + for (i = 0; i < ATA_MAX_DEVICES; i++) { + struct ata_device *dev = &ap->device[i]; + if (ata_dev_present(dev)) { + dev->pio_mode = xfer_mode; + dev->xfer_mode = xfer_mode; + dev->xfer_shift = ATA_SHIFT_PIO; + if (ap->ops->set_piomode) + ap->ops->set_piomode(ap, dev); + } + } + + return 0; +} + +static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode, + unsigned int xfer_shift) +{ + int i; + + for (i = 0; i < ATA_MAX_DEVICES; i++) { + struct ata_device *dev = &ap->device[i]; + if (ata_dev_present(dev)) { + dev->dma_mode = xfer_mode; + dev->xfer_mode = xfer_mode; + dev->xfer_shift = xfer_shift; + if (ap->ops->set_dmamode) + ap->ops->set_dmamode(ap, dev); + } + } +} + +/** + * ata_set_mode - Program timings and issue SET FEATURES - XFER + * @ap: port on which timings will be programmed + * + * LOCKING: + * + */ +static void ata_set_mode(struct ata_port *ap) +{ + unsigned int i, xfer_shift; + u8 xfer_mode; + int rc; + + /* step 1: always set host PIO timings */ + rc = ata_host_set_pio(ap); + if (rc) + goto err_out; + + /* step 2: choose the best data xfer mode */ + xfer_mode = xfer_shift = 0; + rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift); + if (rc) + goto err_out; + + /* step 3: if that xfer mode isn't PIO, set host DMA timings */ + if (xfer_shift != ATA_SHIFT_PIO) + ata_host_set_dma(ap, xfer_mode, xfer_shift); + + /* step 4: update devices' xfer mode */ + ata_dev_set_mode(ap, &ap->device[0]); + ata_dev_set_mode(ap, &ap->device[1]); + + if (ap->flags & ATA_FLAG_PORT_DISABLED) + return; + + if (ap->ops->post_set_mode) + ap->ops->post_set_mode(ap); + + for (i = 0; i < 2; i++) { + struct ata_device *dev = &ap->device[i]; + ata_dev_set_protocol(dev); + } + + return; + +err_out: + ata_port_disable(ap); +} + +/** + * ata_busy_sleep - sleep until BSY clears, or timeout + * @ap: port containing status register to be polled + * @tmout_pat: impatience timeout + * @tmout: overall timeout + * + * LOCKING: + * + */ + +static unsigned int ata_busy_sleep (struct ata_port *ap, + unsigned long tmout_pat, + unsigned long tmout) +{ + unsigned long timer_start, timeout; + u8 status; + + status = ata_busy_wait(ap, ATA_BUSY, 300); + timer_start = jiffies; + timeout = timer_start + tmout_pat; + while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) { + msleep(50); + status = ata_busy_wait(ap, ATA_BUSY, 3); + } + + if (status & ATA_BUSY) + printk(KERN_WARNING "ata%u is slow to respond, " + "please be patient\n", ap->id); + + timeout = timer_start + tmout; + while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) { + msleep(50); + status = ata_chk_status(ap); + } + + if (status & ATA_BUSY) { + printk(KERN_ERR "ata%u failed to respond (%lu secs)\n", + ap->id, tmout / HZ); + return 1; + } + + return 0; +} + +static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned int dev0 = devmask & (1 << 0); + unsigned int dev1 = devmask & (1 << 1); + unsigned long timeout; + + /* if device 0 was found in ata_devchk, wait for its + * BSY bit to clear + */ + if (dev0) + ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); + + /* if device 1 was found in ata_devchk, wait for + * register access, then wait for BSY to clear + */ + timeout = 
jiffies + ATA_TMOUT_BOOT; + while (dev1) { + u8 nsect, lbal; + + ap->ops->dev_select(ap, 1); + if (ap->flags & ATA_FLAG_MMIO) { + nsect = readb((void __iomem *) ioaddr->nsect_addr); + lbal = readb((void __iomem *) ioaddr->lbal_addr); + } else { + nsect = inb(ioaddr->nsect_addr); + lbal = inb(ioaddr->lbal_addr); + } + if ((nsect == 1) && (lbal == 1)) + break; + if (time_after(jiffies, timeout)) { + dev1 = 0; + break; + } + msleep(50); /* give drive a breather */ + } + if (dev1) + ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); + + /* is all this really necessary? */ + ap->ops->dev_select(ap, 0); + if (dev1) + ap->ops->dev_select(ap, 1); + if (dev0) + ap->ops->dev_select(ap, 0); +} + +/** + * ata_bus_edd - + * @ap: + * + * LOCKING: + * + */ + +static unsigned int ata_bus_edd(struct ata_port *ap) +{ + struct ata_taskfile tf; + + /* set up execute-device-diag (bus reset) taskfile */ + /* also, take interrupts to a known state (disabled) */ + DPRINTK("execute-device-diag\n"); + ata_tf_init(ap, &tf, 0); + tf.ctl |= ATA_NIEN; + tf.command = ATA_CMD_EDD; + tf.protocol = ATA_PROT_NODATA; + + /* do bus reset */ + ata_tf_to_host(ap, &tf); + + /* spec says at least 2ms. but who knows with those + * crazy ATAPI devices... + */ + msleep(150); + + return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); +} + +static unsigned int ata_bus_softreset(struct ata_port *ap, + unsigned int devmask) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + + DPRINTK("ata%u: bus reset via SRST\n", ap->id); + + /* software reset. causes dev0 to be selected */ + if (ap->flags & ATA_FLAG_MMIO) { + writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr); + udelay(20); /* FIXME: flush */ + writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr); + udelay(20); /* FIXME: flush */ + writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr); + } else { + outb(ap->ctl, ioaddr->ctl_addr); + udelay(10); + outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr); + udelay(10); + outb(ap->ctl, ioaddr->ctl_addr); + } + + /* spec mandates ">= 2ms" before checking status. + * We wait 150ms, because that was the magic delay used for + * ATAPI devices in Hale Landis's ATADRVR, for the period of time + * between when the ATA command register is written, and then + * status is checked. Because waiting for "a while" before + * checking status is fine, post SRST, we perform this magic + * delay here as well. + */ + msleep(150); + + ata_bus_post_reset(ap, devmask); + + return 0; +} + +/** + * ata_bus_reset - reset host port and associated ATA channel + * @ap: port to reset + * + * This is typically the first time we actually start issuing + * commands to the ATA channel. We wait for BSY to clear, then + * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its + * result. Determine what devices, if any, are on the channel + * by looking at the device 0/1 error register. Look at the signature + * stored in each device's taskfile registers, to determine if + * the device is ATA or ATAPI. + * + * LOCKING: + * Inherited from caller. Some functions called by this function + * obtain the host_set lock. + * + * SIDE EFFECTS: + * Sets ATA_FLAG_PORT_DISABLED if bus reset fails. 
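 *
 * (Flow recap of the function below: presence-detect devices 0/1,
 * then software reset via SRST, or EDD on pre-SRST hosts, then
 * classify each device by its reset signature, re-enable interrupts,
 * and disable the whole port if neither device answered.)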
+ */ + +void ata_bus_reset(struct ata_port *ap) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; + u8 err; + unsigned int dev0, dev1 = 0, rc = 0, devmask = 0; + + DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no); + + /* determine if device 0/1 are present */ + if (ap->flags & ATA_FLAG_SATA_RESET) + dev0 = 1; + else { + dev0 = ata_devchk(ap, 0); + if (slave_possible) + dev1 = ata_devchk(ap, 1); + } + + if (dev0) + devmask |= (1 << 0); + if (dev1) + devmask |= (1 << 1); + + /* select device 0 again */ + ap->ops->dev_select(ap, 0); + + /* issue bus reset */ + if (ap->flags & ATA_FLAG_SRST) + rc = ata_bus_softreset(ap, devmask); + else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) { + /* set up device control */ + if (ap->flags & ATA_FLAG_MMIO) + writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr); + else + outb(ap->ctl, ioaddr->ctl_addr); + rc = ata_bus_edd(ap); + } + + if (rc) + goto err_out; + + /* + * determine by signature whether we have ATA or ATAPI devices + */ + err = ata_dev_try_classify(ap, 0); + if ((slave_possible) && (err != 0x81)) + ata_dev_try_classify(ap, 1); + + /* re-enable interrupts */ + if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */ + ata_irq_on(ap); + + /* is double-select really necessary? */ + if (ap->device[1].class != ATA_DEV_NONE) + ap->ops->dev_select(ap, 1); + if (ap->device[0].class != ATA_DEV_NONE) + ap->ops->dev_select(ap, 0); + + /* if no devices were detected, disable this port */ + if ((ap->device[0].class == ATA_DEV_NONE) && + (ap->device[1].class == ATA_DEV_NONE)) + goto err_out; + + if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) { + /* set up device control for ATA_FLAG_SATA_RESET */ + if (ap->flags & ATA_FLAG_MMIO) + writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr); + else + outb(ap->ctl, ioaddr->ctl_addr); + } + + DPRINTK("EXIT\n"); + return; + +err_out: + printk(KERN_ERR "ata%u: disabling port\n", ap->id); + ap->ops->port_disable(ap); + + DPRINTK("EXIT\n"); +} + +static void ata_pr_blacklisted(struct ata_port *ap, struct ata_device *dev) +{ + printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n", + ap->id, dev->devno); +} + +static const char * ata_dma_blacklist [] = { + "WDC AC11000H", + "WDC AC22100H", + "WDC AC32500H", + "WDC AC33100H", + "WDC AC31600H", + "WDC AC32100H", + "WDC AC23200L", + "Compaq CRD-8241B", + "CRD-8400B", + "CRD-8480B", + "CRD-8482B", + "CRD-84", + "SanDisk SDP3B", + "SanDisk SDP3B-64", + "SANYO CD-ROM CRD", + "HITACHI CDR-8", + "HITACHI CDR-8335", + "HITACHI CDR-8435", + "Toshiba CD-ROM XM-6202B", + "CD-532E-A", + "E-IDE CD-ROM CR-840", + "CD-ROM Drive/F5A", + "WPI CDD-820", + "SAMSUNG CD-ROM SC-148C", + "SAMSUNG CD-ROM SC", + "SanDisk SDP3B-64", + "SAMSUNG CD-ROM SN-124", + "ATAPI CD-ROM DRIVE 40X MAXIMUM", + "_NEC DV5800A", +}; + +static int ata_dma_blacklisted(struct ata_port *ap, struct ata_device *dev) +{ + unsigned char model_num[40]; + char *s; + unsigned int len; + int i; + + ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS, + sizeof(model_num)); + s = &model_num[0]; + len = strnlen(s, sizeof(model_num)); + + /* ATAPI specifies that empty space is blank-filled; remove blanks */ + while ((len > 0) && (s[len - 1] == ' ')) { + len--; + s[len] = 0; + } + + for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++) + if (!strncmp(ata_dma_blacklist[i], s, len)) + return 1; + + return 0; +} + +static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift) +{ + struct ata_device *master, *slave; + unsigned 
int mask; + + master = &ap->device[0]; + slave = &ap->device[1]; + + assert (ata_dev_present(master) || ata_dev_present(slave)); + + if (shift == ATA_SHIFT_UDMA) { + mask = ap->udma_mask; + if (ata_dev_present(master)) { + mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff); + if (ata_dma_blacklisted(ap, master)) { + mask = 0; + ata_pr_blacklisted(ap, master); + } + } + if (ata_dev_present(slave)) { + mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff); + if (ata_dma_blacklisted(ap, slave)) { + mask = 0; + ata_pr_blacklisted(ap, slave); + } + } + } + else if (shift == ATA_SHIFT_MWDMA) { + mask = ap->mwdma_mask; + if (ata_dev_present(master)) { + mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07); + if (ata_dma_blacklisted(ap, master)) { + mask = 0; + ata_pr_blacklisted(ap, master); + } + } + if (ata_dev_present(slave)) { + mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07); + if (ata_dma_blacklisted(ap, slave)) { + mask = 0; + ata_pr_blacklisted(ap, slave); + } + } + } + else if (shift == ATA_SHIFT_PIO) { + mask = ap->pio_mask; + if (ata_dev_present(master)) { + /* spec doesn't return explicit support for + * PIO0-2, so we fake it + */ + u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03; + tmp_mode <<= 3; + tmp_mode |= 0x7; + mask &= tmp_mode; + } + if (ata_dev_present(slave)) { + /* spec doesn't return explicit support for + * PIO0-2, so we fake it + */ + u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03; + tmp_mode <<= 3; + tmp_mode |= 0x7; + mask &= tmp_mode; + } + } + else { + mask = 0xffffffff; /* shut up compiler warning */ + BUG(); + } + + return mask; +} + +/* find greatest bit */ +static int fgb(u32 bitmap) +{ + unsigned int i; + int x = -1; + + for (i = 0; i < 32; i++) + if (bitmap & (1 << i)) + x = i; + + return x; +} + +/** + * ata_choose_xfer_mode - attempt to find best transfer mode + * @ap: Port for which an xfer mode will be selected + * @xfer_mode_out: (output) SET FEATURES - XFER MODE code + * @xfer_shift_out: (output) bit shift that selects this mode + * + * LOCKING: + * + * RETURNS: + * Zero on success, negative on error. 
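 *
 * Worked example (illustrative numbers): with ap->udma_mask == 0x3f
 * and both drives reporting 0x3f in the UDMA word of their IDENTIFY
 * data, ata_get_mode_mask() returns 0x3f, fgb() picks bit 5, and the
 * result is XFER_UDMA_0 + 5 (UDMA/100) with shift ATA_SHIFT_UDMA.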
+ */ + +static int ata_choose_xfer_mode(struct ata_port *ap, + u8 *xfer_mode_out, + unsigned int *xfer_shift_out) +{ + unsigned int mask, shift; + int x, i; + + for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) { + shift = xfer_mode_classes[i].shift; + mask = ata_get_mode_mask(ap, shift); + + x = fgb(mask); + if (x >= 0) { + *xfer_mode_out = xfer_mode_classes[i].base + x; + *xfer_shift_out = shift; + return 0; + } + } + + return -1; +} + +/** + * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command + * @ap: Port associated with device @dev + * @dev: Device to which command will be sent + * + * LOCKING: + */ + +static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev) +{ + DECLARE_COMPLETION(wait); + struct ata_queued_cmd *qc; + int rc; + unsigned long flags; + + /* set up set-features taskfile */ + DPRINTK("set features - xfer mode\n"); + + qc = ata_qc_new_init(ap, dev); + BUG_ON(qc == NULL); + + qc->tf.command = ATA_CMD_SET_FEATURES; + qc->tf.feature = SETFEATURES_XFER; + qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + qc->tf.protocol = ATA_PROT_NODATA; + qc->tf.nsect = dev->xfer_mode; + + qc->waiting = &wait; + qc->complete_fn = ata_qc_complete_noop; + + spin_lock_irqsave(&ap->host_set->lock, flags); + rc = ata_qc_issue(qc); + spin_unlock_irqrestore(&ap->host_set->lock, flags); + + if (rc) + ata_port_disable(ap); + else + wait_for_completion(&wait); + + DPRINTK("EXIT\n"); +} + +/** + * ata_sg_clean - + * @qc: + * + * LOCKING: + */ + +static void ata_sg_clean(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct scatterlist *sg = qc->sg; + int dir = qc->dma_dir; + + assert(qc->flags & ATA_QCFLAG_DMAMAP); + assert(sg != NULL); + + if (qc->flags & ATA_QCFLAG_SINGLE) + assert(qc->n_elem == 1); + + DPRINTK("unmapping %u sg elements\n", qc->n_elem); + + if (qc->flags & ATA_QCFLAG_SG) + dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir); + else + dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]), + sg_dma_len(&sg[0]), dir); + + qc->flags &= ~ATA_QCFLAG_DMAMAP; + qc->sg = NULL; +} + +/** + * ata_fill_sg - Fill PCI IDE PRD table + * @qc: Metadata associated with taskfile to be transferred + * + * LOCKING: + * + */ +static void ata_fill_sg(struct ata_queued_cmd *qc) +{ + struct scatterlist *sg = qc->sg; + struct ata_port *ap = qc->ap; + unsigned int idx, nelem; + + assert(sg != NULL); + assert(qc->n_elem > 0); + + idx = 0; + for (nelem = qc->n_elem; nelem; nelem--,sg++) { + u32 addr, offset; + u32 sg_len, len; + + /* determine if physical DMA addr spans 64K boundary. + * Note h/w doesn't support 64-bit, so we unconditionally + * truncate dma_addr_t to u32. 
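	 *
	 * Example with illustrative addresses: a 0x20-byte segment at
	 * bus address 0x1fff0 crosses the 64K line at 0x20000, so the
	 * loop below emits two PRD entries, (0x1fff0, len 0x10) and
	 * (0x20000, len 0x10).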
+ */ + addr = (u32) sg_dma_address(sg); + sg_len = sg_dma_len(sg); + + while (sg_len) { + offset = addr & 0xffff; + len = sg_len; + if ((offset + sg_len) > 0x10000) + len = 0x10000 - offset; + + ap->prd[idx].addr = cpu_to_le32(addr); + ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff); + VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len); + + idx++; + sg_len -= len; + addr += len; + } + } + + if (idx) + ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); +} +/** + * ata_check_atapi_dma - Check whether ATAPI DMA can be supported + * @qc: Metadata associated with taskfile to check + * + * LOCKING: + * RETURNS: 0 when ATAPI DMA can be used + * nonzero otherwise + */ +int ata_check_atapi_dma(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + int rc = 0; /* Assume ATAPI DMA is OK by default */ + + if (ap->ops->check_atapi_dma) + rc = ap->ops->check_atapi_dma(qc); + + return rc; +} +/** + * ata_qc_prep - Prepare taskfile for submission + * @qc: Metadata associated with taskfile to be prepared + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ +void ata_qc_prep(struct ata_queued_cmd *qc) +{ + if (!(qc->flags & ATA_QCFLAG_DMAMAP)) + return; + + ata_fill_sg(qc); +} + +void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen) +{ + struct scatterlist *sg; + + qc->flags |= ATA_QCFLAG_SINGLE; + + memset(&qc->sgent, 0, sizeof(qc->sgent)); + qc->sg = &qc->sgent; + qc->n_elem = 1; + qc->buf_virt = buf; + + sg = qc->sg; + sg->page = virt_to_page(buf); + sg->offset = (unsigned long) buf & ~PAGE_MASK; + sg_dma_len(sg) = buflen; +} + +void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, + unsigned int n_elem) +{ + qc->flags |= ATA_QCFLAG_SG; + qc->sg = sg; + qc->n_elem = n_elem; +} + +/** + * ata_sg_setup_one - + * @qc: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + * + * RETURNS: + * + */ + +static int ata_sg_setup_one(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + int dir = qc->dma_dir; + struct scatterlist *sg = qc->sg; + dma_addr_t dma_address; + + dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt, + sg_dma_len(sg), dir); + if (dma_mapping_error(dma_address)) + return -1; + + sg_dma_address(sg) = dma_address; + + DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg), + qc->tf.flags & ATA_TFLAG_WRITE ? 
"write" : "read"); + + return 0; +} + +/** + * ata_sg_setup - + * @qc: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + * + * RETURNS: + * + */ + +static int ata_sg_setup(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct scatterlist *sg = qc->sg; + int n_elem, dir; + + VPRINTK("ENTER, ata%u\n", ap->id); + assert(qc->flags & ATA_QCFLAG_SG); + + dir = qc->dma_dir; + n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir); + if (n_elem < 1) + return -1; + + DPRINTK("%d sg elements mapped\n", n_elem); + + qc->n_elem = n_elem; + + return 0; +} + +/** + * ata_pio_poll - + * @ap: + * + * LOCKING: + * + * RETURNS: + * + */ + +static unsigned long ata_pio_poll(struct ata_port *ap) +{ + u8 status; + unsigned int poll_state = PIO_ST_UNKNOWN; + unsigned int reg_state = PIO_ST_UNKNOWN; + const unsigned int tmout_state = PIO_ST_TMOUT; + + switch (ap->pio_task_state) { + case PIO_ST: + case PIO_ST_POLL: + poll_state = PIO_ST_POLL; + reg_state = PIO_ST; + break; + case PIO_ST_LAST: + case PIO_ST_LAST_POLL: + poll_state = PIO_ST_LAST_POLL; + reg_state = PIO_ST_LAST; + break; + default: + BUG(); + break; + } + + status = ata_chk_status(ap); + if (status & ATA_BUSY) { + if (time_after(jiffies, ap->pio_task_timeout)) { + ap->pio_task_state = tmout_state; + return 0; + } + ap->pio_task_state = poll_state; + return ATA_SHORT_PAUSE; + } + + ap->pio_task_state = reg_state; + return 0; +} + +/** + * ata_pio_complete - + * @ap: + * + * LOCKING: + */ + +static void ata_pio_complete (struct ata_port *ap) +{ + struct ata_queued_cmd *qc; + u8 drv_stat; + + /* + * This is purely hueristic. This is a fast path. + * Sometimes when we enter, BSY will be cleared in + * a chk-status or two. If not, the drive is probably seeking + * or something. Snooze for a couple msecs, then + * chk-status again. If still busy, fall back to + * PIO_ST_POLL state. 
+ */ + drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); + if (drv_stat & (ATA_BUSY | ATA_DRQ)) { + msleep(2); + drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); + if (drv_stat & (ATA_BUSY | ATA_DRQ)) { + ap->pio_task_state = PIO_ST_LAST_POLL; + ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; + return; + } + } + + drv_stat = ata_wait_idle(ap); + if (!ata_ok(drv_stat)) { + ap->pio_task_state = PIO_ST_ERR; + return; + } + + qc = ata_qc_from_tag(ap, ap->active_tag); + assert(qc != NULL); + + ap->pio_task_state = PIO_ST_IDLE; + + ata_irq_on(ap); + + ata_qc_complete(qc, drv_stat); +} + +void swap_buf_le16(u16 *buf, unsigned int buf_words) +{ +#ifdef __BIG_ENDIAN + unsigned int i; + + for (i = 0; i < buf_words; i++) + buf[i] = le16_to_cpu(buf[i]); +#endif /* __BIG_ENDIAN */ +} + +static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf, + unsigned int buflen, int write_data) +{ + unsigned int i; + unsigned int words = buflen >> 1; + u16 *buf16 = (u16 *) buf; + void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr; + + if (write_data) { + for (i = 0; i < words; i++) + writew(le16_to_cpu(buf16[i]), mmio); + } else { + for (i = 0; i < words; i++) + buf16[i] = cpu_to_le16(readw(mmio)); + } +} + +static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf, + unsigned int buflen, int write_data) +{ + unsigned int dwords = buflen >> 1; + + if (write_data) + outsw(ap->ioaddr.data_addr, buf, dwords); + else + insw(ap->ioaddr.data_addr, buf, dwords); +} + +static void ata_data_xfer(struct ata_port *ap, unsigned char *buf, + unsigned int buflen, int do_write) +{ + if (ap->flags & ATA_FLAG_MMIO) + ata_mmio_data_xfer(ap, buf, buflen, do_write); + else + ata_pio_data_xfer(ap, buf, buflen, do_write); +} + +static void ata_pio_sector(struct ata_queued_cmd *qc) +{ + int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); + struct scatterlist *sg = qc->sg; + struct ata_port *ap = qc->ap; + struct page *page; + unsigned int offset; + unsigned char *buf; + + if (qc->cursect == (qc->nsect - 1)) + ap->pio_task_state = PIO_ST_LAST; + + page = sg[qc->cursg].page; + offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE; + + /* get the current page and offset */ + page = nth_page(page, (offset >> PAGE_SHIFT)); + offset %= PAGE_SIZE; + + buf = kmap(page) + offset; + + qc->cursect++; + qc->cursg_ofs++; + + if ((qc->cursg_ofs * ATA_SECT_SIZE) == sg_dma_len(&sg[qc->cursg])) { + qc->cursg++; + qc->cursg_ofs = 0; + } + + DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? 
"write" : "read"); + + /* do the actual data transfer */ + do_write = (qc->tf.flags & ATA_TFLAG_WRITE); + ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write); + + kunmap(page); +} + +static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) +{ + int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); + struct scatterlist *sg = qc->sg; + struct ata_port *ap = qc->ap; + struct page *page; + unsigned char *buf; + unsigned int offset, count; + + if (qc->curbytes == qc->nbytes - bytes) + ap->pio_task_state = PIO_ST_LAST; + +next_sg: + sg = &qc->sg[qc->cursg]; + +next_page: + page = sg->page; + offset = sg->offset + qc->cursg_ofs; + + /* get the current page and offset */ + page = nth_page(page, (offset >> PAGE_SHIFT)); + offset %= PAGE_SIZE; + + count = min(sg_dma_len(sg) - qc->cursg_ofs, bytes); + + /* don't cross page boundaries */ + count = min(count, (unsigned int)PAGE_SIZE - offset); + + buf = kmap(page) + offset; + + bytes -= count; + qc->curbytes += count; + qc->cursg_ofs += count; + + if (qc->cursg_ofs == sg_dma_len(sg)) { + qc->cursg++; + qc->cursg_ofs = 0; + } + + DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); + + /* do the actual data transfer */ + ata_data_xfer(ap, buf, count, do_write); + + kunmap(page); + + if (bytes) { + if (qc->cursg_ofs < sg_dma_len(sg)) + goto next_page; + goto next_sg; + } +} + +static void atapi_pio_bytes(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct ata_device *dev = qc->dev; + unsigned int ireason, bc_lo, bc_hi, bytes; + int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0; + + ap->ops->tf_read(ap, &qc->tf); + ireason = qc->tf.nsect; + bc_lo = qc->tf.lbam; + bc_hi = qc->tf.lbah; + bytes = (bc_hi << 8) | bc_lo; + + /* shall be cleared to zero, indicating xfer of data */ + if (ireason & (1 << 0)) + goto err_out; + + /* make sure transfer direction matches expected */ + i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0; + if (do_write != i_write) + goto err_out; + + __atapi_pio_bytes(qc, bytes); + + return; + +err_out: + printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n", + ap->id, dev->devno); + ap->pio_task_state = PIO_ST_ERR; +} + +/** + * ata_pio_sector - + * @ap: + * + * LOCKING: + */ + +static void ata_pio_block(struct ata_port *ap) +{ + struct ata_queued_cmd *qc; + u8 status; + + /* + * This is purely hueristic. This is a fast path. + * Sometimes when we enter, BSY will be cleared in + * a chk-status or two. If not, the drive is probably seeking + * or something. Snooze for a couple msecs, then + * chk-status again. If still busy, fall back to + * PIO_ST_POLL state. 
+/**
+ * ata_pio_block - poll status and transfer the next PIO data block
+ * @ap: port on which the PIO transaction is active
+ *
+ * LOCKING:
+ * None.  (executing in kernel thread context)
+ */
+
+static void ata_pio_block(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+	u8 status;
+
+	/*
+	 * This is purely heuristic.  This is a fast path.
+	 * Sometimes when we enter, BSY will be cleared in
+	 * a chk-status or two.  If not, the drive is probably seeking
+	 * or something.  Snooze for a couple msecs, then
+	 * chk-status again.  If still busy, fall back to
+	 * PIO_ST_POLL state.
+	 */
+	status = ata_busy_wait(ap, ATA_BUSY, 5);
+	if (status & ATA_BUSY) {
+		msleep(2);
+		status = ata_busy_wait(ap, ATA_BUSY, 10);
+		if (status & ATA_BUSY) {
+			ap->pio_task_state = PIO_ST_POLL;
+			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
+			return;
+		}
+	}
+
+	qc = ata_qc_from_tag(ap, ap->active_tag);
+	assert(qc != NULL);
+
+	if (is_atapi_taskfile(&qc->tf)) {
+		/* no more data to transfer or unsupported ATAPI command */
+		if ((status & ATA_DRQ) == 0) {
+			ap->pio_task_state = PIO_ST_IDLE;
+
+			ata_irq_on(ap);
+
+			ata_qc_complete(qc, status);
+			return;
+		}
+
+		atapi_pio_bytes(qc);
+	} else {
+		/* handle BSY=0, DRQ=0 as error */
+		if ((status & ATA_DRQ) == 0) {
+			ap->pio_task_state = PIO_ST_ERR;
+			return;
+		}
+
+		ata_pio_sector(qc);
+	}
+}
+
+static void ata_pio_error(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+	u8 drv_stat;
+
+	qc = ata_qc_from_tag(ap, ap->active_tag);
+	assert(qc != NULL);
+
+	drv_stat = ata_chk_status(ap);
+	printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n",
+	       ap->id, drv_stat);
+
+	ap->pio_task_state = PIO_ST_IDLE;
+
+	ata_irq_on(ap);
+
+	ata_qc_complete(qc, drv_stat | ATA_ERR);
+}
+
+static void ata_pio_task(void *_data)
+{
+	struct ata_port *ap = _data;
+	unsigned long timeout = 0;
+
+	switch (ap->pio_task_state) {
+	case PIO_ST_IDLE:
+		return;
+
+	case PIO_ST:
+		ata_pio_block(ap);
+		break;
+
+	case PIO_ST_LAST:
+		ata_pio_complete(ap);
+		break;
+
+	case PIO_ST_POLL:
+	case PIO_ST_LAST_POLL:
+		timeout = ata_pio_poll(ap);
+		break;
+
+	case PIO_ST_TMOUT:
+	case PIO_ST_ERR:
+		ata_pio_error(ap);
+		return;
+	}
+
+	if (timeout)
+		queue_delayed_work(ata_wq, &ap->pio_task,
+				   timeout);
+	else
+		queue_work(ata_wq, &ap->pio_task);
+}
+
+static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
+				struct scsi_cmnd *cmd)
+{
+	DECLARE_COMPLETION(wait);
+	struct ata_queued_cmd *qc;
+	unsigned long flags;
+	int rc;
+
+	DPRINTK("ATAPI request sense\n");
+
+	qc = ata_qc_new_init(ap, dev);
+	BUG_ON(qc == NULL);
+
+	/* FIXME: is this needed? */
+	memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
+
+	ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
+	qc->dma_dir = DMA_FROM_DEVICE;
+
+	memset(&qc->cdb, 0, sizeof(qc->cdb));	/* zero the whole CDB array */
+	qc->cdb[0] = REQUEST_SENSE;
+	qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
+
+	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	qc->tf.command = ATA_CMD_PACKET;
+
+	qc->tf.protocol = ATA_PROT_ATAPI;
+	qc->tf.lbam = (8 * 1024) & 0xff;
+	qc->tf.lbah = (8 * 1024) >> 8;
+	qc->nbytes = SCSI_SENSE_BUFFERSIZE;
+
+	qc->waiting = &wait;
+	qc->complete_fn = ata_qc_complete_noop;
+
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	rc = ata_qc_issue(qc);
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+	if (rc)
+		ata_port_disable(ap);
+	else
+		wait_for_completion(&wait);
+
+	DPRINTK("EXIT\n");
+}
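+/* Illustrative sketch of the synchronous-issue pattern used by
+ * atapi_request_sense() above: hang a completion off the qc, issue it
+ * under the host_set lock, and sleep until the interrupt/polling path
+ * calls ata_qc_complete().  The qc is assumed to be fully prepared.
+ */
+static void example_issue_and_wait(struct ata_port *ap,
+				   struct ata_queued_cmd *qc)
+{
+	DECLARE_COMPLETION(wait);
+	unsigned long flags;
+	int rc;
+
+	qc->waiting = &wait;
+	qc->complete_fn = ata_qc_complete_noop;
+
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	rc = ata_qc_issue(qc);
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+	if (rc)
+		ata_port_disable(ap);	/* could not issue; fail the port */
+	else
+		wait_for_completion(&wait);
+}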
+/**
+ * ata_qc_timeout - Handle timeout of queued command
+ * @qc: Command that timed out
+ *
+ * Some part of the kernel (currently, only the SCSI layer)
+ * has noticed that the active command on the port associated
+ * with @qc has not completed after a specified length of time.
+ * Handle this condition by disabling DMA (if necessary) and
+ * completing transactions, with error if necessary.
+ *
+ * This also handles the case of the "lost interrupt", where
+ * for some reason (possibly hardware bug, possibly driver bug)
+ * an interrupt was not delivered to the driver, even though the
+ * transaction completed successfully.
+ *
+ * LOCKING:
+ * Inherited from SCSI layer (none, can sleep)
+ */
+
+static void ata_qc_timeout(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *dev = qc->dev;
+	u8 host_stat = 0, drv_stat;
+
+	DPRINTK("ENTER\n");
+
+	/* FIXME: doesn't this conflict with timeout handling? */
+	if (qc->dev->class == ATA_DEV_ATAPI && qc->scsicmd) {
+		struct scsi_cmnd *cmd = qc->scsicmd;
+
+		if (!scsi_eh_eflags_chk(cmd, SCSI_EH_CANCEL_CMD)) {
+
+			/* finish completing original command */
+			__ata_qc_complete(qc);
+
+			atapi_request_sense(ap, dev, cmd);
+
+			cmd->result = (CHECK_CONDITION << 1) | (DID_OK << 16);
+			scsi_finish_command(cmd);
+
+			goto out;
+		}
+	}
+
+	/* hack alert!  We cannot use the supplied completion
+	 * function from inside the ->eh_strategy_handler() thread.
+	 * libata is the only user of ->eh_strategy_handler() in
+	 * any kernel, so the default scsi_done() assumes it is
+	 * not being called from the SCSI EH.
+	 */
+	qc->scsidone = scsi_finish_command;
+
+	switch (qc->tf.protocol) {
+
+	case ATA_PROT_DMA:
+	case ATA_PROT_ATAPI_DMA:
+		host_stat = ap->ops->bmdma_status(ap);
+
+		/* before we do anything else, clear DMA-Start bit */
+		ap->ops->bmdma_stop(ap);
+
+		/* fall through */
+
+	default:
+		ata_altstatus(ap);
+		drv_stat = ata_chk_status(ap);
+
+		/* ack bmdma irq events */
+		ap->ops->irq_clear(ap);
+
+		printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
+		       ap->id, qc->tf.command, drv_stat, host_stat);
+
+		/* complete taskfile transaction */
+		ata_qc_complete(qc, drv_stat);
+		break;
+	}
+out:
+	DPRINTK("EXIT\n");
+}
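+/* Illustrative sketch: the timeout handling above runs from the SCSI
+ * error handler, which a libata driver wires up in its host template
+ * roughly as below.  This fragment is an example, not a driver from
+ * the tree; a real template also fills in name, queue depths, etc.
+ */
+static struct scsi_host_template example_sht = {
+	.module			= THIS_MODULE,
+	.queuecommand		= ata_scsi_queuecmd,
+	.eh_strategy_handler	= ata_scsi_error,
+	.ioctl			= ata_scsi_ioctl,
+	.bios_param		= ata_std_bios_param,
+};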
+/**
+ * ata_eng_timeout - Handle timeout of queued command
+ * @ap: Port on which timed-out command is active
+ *
+ * Some part of the kernel (currently, only the SCSI layer)
+ * has noticed that the active command on port @ap has not
+ * completed after a specified length of time.  Handle this
+ * condition by disabling DMA (if necessary) and completing
+ * transactions, with error if necessary.
+ *
+ * This also handles the case of the "lost interrupt", where
+ * for some reason (possibly hardware bug, possibly driver bug)
+ * an interrupt was not delivered to the driver, even though the
+ * transaction completed successfully.
+ *
+ * LOCKING:
+ * Inherited from SCSI layer (none, can sleep)
+ */
+
+void ata_eng_timeout(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+
+	DPRINTK("ENTER\n");
+
+	qc = ata_qc_from_tag(ap, ap->active_tag);
+	if (!qc) {
+		printk(KERN_ERR "ata%u: BUG: timeout without command\n",
+		       ap->id);
+		goto out;
+	}
+
+	ata_qc_timeout(qc);
+
+out:
+	DPRINTK("EXIT\n");
+}
+
+/**
+ * ata_qc_new - Request an available ATA command, for queueing
+ * @ap: Port with which the allocated command will be associated
+ *
+ * LOCKING:
+ */
+
+static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc = NULL;
+	unsigned int i;
+
+	for (i = 0; i < ATA_MAX_QUEUE; i++)
+		if (!test_and_set_bit(i, &ap->qactive)) {
+			qc = ata_qc_from_tag(ap, i);
+			break;
+		}
+
+	if (qc)
+		qc->tag = i;
+
+	return qc;
+}
+
+/**
+ * ata_qc_new_init - Request an available ATA command, and initialize it
+ * @ap: Port associated with device @dev
+ * @dev: Device from which we request an available command structure
+ *
+ * LOCKING:
+ */
+
+struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
+				       struct ata_device *dev)
+{
+	struct ata_queued_cmd *qc;
+
+	qc = ata_qc_new(ap);
+	if (qc) {
+		qc->sg = NULL;
+		qc->flags = 0;
+		qc->scsicmd = NULL;
+		qc->ap = ap;
+		qc->dev = dev;
+		qc->cursect = qc->cursg = qc->cursg_ofs = 0;
+		qc->nsect = 0;
+		qc->nbytes = qc->curbytes = 0;
+
+		ata_tf_init(ap, &qc->tf, dev->devno);
+
+		if (dev->flags & ATA_DFLAG_LBA48)
+			qc->tf.flags |= ATA_TFLAG_LBA48;
+	}
+
+	return qc;
+}
+
+static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat)
+{
+	return 0;
+}
+
+static void __ata_qc_complete(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int tag, do_clear = 0;
+
+	qc->flags = 0;
+	tag = qc->tag;
+	if (likely(ata_tag_valid(tag))) {
+		if (tag == ap->active_tag)
+			ap->active_tag = ATA_TAG_POISON;
+		qc->tag = ATA_TAG_POISON;
+		do_clear = 1;
+	}
+
+	if (qc->waiting) {
+		struct completion *waiting = qc->waiting;
+		qc->waiting = NULL;
+		complete(waiting);
+	}
+
+	if (likely(do_clear))
+		clear_bit(tag, &ap->qactive);
+}
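+/* Illustrative sketch of the tag lifecycle implemented above: a tag is
+ * claimed atomically in ata_qc_new() via test_and_set_bit(), recorded
+ * in qc->tag, and released by __ata_qc_complete() via clear_bit().
+ * The query helper below is invented for the example.
+ */
+static inline int example_tag_busy(struct ata_port *ap, unsigned int tag)
+{
+	return ata_tag_valid(tag) && test_bit(tag, &ap->qactive);
+}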
+/**
+ * ata_qc_free - free unused ata_queued_cmd
+ * @qc: Command to free
+ *
+ * Designed to free unused ata_queued_cmd object
+ * in case something prevents using it.
+ *
+ * LOCKING:
+ *
+ */
+void ata_qc_free(struct ata_queued_cmd *qc)
+{
+	assert(qc != NULL);	/* ata_qc_from_tag _might_ return NULL */
+	assert(qc->waiting == NULL);	/* nothing should be waiting */
+
+	__ata_qc_complete(qc);
+}
+
+/**
+ * ata_qc_complete - Complete an active ATA command
+ * @qc: Command to complete
+ * @drv_stat: ATA status register contents
+ *
+ * LOCKING:
+ *
+ */
+
+void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
+{
+	int rc;
+
+	assert(qc != NULL);	/* ata_qc_from_tag _might_ return NULL */
+	assert(qc->flags & ATA_QCFLAG_ACTIVE);
+
+	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
+		ata_sg_clean(qc);
+
+	/* call completion callback */
+	rc = qc->complete_fn(qc, drv_stat);
+
+	/* if callback indicates not to complete command (non-zero),
+	 * return immediately
+	 */
+	if (rc != 0)
+		return;
+
+	__ata_qc_complete(qc);
+
+	VPRINTK("EXIT\n");
+}
+
+static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	switch (qc->tf.protocol) {
+	case ATA_PROT_DMA:
+	case ATA_PROT_ATAPI_DMA:
+		return 1;
+
+	case ATA_PROT_ATAPI:
+	case ATA_PROT_PIO:
+	case ATA_PROT_PIO_MULT:
+		if (ap->flags & ATA_FLAG_PIO_DMA)
+			return 1;
+
+		/* fall through */
+
+	default:
+		return 0;
+	}
+
+	/* never reached */
+}
+
+/**
+ * ata_qc_issue - issue taskfile to device
+ * @qc: command to issue to device
+ *
+ * Prepare an ATA command for submission to a device.
+ * This includes mapping the data into a DMA-able
+ * area, filling in the S/G table, and finally
+ * writing the taskfile to hardware, starting the command.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ *
+ * RETURNS:
+ * Zero on success, negative on error.
+ */
+
+int ata_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	if (ata_should_dma_map(qc)) {
+		if (qc->flags & ATA_QCFLAG_SG) {
+			if (ata_sg_setup(qc))
+				goto err_out;
+		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
+			if (ata_sg_setup_one(qc))
+				goto err_out;
+		}
+	} else {
+		qc->flags &= ~ATA_QCFLAG_DMAMAP;
+	}
+
+	ap->ops->qc_prep(qc);
+
+	qc->ap->active_tag = qc->tag;
+	qc->flags |= ATA_QCFLAG_ACTIVE;
+
+	return ap->ops->qc_issue(qc);
+
+err_out:
+	return -1;
+}
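+/* Illustrative sketch: a low-level driver that needs no special issue
+ * logic points its port operations at the generic protocol dispatcher
+ * defined below (ata_qc_issue_prot) and the other helpers from this
+ * file.  Example fragment only; a real driver fills in every hook.
+ */
+static struct ata_port_operations example_port_ops = {
+	.tf_load	= ata_tf_load,
+	.exec_command	= ata_exec_command,
+	.qc_prep	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.port_start	= ata_port_start,
+	.port_stop	= ata_port_stop,
+};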
+/**
+ * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
+ * @qc: command to issue to device
+ *
+ * Using various libata functions and hooks, this function
+ * starts an ATA command.  ATA commands are grouped into
+ * classes called "protocols", and issuing each type of protocol
+ * is slightly different.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ *
+ * RETURNS:
+ * Zero on success, negative on error.
+ */
+
+int ata_qc_issue_prot(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	ata_dev_select(ap, qc->dev->devno, 1, 0);
+
+	switch (qc->tf.protocol) {
+	case ATA_PROT_NODATA:
+		ata_tf_to_host_nolock(ap, &qc->tf);
+		break;
+
+	case ATA_PROT_DMA:
+		ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
+		ap->ops->bmdma_setup(qc);	/* set up bmdma */
+		ap->ops->bmdma_start(qc);	/* initiate bmdma */
+		break;
+
+	case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
+		ata_qc_set_polling(qc);
+		ata_tf_to_host_nolock(ap, &qc->tf);
+		ap->pio_task_state = PIO_ST;
+		queue_work(ata_wq, &ap->pio_task);
+		break;
+
+	case ATA_PROT_ATAPI:
+		ata_qc_set_polling(qc);
+		ata_tf_to_host_nolock(ap, &qc->tf);
+		queue_work(ata_wq, &ap->packet_task);
+		break;
+
+	case ATA_PROT_ATAPI_NODATA:
+		ata_tf_to_host_nolock(ap, &qc->tf);
+		queue_work(ata_wq, &ap->packet_task);
+		break;
+
+	case ATA_PROT_ATAPI_DMA:
+		ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
+		ap->ops->bmdma_setup(qc);	/* set up bmdma */
+		queue_work(ata_wq, &ap->packet_task);
+		break;
+
+	default:
+		WARN_ON(1);
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction (MMIO)
+ * @qc: Info associated with this ATA transaction.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+	u8 dmactl;
+	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
+
+	/* load PRD table addr. */
+	mb();	/* make sure PRD table writes are visible to controller */
+	writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
+
+	/* specify data direction, triple-check start bit is clear */
+	dmactl = readb(mmio + ATA_DMA_CMD);
+	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
+	if (!rw)
+		dmactl |= ATA_DMA_WR;
+	writeb(dmactl, mmio + ATA_DMA_CMD);
+
+	/* issue r/w command */
+	ap->ops->exec_command(ap, &qc->tf);
+}
+
+/**
+ * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction (MMIO)
+ * @qc: Info associated with this ATA transaction.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
+	u8 dmactl;
+
+	/* start host DMA transaction */
+	dmactl = readb(mmio + ATA_DMA_CMD);
+	writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
+
+	/* Strictly, one may wish to issue a readb() here, to
+	 * flush the mmio write.  However, control also passes
+	 * to the hardware at this point, and it will interrupt
+	 * us when we are to resume control.  So, in effect,
+	 * we don't care when the mmio write flushes.
+	 * Further, a read of the DMA status register _immediately_
+	 * following the write may not be what certain flaky hardware
+	 * expects, so it seems best not to add a readb() without
+	 * first auditing all the MMIO ATA cards/mobos.
+	 * Or maybe I'm just being paranoid.
+	 */
+}
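+/* Illustrative sketch: each 8-byte PRD entry written through ap->prd
+ * carries a DMA address, a byte count, and an end-of-table flag in the
+ * top bit of the flags/length word.  This simplified single-entry fill
+ * mirrors what ata_qc_prep() does for a whole scatterlist.
+ */
+static void example_fill_single_prd(struct ata_port *ap, dma_addr_t addr,
+				    u32 len)
+{
+	ap->prd[0].addr = cpu_to_le32(addr);
+	/* ATA_PRD_EOT marks the last entry of the table */
+	ap->prd[0].flags_len = cpu_to_le32(len | ATA_PRD_EOT);
+}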
+/**
+ * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
+ * @qc: Info associated with this ATA transaction.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+	u8 dmactl;
+
+	/* load PRD table addr. */
+	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
+
+	/* specify data direction, triple-check start bit is clear */
+	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
+	if (!rw)
+		dmactl |= ATA_DMA_WR;
+	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+	/* issue r/w command */
+	ap->ops->exec_command(ap, &qc->tf);
+}
+
+/**
+ * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
+ * @qc: Info associated with this ATA transaction.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	u8 dmactl;
+
+	/* start host DMA transaction */
+	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	outb(dmactl | ATA_DMA_START,
+	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+}
+
+void ata_bmdma_start(struct ata_queued_cmd *qc)
+{
+	if (qc->ap->flags & ATA_FLAG_MMIO)
+		ata_bmdma_start_mmio(qc);
+	else
+		ata_bmdma_start_pio(qc);
+}
+
+void ata_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	if (qc->ap->flags & ATA_FLAG_MMIO)
+		ata_bmdma_setup_mmio(qc);
+	else
+		ata_bmdma_setup_pio(qc);
+}
+
+void ata_bmdma_irq_clear(struct ata_port *ap)
+{
+	if (ap->flags & ATA_FLAG_MMIO) {
+		void __iomem *mmio = ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
+		writeb(readb(mmio), mmio);
+	} else {
+		unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
+		outb(inb(addr), addr);
+	}
+}
+
+u8 ata_bmdma_status(struct ata_port *ap)
+{
+	u8 host_stat;
+	if (ap->flags & ATA_FLAG_MMIO) {
+		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
+		host_stat = readb(mmio + ATA_DMA_STATUS);
+	} else
+		host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+	return host_stat;
+}
+
+void ata_bmdma_stop(struct ata_port *ap)
+{
+	if (ap->flags & ATA_FLAG_MMIO) {
+		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
+
+		/* clear start/stop bit */
+		writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
+		       mmio + ATA_DMA_CMD);
+	} else {
+		/* clear start/stop bit */
+		outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
+		     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	}
+
+	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+	ata_altstatus(ap);	/* dummy read */
+}
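+/* Illustrative sketch: decoding the BMDMA status byte returned by
+ * ata_bmdma_status() above.  The bit names come from <linux/ata.h>;
+ * the helper itself is invented for the example.
+ */
+static inline int example_bmdma_irq_pending(struct ata_port *ap)
+{
+	u8 host_stat = ata_bmdma_status(ap);
+
+	/* the interrupt bit tells a shared-irq handler the event is ours */
+	return (host_stat & ATA_DMA_INTR) != 0;
+}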
+/**
+ * ata_host_intr - Handle host interrupt for given (port, task)
+ * @ap: Port on which interrupt arrived (possibly...)
+ * @qc: Taskfile currently active in engine
+ *
+ * Handle host interrupt for given queued command.  Currently,
+ * only DMA interrupts are handled.  All other commands are
+ * handled via polling with interrupts disabled (nIEN bit).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ *
+ * RETURNS:
+ * One if interrupt was handled, zero if not (shared irq).
+ */
+
+inline unsigned int ata_host_intr (struct ata_port *ap,
+				   struct ata_queued_cmd *qc)
+{
+	u8 status, host_stat;
+
+	switch (qc->tf.protocol) {
+
+	case ATA_PROT_DMA:
+	case ATA_PROT_ATAPI_DMA:
+	case ATA_PROT_ATAPI:
+		/* check status of DMA engine */
+		host_stat = ap->ops->bmdma_status(ap);
+		VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
+
+		/* if it's not our irq... */
+		if (!(host_stat & ATA_DMA_INTR))
+			goto idle_irq;
+
+		/* before we do anything else, clear DMA-Start bit */
+		ap->ops->bmdma_stop(ap);
+
+		/* fall through */
+
+	case ATA_PROT_ATAPI_NODATA:
+	case ATA_PROT_NODATA:
+		/* check altstatus */
+		status = ata_altstatus(ap);
+		if (status & ATA_BUSY)
+			goto idle_irq;
+
+		/* check main status, clearing INTRQ */
+		status = ata_chk_status(ap);
+		if (unlikely(status & ATA_BUSY))
+			goto idle_irq;
+		DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
+			ap->id, qc->tf.protocol, status);
+
+		/* ack bmdma irq events */
+		ap->ops->irq_clear(ap);
+
+		/* complete taskfile transaction */
+		ata_qc_complete(qc, status);
+		break;
+
+	default:
+		goto idle_irq;
+	}
+
+	return 1;	/* irq handled */
+
+idle_irq:
+	ap->stats.idle_irq++;
+
+#ifdef ATA_IRQ_TRAP
+	if ((ap->stats.idle_irq % 1000) == 0) {
+		ata_irq_ack(ap, 0); /* debug trap */
+		printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
+		return 1;	/* claim the irq so the trap is visible */
+	}
+#endif
+	return 0;	/* irq not handled */
+}
+
+/**
+ * ata_interrupt - Default ATA host interrupt handler
+ * @irq: irq line
+ * @dev_instance: pointer to our host information structure
+ * @regs: unused
+ *
+ * LOCKING:
+ * Obtains host_set lock during operation.
+ *
+ * RETURNS:
+ * IRQ_HANDLED if at least one port was serviced, IRQ_NONE
+ * otherwise (shared irq belonging to another device).
+ */
+
+irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
+{
+	struct ata_host_set *host_set = dev_instance;
+	unsigned int i;
+	unsigned int handled = 0;
+	unsigned long flags;
+
+	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
+	spin_lock_irqsave(&host_set->lock, flags);
+
+	for (i = 0; i < host_set->n_ports; i++) {
+		struct ata_port *ap;
+
+		ap = host_set->ports[i];
+		if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
+			struct ata_queued_cmd *qc;
+
+			qc = ata_qc_from_tag(ap, ap->active_tag);
+			if (qc && (!(qc->tf.ctl & ATA_NIEN)))
+				handled |= ata_host_intr(ap, qc);
+		}
+	}
+
+	spin_unlock_irqrestore(&host_set->lock, flags);
+
+	return IRQ_RETVAL(handled);
+}
+
+/**
+ * atapi_packet_task - Write CDB bytes to hardware
+ * @_data: Port to which ATAPI device is attached.
+ *
+ * When device has indicated its readiness to accept
+ * a CDB, this function is called.  Send the CDB.
+ * If DMA is to be performed, exit immediately.
+ * Otherwise, we are in polling mode, so poll
+ * status until the operation succeeds or fails.
+ * + * LOCKING: + * Kernel thread context (may sleep) + */ + +static void atapi_packet_task(void *_data) +{ + struct ata_port *ap = _data; + struct ata_queued_cmd *qc; + u8 status; + + qc = ata_qc_from_tag(ap, ap->active_tag); + assert(qc != NULL); + assert(qc->flags & ATA_QCFLAG_ACTIVE); + + /* sleep-wait for BSY to clear */ + DPRINTK("busy wait\n"); + if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) + goto err_out; + + /* make sure DRQ is set */ + status = ata_chk_status(ap); + if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) + goto err_out; + + /* send SCSI cdb */ + DPRINTK("send cdb\n"); + assert(ap->cdb_len >= 12); + ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1); + + /* if we are DMA'ing, irq handler takes over from here */ + if (qc->tf.protocol == ATA_PROT_ATAPI_DMA) + ap->ops->bmdma_start(qc); /* initiate bmdma */ + + /* non-data commands are also handled via irq */ + else if (qc->tf.protocol == ATA_PROT_ATAPI_NODATA) { + /* do nothing */ + } + + /* PIO commands are handled by polling */ + else { + ap->pio_task_state = PIO_ST; + queue_work(ata_wq, &ap->pio_task); + } + + return; + +err_out: + ata_qc_complete(qc, ATA_ERR); +} + +int ata_port_start (struct ata_port *ap) +{ + struct device *dev = ap->host_set->dev; + + ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL); + if (!ap->prd) + return -ENOMEM; + + DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma); + + return 0; +} + +void ata_port_stop (struct ata_port *ap) +{ + struct device *dev = ap->host_set->dev; + + dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma); +} + +/** + * ata_host_remove - Unregister SCSI host structure with upper layers + * @ap: Port to unregister + * @do_unregister: 1 if we fully unregister, 0 to just stop the port + * + * LOCKING: + */ + +static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister) +{ + struct Scsi_Host *sh = ap->host; + + DPRINTK("ENTER\n"); + + if (do_unregister) + scsi_remove_host(sh); + + ap->ops->port_stop(ap); +} + +/** + * ata_host_init - Initialize an ata_port structure + * @ap: Structure to initialize + * @host: associated SCSI mid-layer structure + * @host_set: Collection of hosts to which @ap belongs + * @ent: Probe information provided by low-level driver + * @port_no: Port number associated with this ata_port + * + * LOCKING: + * + */ + +static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host, + struct ata_host_set *host_set, + struct ata_probe_ent *ent, unsigned int port_no) +{ + unsigned int i; + + host->max_id = 16; + host->max_lun = 1; + host->max_channel = 1; + host->unique_id = ata_unique_id++; + host->max_cmd_len = 12; + scsi_set_device(host, ent->dev); + scsi_assign_lock(host, &host_set->lock); + + ap->flags = ATA_FLAG_PORT_DISABLED; + ap->id = host->unique_id; + ap->host = host; + ap->ctl = ATA_DEVCTL_OBS; + ap->host_set = host_set; + ap->port_no = port_no; + ap->hard_port_no = + ent->legacy_mode ? 
ent->hard_port_no : port_no;
+	ap->pio_mask = ent->pio_mask;
+	ap->mwdma_mask = ent->mwdma_mask;
+	ap->udma_mask = ent->udma_mask;
+	ap->flags |= ent->host_flags;
+	ap->ops = ent->port_ops;
+	ap->cbl = ATA_CBL_NONE;
+	ap->active_tag = ATA_TAG_POISON;
+	ap->last_ctl = 0xFF;
+
+	INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
+	INIT_WORK(&ap->pio_task, ata_pio_task, ap);
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++)
+		ap->device[i].devno = i;
+
+#ifdef ATA_IRQ_TRAP
+	ap->stats.unhandled_irq = 1;
+	ap->stats.idle_irq = 1;
+#endif
+
+	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
+}
+
+/**
+ * ata_host_add - Attach low-level ATA driver to system
+ * @ent: Information provided by low-level driver
+ * @host_set: Collection of ports to which we add
+ * @port_no: Port number associated with this ata_port
+ *
+ * LOCKING:
+ *
+ * RETURNS:
+ * New ata_port on success, NULL on error.
+ */
+
+static struct ata_port * ata_host_add(struct ata_probe_ent *ent,
+				      struct ata_host_set *host_set,
+				      unsigned int port_no)
+{
+	struct Scsi_Host *host;
+	struct ata_port *ap;
+	int rc;
+
+	DPRINTK("ENTER\n");
+	host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
+	if (!host)
+		return NULL;
+
+	ap = (struct ata_port *) &host->hostdata[0];
+
+	ata_host_init(ap, host, host_set, ent, port_no);
+
+	rc = ap->ops->port_start(ap);
+	if (rc)
+		goto err_out;
+
+	return ap;
+
+err_out:
+	scsi_host_put(host);
+	return NULL;
+}
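+/* Illustrative sketch: ata_host_add() above embeds the ata_port inside
+ * the SCSI host's hostdata[] allocation, so the two objects share one
+ * lifetime.  The accessor below restates the cast this file uses in
+ * ata_host_add() and ata_scsi_release().
+ */
+static inline struct ata_port *example_port_from_host(struct Scsi_Host *host)
+{
+	return (struct ata_port *) &host->hostdata[0];
+}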
+/**
+ * ata_device_add - Register an ATA host set with upper layers
+ * @ent: Probe information describing the host set to be registered
+ *
+ * LOCKING:
+ *
+ * RETURNS:
+ * Number of ports registered.  Zero on error (no ports registered).
+ */
+
+int ata_device_add(struct ata_probe_ent *ent)
+{
+	unsigned int count = 0, i;
+	struct device *dev = ent->dev;
+	struct ata_host_set *host_set;
+
+	DPRINTK("ENTER\n");
+	/* alloc a container for our list of ATA ports (buses) */
+	host_set = kmalloc(sizeof(struct ata_host_set) +
+			   (ent->n_ports * sizeof(void *)), GFP_KERNEL);
+	if (!host_set)
+		return 0;
+	memset(host_set, 0, sizeof(struct ata_host_set) + (ent->n_ports * sizeof(void *)));
+	spin_lock_init(&host_set->lock);
+
+	host_set->dev = dev;
+	host_set->n_ports = ent->n_ports;
+	host_set->irq = ent->irq;
+	host_set->mmio_base = ent->mmio_base;
+	host_set->private_data = ent->private_data;
+	host_set->ops = ent->port_ops;
+
+	/* register each port bound to this device */
+	for (i = 0; i < ent->n_ports; i++) {
+		struct ata_port *ap;
+		unsigned long xfer_mode_mask;
+
+		ap = ata_host_add(ent, host_set, i);
+		if (!ap)
+			goto err_out;
+
+		host_set->ports[i] = ap;
+		xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
+				 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
+				 (ap->pio_mask << ATA_SHIFT_PIO);
+
+		/* print per-port info to dmesg */
+		printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
+		       "bmdma 0x%lX irq %lu\n",
+		       ap->id,
+		       ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
+		       ata_mode_string(xfer_mode_mask),
+		       ap->ioaddr.cmd_addr,
+		       ap->ioaddr.ctl_addr,
+		       ap->ioaddr.bmdma_addr,
+		       ent->irq);
+
+		ata_chk_status(ap);
+		host_set->ops->irq_clear(ap);
+		count++;
+	}
+
+	if (!count) {
+		kfree(host_set);
+		return 0;
+	}
+
+	/* obtain irq, that is shared between channels */
+	if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
+			DRV_NAME, host_set))
+		goto err_out;
+
+	/* perform each probe synchronously */
+	DPRINTK("probe begin\n");
+	for (i = 0; i < count; i++) {
+		struct ata_port *ap;
+		int rc;
+
+		ap = host_set->ports[i];
+
+		DPRINTK("ata%u: probe begin\n", ap->id);
+		rc = ata_bus_probe(ap);
+		DPRINTK("ata%u: probe end\n", ap->id);
+
+		if (rc) {
+			/* FIXME: do something useful here?
+			 * Current libata behavior will
+			 * tear down everything when
+			 * the module is removed
+			 * or the h/w is unplugged.
+			 */
+		}
+
+		rc = scsi_add_host(ap->host, dev);
+		if (rc) {
+			printk(KERN_ERR "ata%u: scsi_add_host failed\n",
+			       ap->id);
+			/* FIXME: do something useful here */
+			/* FIXME: handle unconditional calls to
+			 * scsi_scan_host and ata_host_remove, below,
+			 * at the very least
+			 */
+		}
+	}
+
+	/* probes are done, now scan each port's disk(s) */
+	DPRINTK("scan begin\n");
+	for (i = 0; i < count; i++) {
+		struct ata_port *ap = host_set->ports[i];
+
+		scsi_scan_host(ap->host);
+	}
+
+	dev_set_drvdata(dev, host_set);
+
+	VPRINTK("EXIT, returning %u\n", ent->n_ports);
+	return ent->n_ports;	/* success */
+
+err_out:
+	for (i = 0; i < count; i++) {
+		ata_host_remove(host_set->ports[i], 1);
+		scsi_host_put(host_set->ports[i]->host);
+	}
+	kfree(host_set);
+	VPRINTK("EXIT, returning 0\n");
+	return 0;
+}
+
+/**
+ * ata_scsi_release - SCSI layer callback hook for host unload
+ * @host: libata host to be unloaded
+ *
+ * Performs all duties necessary to shut down a libata port...
+ * Kill port kthread, disable port, and release resources.
+ *
+ * LOCKING:
+ * Inherited from SCSI layer.
+ *
+ * RETURNS:
+ * One.
+ */
+
+int ata_scsi_release(struct Scsi_Host *host)
+{
+	struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
+
+	DPRINTK("ENTER\n");
+
+	ap->ops->port_disable(ap);
+	ata_host_remove(ap, 0);
+
+	DPRINTK("EXIT\n");
+	return 1;
+}
+
+/**
+ * ata_std_ports - initialize ioaddr with standard port offsets.
+ * @ioaddr: IO address structure to be initialized
+ */
+void ata_std_ports(struct ata_ioports *ioaddr)
+{
+	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
+	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
+	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
+	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
+	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
+	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
+	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
+	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
+	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
+	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
+}
+
+static struct ata_probe_ent *
+ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port)
+{
+	struct ata_probe_ent *probe_ent;
+
+	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
+	if (!probe_ent) {
+		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
+		       kobject_name(&(dev->kobj)));
+		return NULL;
+	}
+
+	memset(probe_ent, 0, sizeof(*probe_ent));
+
+	INIT_LIST_HEAD(&probe_ent->node);
+	probe_ent->dev = dev;
+
+	probe_ent->sht = port->sht;
+	probe_ent->host_flags = port->host_flags;
+	probe_ent->pio_mask = port->pio_mask;
+	probe_ent->mwdma_mask = port->mwdma_mask;
+	probe_ent->udma_mask = port->udma_mask;
+	probe_ent->port_ops = port->port_ops;
+
+	return probe_ent;
+}
+
+#ifdef CONFIG_PCI
+struct ata_probe_ent *
+ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port)
+{
+	struct ata_probe_ent *probe_ent =
+		ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
+	if (!probe_ent)
+		return NULL;
+
+	probe_ent->n_ports = 2;
+	probe_ent->irq = pdev->irq;
+	probe_ent->irq_flags = SA_SHIRQ;
+
+	probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0);
+	probe_ent->port[0].altstatus_addr =
+	probe_ent->port[0].ctl_addr =
+		pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
+	probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
+
+	probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2);
+	probe_ent->port[1].altstatus_addr =
+	probe_ent->port[1].ctl_addr =
+		pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
+	probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8;
+
+	ata_std_ports(&probe_ent->port[0]);
+	ata_std_ports(&probe_ent->port[1]);
+
+	return probe_ent;
+}
+
+static struct ata_probe_ent *
+ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
+			 struct ata_probe_ent **ppe2)
+{
+	struct ata_probe_ent *probe_ent, *probe_ent2;
+
+	probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
+	if (!probe_ent)
+		return NULL;
+	probe_ent2 = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[1]);
+	if (!probe_ent2) {
+		kfree(probe_ent);
+		return NULL;
+	}
+
+	probe_ent->n_ports = 1;
+	probe_ent->irq = 14;
+
+	probe_ent->hard_port_no = 0;
+	probe_ent->legacy_mode = 1;
+
+	probe_ent2->n_ports = 1;
+	probe_ent2->irq = 15;
+
+	probe_ent2->hard_port_no = 1;
+	probe_ent2->legacy_mode = 1;
+
+	probe_ent->port[0].cmd_addr = 0x1f0;
+	probe_ent->port[0].altstatus_addr =
+	probe_ent->port[0].ctl_addr = 0x3f6;
+	probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
+
+	probe_ent2->port[0].cmd_addr = 0x170;
+	probe_ent2->port[0].altstatus_addr =
+	probe_ent2->port[0].ctl_addr = 0x376;
+	probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4) + 8;
+
+	ata_std_ports(&probe_ent->port[0]);
+	ata_std_ports(&probe_ent2->port[0]);
+
+	*ppe2 = probe_ent2;
+	return probe_ent;
+}
+
+/**
+ * ata_pci_init_one - Initialize/register PCI IDE host controller
+ * @pdev: Controller to be initialized
+ * @port_info: Information from low-level host driver
+ * @n_ports: Number of ports attached to host controller
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, negative errno-based value on error.
+ */
+
+int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
+		      unsigned int n_ports)
+{
+	struct ata_probe_ent *probe_ent, *probe_ent2 = NULL;
+	struct ata_port_info *port[2];
+	u8 tmp8, mask;
+	unsigned int legacy_mode = 0;
+	int disable_dev_on_err = 1;
+	int rc;
+
+	DPRINTK("ENTER\n");
+
+	port[0] = port_info[0];
+	if (n_ports > 1)
+		port[1] = port_info[1];
+	else
+		port[1] = port[0];
+
+	if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
+	    && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
+		/* TODO: support transitioning to native mode? */
+		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
+		mask = (1 << 2) | (1 << 0);
+		if ((tmp8 & mask) != mask)
+			legacy_mode = (1 << 3);
+	}
+
+	/* FIXME... */
+	if ((!legacy_mode) && (n_ports > 1)) {
+		printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n");
+		return -EINVAL;
+	}
+
+	rc = pci_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	rc = pci_request_regions(pdev, DRV_NAME);
+	if (rc) {
+		disable_dev_on_err = 0;
+		goto err_out;
+	}
+
+	if (legacy_mode) {
+		if (!request_region(0x1f0, 8, "libata")) {
+			struct resource *conflict, res;
+			res.start = 0x1f0;
+			res.end = 0x1f0 + 8 - 1;
+			conflict = ____request_resource(&ioport_resource, &res);
+			if (!strcmp(conflict->name, "libata"))
+				legacy_mode |= (1 << 0);
+			else {
+				disable_dev_on_err = 0;
+				printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
+			}
+		} else
+			legacy_mode |= (1 << 0);
+
+		if (!request_region(0x170, 8, "libata")) {
+			struct resource *conflict, res;
+			res.start = 0x170;
+			res.end = 0x170 + 8 - 1;
+			conflict = ____request_resource(&ioport_resource, &res);
+			if (!strcmp(conflict->name, "libata"))
+				legacy_mode |= (1 << 1);
+			else {
+				disable_dev_on_err = 0;
+				printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
+			}
+		} else
+			legacy_mode |= (1 << 1);
+	}
+
+	/* we have legacy mode, but all ports are unavailable */
+	if (legacy_mode == (1 << 3)) {
+		rc = -EBUSY;
+		goto err_out_regions;
+	}
+
+	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		goto err_out_regions;
+	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		goto err_out_regions;
+
+	if (legacy_mode) {
+		probe_ent = ata_pci_init_legacy_mode(pdev, port, &probe_ent2);
+	} else
+		probe_ent = ata_pci_init_native_mode(pdev, port);
+	if (!probe_ent) {
+		rc = -ENOMEM;
+		goto err_out_regions;
+	}
+
+	pci_set_master(pdev);
+
+	/* FIXME: check ata_device_add return */
+	if (legacy_mode) {
+		if (legacy_mode & (1 << 0))
+			ata_device_add(probe_ent);
+		if (legacy_mode & (1 << 1))
+			ata_device_add(probe_ent2);
+	} else
+		ata_device_add(probe_ent);
+
+	kfree(probe_ent);
+	kfree(probe_ent2);
+
+	return 0;
+
+err_out_regions:
+	if (legacy_mode & (1 << 0))
+		release_region(0x1f0, 8);
+	if (legacy_mode & (1 << 1))
+		release_region(0x170, 8);
+	pci_release_regions(pdev);
+err_out:
+	if (disable_dev_on_err)
+		pci_disable_device(pdev);
+	return rc;
+}
+
+/**
+ * ata_pci_remove_one - PCI layer callback for device removal
+ * @pdev: PCI device that was removed
+ *
+ * PCI layer indicates to libata via this hook that
+ * hot-unplug or module unload event has occurred.
+ * Handle this by unregistering all objects associated
+ * with this PCI device.  Free those objects.  Then finally
+ * release PCI resources and disable device.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ */
+
+void ata_pci_remove_one (struct pci_dev *pdev)
+{
+	struct device *dev = pci_dev_to_dev(pdev);
+	struct ata_host_set *host_set = dev_get_drvdata(dev);
+	struct ata_port *ap;
+	unsigned int i;
+
+	for (i = 0; i < host_set->n_ports; i++) {
+		ap = host_set->ports[i];
+
+		scsi_remove_host(ap->host);
+	}
+
+	free_irq(host_set->irq, host_set);
+	if (host_set->ops->host_stop)
+		host_set->ops->host_stop(host_set);
+	if (host_set->mmio_base)
+		iounmap(host_set->mmio_base);
+
+	for (i = 0; i < host_set->n_ports; i++) {
+		ap = host_set->ports[i];
+
+		ata_scsi_release(ap->host);
+
+		if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
+			struct ata_ioports *ioaddr = &ap->ioaddr;
+
+			if (ioaddr->cmd_addr == 0x1f0)
+				release_region(0x1f0, 8);
+			else if (ioaddr->cmd_addr == 0x170)
+				release_region(0x170, 8);
+		}
+
+		scsi_host_put(ap->host);
+	}
+
+	kfree(host_set);
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	dev_set_drvdata(dev, NULL);
+}
+
+/* move to PCI subsystem */
+int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits)
+{
+	unsigned long tmp = 0;
+
+	switch (bits->width) {
+	case 1: {
+		u8 tmp8 = 0;
+		pci_read_config_byte(pdev, bits->reg, &tmp8);
+		tmp = tmp8;
+		break;
+	}
+	case 2: {
+		u16 tmp16 = 0;
+		pci_read_config_word(pdev, bits->reg, &tmp16);
+		tmp = tmp16;
+		break;
+	}
+	case 4: {
+		u32 tmp32 = 0;
+		pci_read_config_dword(pdev, bits->reg, &tmp32);
+		tmp = tmp32;
+		break;
+	}
+
+	default:
+		return -EINVAL;
+	}
+
+	tmp &= bits->mask;
+
+	return (tmp == bits->val) ? 1 : 0;
+}
+#endif /* CONFIG_PCI */
+
+
+/**
+ * ata_init - libata initialization, run once at module load
+ *
+ * LOCKING:
+ * None (module load context).
+ *
+ * RETURNS:
+ * Zero on success, -ENOMEM if the work queue cannot be created.
+ */
+
+static int __init ata_init(void)
+{
+	ata_wq = create_workqueue("ata");
+	if (!ata_wq)
+		return -ENOMEM;
+
+	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
+	return 0;
+}
+
+static void __exit ata_exit(void)
+{
+	destroy_workqueue(ata_wq);
+}
+
+module_init(ata_init);
+module_exit(ata_exit);
+
+/*
+ * libata is essentially a library of internal helper functions for
+ * low-level ATA host controller drivers.  As such, the API/ABI is
+ * likely to change as new drivers are added and updated.
+ * Do not depend on ABI/API stability.
+ */
+
+EXPORT_SYMBOL_GPL(ata_std_bios_param);
+EXPORT_SYMBOL_GPL(ata_std_ports);
+EXPORT_SYMBOL_GPL(ata_device_add);
+EXPORT_SYMBOL_GPL(ata_sg_init);
+EXPORT_SYMBOL_GPL(ata_sg_init_one);
+EXPORT_SYMBOL_GPL(ata_qc_complete);
+EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
+EXPORT_SYMBOL_GPL(ata_eng_timeout);
+EXPORT_SYMBOL_GPL(ata_tf_load);
+EXPORT_SYMBOL_GPL(ata_tf_read);
+EXPORT_SYMBOL_GPL(ata_noop_dev_select);
+EXPORT_SYMBOL_GPL(ata_std_dev_select);
+EXPORT_SYMBOL_GPL(ata_tf_to_fis);
+EXPORT_SYMBOL_GPL(ata_tf_from_fis);
+EXPORT_SYMBOL_GPL(ata_check_status);
+EXPORT_SYMBOL_GPL(ata_altstatus);
+EXPORT_SYMBOL_GPL(ata_chk_err);
+EXPORT_SYMBOL_GPL(ata_exec_command);
+EXPORT_SYMBOL_GPL(ata_port_start);
+EXPORT_SYMBOL_GPL(ata_port_stop);
+EXPORT_SYMBOL_GPL(ata_interrupt);
+EXPORT_SYMBOL_GPL(ata_qc_prep);
+EXPORT_SYMBOL_GPL(ata_bmdma_setup);
+EXPORT_SYMBOL_GPL(ata_bmdma_start);
+EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
+EXPORT_SYMBOL_GPL(ata_bmdma_status);
+EXPORT_SYMBOL_GPL(ata_bmdma_stop);
+EXPORT_SYMBOL_GPL(ata_port_probe);
+EXPORT_SYMBOL_GPL(sata_phy_reset);
+EXPORT_SYMBOL_GPL(__sata_phy_reset);
+EXPORT_SYMBOL_GPL(ata_bus_reset);
+EXPORT_SYMBOL_GPL(ata_port_disable);
+EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
+EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
+EXPORT_SYMBOL_GPL(ata_scsi_error);
+EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
+EXPORT_SYMBOL_GPL(ata_scsi_release);
+EXPORT_SYMBOL_GPL(ata_host_intr);
+EXPORT_SYMBOL_GPL(ata_dev_classify);
+EXPORT_SYMBOL_GPL(ata_dev_id_string);
+EXPORT_SYMBOL_GPL(ata_scsi_simulate);
+
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL_GPL(pci_test_config_bits);
+EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
+EXPORT_SYMBOL_GPL(ata_pci_init_one);
+EXPORT_SYMBOL_GPL(ata_pci_remove_one);
+#endif /* CONFIG_PCI */
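+#ifdef CONFIG_PCI
+/* Illustrative sketch: the minimal shape of a PCI low-level driver
+ * built on the exports above.  The port_info contents and names are
+ * invented for the example, reusing the example_sht and
+ * example_port_ops fragments shown earlier; a real driver supplies
+ * complete versions of both.
+ */
+static struct ata_port_info example_port_info = {
+	.sht		= &example_sht,
+	.host_flags	= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+	.pio_mask	= 0x1f,			/* PIO modes 0-4 */
+	.port_ops	= &example_port_ops,
+};
+
+static int example_init_one(struct pci_dev *pdev,
+			    const struct pci_device_id *id)
+{
+	struct ata_port_info *ppi = &example_port_info;
+
+	/* registers ports, probes devices, scans disks */
+	return ata_pci_init_one(pdev, &ppi, 1);
+}
+#endif /* CONFIG_PCI */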