Diffstat (limited to 'drivers/ata')
34 files changed, 1646 insertions, 1135 deletions
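One interface change threads through most of the files below: the SCR register accessors (ahci_scr_read/ahci_scr_write and the sata_scr_* wrappers) now return an int error code and pass the register value back through a pointer, instead of returning a raw u32 and overloading 0xffffffff as the failure marker. The sketch below only illustrates that calling convention; struct fake_port, its register array, and the enum values are made-up stand-ins, not the kernel's ata_port or AHCI definitions.

/* Simplified model of the old vs. new SCR accessor convention.
 * Everything here is illustrative; only the shape of the API change
 * mirrors the diff (int return, value via out-pointer, optional
 * SCR_NOTIFICATION gated on controller capability).
 */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

enum { SCR_STATUS, SCR_ERROR, SCR_CONTROL, SCR_ACTIVE, SCR_NOTIFICATION, SCR_MAX };

struct fake_port {
	uint32_t scr[SCR_MAX];
	int has_sntf;			/* models HOST_CAP_SNTF */
};

/* Old style: the error case is folded into the return value. */
static uint32_t scr_read_old(struct fake_port *p, unsigned int reg)
{
	if (reg >= SCR_MAX)
		return 0xffffffffU;	/* indistinguishable from real data */
	return p->scr[reg];
}

/* New style: 0 on success, -EINVAL if the register is unsupported,
 * value handed back through *val.
 */
static int scr_read_new(struct fake_port *p, unsigned int reg, uint32_t *val)
{
	if (reg >= SCR_MAX)
		return -EINVAL;
	if (reg == SCR_NOTIFICATION && !p->has_sntf)
		return -EINVAL;		/* optional SNotification register */
	*val = p->scr[reg];
	return 0;
}

int main(void)
{
	struct fake_port port = { .scr = { 0x123, 0, 0x300, 0, 0 }, .has_sntf = 0 };
	uint32_t v;

	printf("old: %#x\n", scr_read_old(&port, SCR_NOTIFICATION));
	if (scr_read_new(&port, SCR_NOTIFICATION, &v))
		printf("new: SCR_NOTIFICATION not supported on this port\n");
	return 0;
}

With the error code separated from the data, callers such as the SError-clearing paths in the diff can propagate failures (for example, forcing a hardreset when SCR_ERROR cannot be read) rather than silently writing back an all-ones value.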
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 4ad8675f5a1..d8046a113c3 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -309,7 +309,7 @@ config PATA_HPT3X2N If unsure, say N. config PATA_HPT3X3 - tristate "HPT 343/363 PATA support (Experimental)" + tristate "HPT 343/363 PATA support" depends on PCI help This option enables support for the HPT 343/363 @@ -317,6 +317,14 @@ config PATA_HPT3X3 If unsure, say N. +config PATA_HPT3X3_DMA + bool "HPT 343/363 DMA support (Experimental)" + depends on PATA_HPT3X3 + help + This option enables DMA support for the HPT343/363 + controllers. Enable with care as there are still some + problems with DMA on this chipset. + config PATA_ISAPNP tristate "ISA Plug and Play PATA support (Experimental)" depends on EXPERIMENTAL && ISAPNP diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 11e4eb9f304..06f212ff2b4 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -99,6 +99,7 @@ enum { HOST_CAP_SSC = (1 << 14), /* Slumber capable */ HOST_CAP_CLO = (1 << 24), /* Command List Override support */ HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */ + HOST_CAP_SNTF = (1 << 29), /* SNotification register */ HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */ HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */ @@ -113,11 +114,11 @@ enum { PORT_TFDATA = 0x20, /* taskfile data */ PORT_SIG = 0x24, /* device TF signature */ PORT_CMD_ISSUE = 0x38, /* command issue */ - PORT_SCR = 0x28, /* SATA phy register block */ PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */ PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */ PORT_SCR_ERR = 0x30, /* SATA phy register: SError */ PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */ + PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */ /* PORT_IRQ_{STAT,MASK} bits */ PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */ @@ -216,8 +217,8 @@ struct ahci_port_priv { unsigned int ncq_saw_sdb:1; }; -static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg); -static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); +static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); +static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc); static void ahci_irq_clear(struct ata_port *ap); @@ -417,7 +418,10 @@ static const struct pci_device_id ahci_pci_tbl[] = { /* ATI */ { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */ - { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb600 }, /* ATI SB700 */ + { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb600 }, /* ATI SB700 IDE */ + { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb600 }, /* ATI SB700 AHCI */ + { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb600 }, /* ATI SB700 nraid5 */ + { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb600 }, /* ATI SB700 raid5 */ /* VIA */ { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */ @@ -545,13 +549,19 @@ static void ahci_save_initial_config(struct pci_dev *pdev, hpriv->saved_cap = cap = readl(mmio + HOST_CAP); hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL); - /* some chips lie about 64bit support */ + /* some chips have errata preventing 64bit use */ if ((cap & HOST_CAP_64) && (pi->flags & AHCI_FLAG_32BIT_ONLY)) { dev_printk(KERN_INFO, &pdev->dev, "controller can't do 64bit DMA, forcing 32bit\n"); cap &= ~HOST_CAP_64; } + if ((cap & HOST_CAP_NCQ) && (pi->flags & AHCI_FLAG_NO_NCQ)) 
{ + dev_printk(KERN_INFO, &pdev->dev, + "controller can't do NCQ, turning off CAP_NCQ\n"); + cap &= ~HOST_CAP_NCQ; + } + /* fixup zero port_map */ if (!port_map) { port_map = (1 << ahci_nr_ports(cap)) - 1; @@ -625,38 +635,45 @@ static void ahci_restore_initial_config(struct ata_host *host) (void) readl(mmio + HOST_PORTS_IMPL); /* flush */ } -static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in) +static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg) { - unsigned int sc_reg; - - switch (sc_reg_in) { - case SCR_STATUS: sc_reg = 0; break; - case SCR_CONTROL: sc_reg = 1; break; - case SCR_ERROR: sc_reg = 2; break; - case SCR_ACTIVE: sc_reg = 3; break; - default: - return 0xffffffffU; - } + static const int offset[] = { + [SCR_STATUS] = PORT_SCR_STAT, + [SCR_CONTROL] = PORT_SCR_CTL, + [SCR_ERROR] = PORT_SCR_ERR, + [SCR_ACTIVE] = PORT_SCR_ACT, + [SCR_NOTIFICATION] = PORT_SCR_NTF, + }; + struct ahci_host_priv *hpriv = ap->host->private_data; - return readl(ap->ioaddr.scr_addr + (sc_reg * 4)); + if (sc_reg < ARRAY_SIZE(offset) && + (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF))) + return offset[sc_reg]; + return 0; } - -static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in, - u32 val) +static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) { - unsigned int sc_reg; - - switch (sc_reg_in) { - case SCR_STATUS: sc_reg = 0; break; - case SCR_CONTROL: sc_reg = 1; break; - case SCR_ERROR: sc_reg = 2; break; - case SCR_ACTIVE: sc_reg = 3; break; - default: - return; + void __iomem *port_mmio = ahci_port_base(ap); + int offset = ahci_scr_offset(ap, sc_reg); + + if (offset) { + *val = readl(port_mmio + offset); + return 0; } + return -EINVAL; +} - writel(val, ap->ioaddr.scr_addr + (sc_reg * 4)); +static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) +{ + void __iomem *port_mmio = ahci_port_base(ap); + int offset = ahci_scr_offset(ap, sc_reg); + + if (offset) { + writel(val, port_mmio + offset); + return 0; + } + return -EINVAL; } static void ahci_start_engine(struct ata_port *ap) @@ -948,37 +965,87 @@ static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16); } -static int ahci_clo(struct ata_port *ap) +static int ahci_kick_engine(struct ata_port *ap, int force_restart) { void __iomem *port_mmio = ap->ioaddr.cmd_addr; struct ahci_host_priv *hpriv = ap->host->private_data; u32 tmp; + int busy, rc; + + /* do we need to kick the port? */ + busy = ahci_check_status(ap) & (ATA_BUSY | ATA_DRQ); + if (!busy && !force_restart) + return 0; + + /* stop engine */ + rc = ahci_stop_engine(ap); + if (rc) + goto out_restart; - if (!(hpriv->cap & HOST_CAP_CLO)) - return -EOPNOTSUPP; + /* need to do CLO? 
*/ + if (!busy) { + rc = 0; + goto out_restart; + } + if (!(hpriv->cap & HOST_CAP_CLO)) { + rc = -EOPNOTSUPP; + goto out_restart; + } + + /* perform CLO */ tmp = readl(port_mmio + PORT_CMD); tmp |= PORT_CMD_CLO; writel(tmp, port_mmio + PORT_CMD); + rc = 0; tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_CLO, PORT_CMD_CLO, 1, 500); if (tmp & PORT_CMD_CLO) - return -EIO; + rc = -EIO; - return 0; + /* restart engine */ + out_restart: + ahci_start_engine(ap); + return rc; } -static int ahci_softreset(struct ata_port *ap, unsigned int *class, - unsigned long deadline) +static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp, + struct ata_taskfile *tf, int is_cmd, u16 flags, + unsigned long timeout_msec) { + const u32 cmd_fis_len = 5; /* five dwords */ struct ahci_port_priv *pp = ap->private_data; void __iomem *port_mmio = ahci_port_base(ap); - const u32 cmd_fis_len = 5; /* five dwords */ + u8 *fis = pp->cmd_tbl; + u32 tmp; + + /* prep the command */ + ata_tf_to_fis(tf, pmp, is_cmd, fis); + ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12)); + + /* issue & wait */ + writel(1, port_mmio + PORT_CMD_ISSUE); + + if (timeout_msec) { + tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, + 1, timeout_msec); + if (tmp & 0x1) { + ahci_kick_engine(ap, 1); + return -EBUSY; + } + } else + readl(port_mmio + PORT_CMD_ISSUE); /* flush */ + + return 0; +} + +static int ahci_do_softreset(struct ata_port *ap, unsigned int *class, + int pmp, unsigned long deadline) +{ const char *reason = NULL; + unsigned long now, msecs; struct ata_taskfile tf; - u32 tmp; - u8 *fis; int rc; DPRINTK("ENTER\n"); @@ -990,43 +1057,22 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class, } /* prepare for SRST (AHCI-1.1 10.4.1) */ - rc = ahci_stop_engine(ap); - if (rc) { - reason = "failed to stop engine"; - goto fail_restart; - } - - /* check BUSY/DRQ, perform Command List Override if necessary */ - if (ahci_check_status(ap) & (ATA_BUSY | ATA_DRQ)) { - rc = ahci_clo(ap); - - if (rc == -EOPNOTSUPP) { - reason = "port busy but CLO unavailable"; - goto fail_restart; - } else if (rc) { - reason = "port busy but CLO failed"; - goto fail_restart; - } - } - - /* restart engine */ - ahci_start_engine(ap); + rc = ahci_kick_engine(ap, 1); + if (rc) + ata_port_printk(ap, KERN_WARNING, + "failed to reset engine (errno=%d)", rc); ata_tf_init(ap->device, &tf); - fis = pp->cmd_tbl; /* issue the first D2H Register FIS */ - ahci_fill_cmd_slot(pp, 0, - cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY); + msecs = 0; + now = jiffies; + if (time_after(now, deadline)) + msecs = jiffies_to_msecs(deadline - now); tf.ctl |= ATA_SRST; - ata_tf_to_fis(&tf, fis, 0); - fis[1] &= ~(1 << 7); /* turn off Command FIS bit */ - - writel(1, port_mmio + PORT_CMD_ISSUE); - - tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 500); - if (tmp & 0x1) { + if (ahci_exec_polled_cmd(ap, pmp, &tf, 0, + AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) { rc = -EIO; reason = "1st FIS failed"; goto fail; @@ -1036,14 +1082,8 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class, msleep(1); /* issue the second D2H Register FIS */ - ahci_fill_cmd_slot(pp, 0, cmd_fis_len); - tf.ctl &= ~ATA_SRST; - ata_tf_to_fis(&tf, fis, 0); - fis[1] &= ~(1 << 7); /* turn off Command FIS bit */ - - writel(1, port_mmio + PORT_CMD_ISSUE); - readl(port_mmio + PORT_CMD_ISSUE); /* flush */ + ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0); /* spec mandates ">= 2ms" before checking status. 
* We wait 150ms, because that was the magic delay used for @@ -1066,13 +1106,17 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class, DPRINTK("EXIT, class=%u\n", *class); return 0; - fail_restart: - ahci_start_engine(ap); fail: ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason); return rc; } +static int ahci_softreset(struct ata_port *ap, unsigned int *class, + unsigned long deadline) +{ + return ahci_do_softreset(ap, class, 0, deadline); +} + static int ahci_hardreset(struct ata_port *ap, unsigned int *class, unsigned long deadline) { @@ -1088,7 +1132,7 @@ static int ahci_hardreset(struct ata_port *ap, unsigned int *class, /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(ap->device, &tf); tf.command = 0x80; - ata_tf_to_fis(&tf, d2h_fis, 0); + ata_tf_to_fis(&tf, 0, 0, d2h_fis); rc = sata_std_hardreset(ap, class, deadline); @@ -1106,6 +1150,7 @@ static int ahci_hardreset(struct ata_port *ap, unsigned int *class, static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class, unsigned long deadline) { + u32 serror; int rc; DPRINTK("ENTER\n"); @@ -1116,7 +1161,8 @@ static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class, deadline); /* vt8251 needs SError cleared for the port to operate */ - ahci_scr_write(ap, SCR_ERROR, ahci_scr_read(ap, SCR_ERROR)); + ahci_scr_read(ap, SCR_ERROR, &serror); + ahci_scr_write(ap, SCR_ERROR, serror); ahci_start_engine(ap); @@ -1205,7 +1251,7 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc) */ cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ; - ata_tf_to_fis(&qc->tf, cmd_tbl, 0); + ata_tf_to_fis(&qc->tf, 0, 1, cmd_tbl); if (is_atapi) { memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32); memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len); @@ -1238,7 +1284,7 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat) ata_ehi_clear_desc(ehi); /* AHCI needs SError cleared; otherwise, it might lock up */ - serror = ahci_scr_read(ap, SCR_ERROR); + ahci_scr_read(ap, SCR_ERROR, &serror); ahci_scr_write(ap, SCR_ERROR, serror); /* analyze @irq_stat */ @@ -1262,12 +1308,12 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat) if (irq_stat & PORT_IRQ_IF_ERR) { err_mask |= AC_ERR_ATA_BUS; action |= ATA_EH_SOFTRESET; - ata_ehi_push_desc(ehi, ", interface fatal error"); + ata_ehi_push_desc(ehi, "interface fatal error"); } if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) { ata_ehi_hotplugged(ehi); - ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ? + ata_ehi_push_desc(ehi, "%s", irq_stat & PORT_IRQ_CONNECT ? 
"connection status changed" : "PHY RDY changed"); } @@ -1276,7 +1322,7 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat) err_mask |= AC_ERR_HSM; action |= ATA_EH_SOFTRESET; - ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x", + ata_ehi_push_desc(ehi, "unknown FIS %08x %08x %08x %08x", unk[0], unk[1], unk[2], unk[3]); } @@ -1512,11 +1558,17 @@ static void ahci_post_internal_cmd(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; - if (qc->flags & ATA_QCFLAG_FAILED) { - /* make DMA engine forget about the failed command */ - ahci_stop_engine(ap); - ahci_start_engine(ap); - } + /* make DMA engine forget about the failed command */ + if (qc->flags & ATA_QCFLAG_FAILED) + ahci_kick_engine(ap, 1); +} + +static int ahci_port_resume(struct ata_port *ap) +{ + ahci_power_up(ap); + ahci_start_port(ap); + + return 0; } #ifdef CONFIG_PM @@ -1536,14 +1588,6 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg) return rc; } -static int ahci_port_resume(struct ata_port *ap) -{ - ahci_power_up(ap); - ahci_start_port(ap); - - return 0; -} - static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) { struct ata_host *host = dev_get_drvdata(&pdev->dev); @@ -1734,12 +1778,13 @@ static void ahci_print_info(struct ata_host *host) dev_printk(KERN_INFO, &pdev->dev, "flags: " - "%s%s%s%s%s%s" - "%s%s%s%s%s%s%s\n" + "%s%s%s%s%s%s%s" + "%s%s%s%s%s%s%s\n" , cap & (1 << 31) ? "64bit " : "", cap & (1 << 30) ? "ncq " : "", + cap & (1 << 29) ? "sntf " : "", cap & (1 << 28) ? "ilck " : "", cap & (1 << 27) ? "stag " : "", cap & (1 << 26) ? "pm " : "", @@ -1794,7 +1839,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ahci_save_initial_config(pdev, &pi, hpriv); /* prepare host */ - if (!(pi.flags & AHCI_FLAG_NO_NCQ) && (hpriv->cap & HOST_CAP_NCQ)) + if (hpriv->cap & HOST_CAP_NCQ) pi.flags |= ATA_FLAG_NCQ; host = ata_host_alloc_pinfo(&pdev->dev, ppi, fls(hpriv->port_map)); @@ -1808,10 +1853,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) void __iomem *port_mmio = ahci_port_base(ap); /* standard SATA port setup */ - if (hpriv->port_map & (1 << i)) { + if (hpriv->port_map & (1 << i)) ap->ioaddr.cmd_addr = port_mmio; - ap->ioaddr.scr_addr = port_mmio + PORT_SCR; - } /* disabled/not-implemented port */ else diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 6a3bfef58e1..d9fa329fd15 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -414,7 +414,7 @@ static const struct piix_map_db ich6m_map_db = { */ .map = { /* PM PS SM SS MAP */ - { P0, P2, RV, RV }, /* 00b */ + { P0, P2, NA, NA }, /* 00b */ { IDE, IDE, P1, P3 }, /* 01b */ { P0, P2, IDE, IDE }, /* 10b */ { RV, RV, RV, RV }, @@ -928,20 +928,18 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev) { struct pci_dev *pdev = NULL; u16 cfg; - u8 rev; int no_piix_dma = 0; while((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL) { /* Look for 450NX PXB. 
Check for problem configurations A PCI quirk checks bit 6 already */ - pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); pci_read_config_word(pdev, 0x41, &cfg); /* Only on the original revision: IDE DMA can hang */ - if (rev == 0x00) + if (pdev->revision == 0x00) no_piix_dma = 1; /* On all revisions below 5 PXB bus lock must be disabled for IDE */ - else if (cfg & (1<<14) && rev < 5) + else if (cfg & (1<<14) && pdev->revision < 5) no_piix_dma = 2; } if (no_piix_dma) diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 5b25311ba88..6001aae0b88 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -71,6 +71,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev, u16 heads, u16 sectors); static unsigned int ata_dev_set_xfermode(struct ata_device *dev); static void ata_dev_xfermask(struct ata_device *dev); +static unsigned long ata_dev_blacklisted(const struct ata_device *dev); unsigned int ata_print_id = 1; static struct workqueue_struct *ata_wq; @@ -110,8 +111,9 @@ MODULE_VERSION(DRV_VERSION); /** * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure * @tf: Taskfile to convert - * @fis: Buffer into which data will output * @pmp: Port multiplier port + * @is_cmd: This FIS is for command + * @fis: Buffer into which data will output * * Converts a standard ATA taskfile to a Serial ATA * FIS structure (Register - Host to Device). @@ -119,12 +121,13 @@ MODULE_VERSION(DRV_VERSION); * LOCKING: * Inherited from caller. */ - -void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp) +void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis) { - fis[0] = 0x27; /* Register - Host to Device FIS */ - fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number, - bit 7 indicates Command FIS */ + fis[0] = 0x27; /* Register - Host to Device FIS */ + fis[1] = pmp & 0xf; /* Port multiplier number*/ + if (is_cmd) + fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */ + fis[2] = tf->command; fis[3] = tf->feature; @@ -1283,18 +1286,11 @@ static unsigned int ata_id_xfermask(const u16 *id) void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data, unsigned long delay) { - int rc; - - if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK) - return; - PREPARE_DELAYED_WORK(&ap->port_task, fn); ap->port_task_data = data; - rc = queue_delayed_work(ata_wq, &ap->port_task, delay); - - /* rc == 0 means that another user is using port task */ - WARN_ON(rc == 0); + /* may fail if ata_port_flush_task() in progress */ + queue_delayed_work(ata_wq, &ap->port_task, delay); } /** @@ -1309,32 +1305,9 @@ void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data, */ void ata_port_flush_task(struct ata_port *ap) { - unsigned long flags; - DPRINTK("ENTER\n"); - spin_lock_irqsave(ap->lock, flags); - ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK; - spin_unlock_irqrestore(ap->lock, flags); - - DPRINTK("flush #1\n"); - cancel_work_sync(&ap->port_task.work); /* akpm: seems unneeded */ - - /* - * At this point, if a task is running, it's guaranteed to see - * the FLUSH flag; thus, it will never queue pio tasks again. - * Cancel and flush. 
- */ - if (!cancel_delayed_work(&ap->port_task)) { - if (ata_msg_ctl(ap)) - ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n", - __FUNCTION__); - cancel_work_sync(&ap->port_task.work); - } - - spin_lock_irqsave(ap->lock, flags); - ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK; - spin_unlock_irqrestore(ap->lock, flags); + cancel_rearming_delayed_work(&ap->port_task); if (ata_msg_ctl(ap)) ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__); @@ -1814,7 +1787,7 @@ static void ata_dev_config_ncq(struct ata_device *dev, desc[0] = '\0'; return; } - if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) { + if (dev->horkage & ATA_HORKAGE_NONCQ) { snprintf(desc, desc_sz, "NCQ (not used)"); return; } @@ -1863,6 +1836,9 @@ int ata_dev_configure(struct ata_device *dev) if (ata_msg_probe(ap)) ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__); + /* set horkage */ + dev->horkage |= ata_dev_blacklisted(dev); + /* let ACPI work its magic */ rc = ata_acpi_on_devcfg(dev); if (rc) @@ -2038,7 +2014,7 @@ int ata_dev_configure(struct ata_device *dev) dev->max_sectors = ATA_MAX_SECTORS; } - if (ata_device_blacklisted(dev) & ATA_HORKAGE_MAX_SEC_128) + if (dev->horkage & ATA_HORKAGE_MAX_SEC_128) dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, dev->max_sectors); @@ -2413,21 +2389,35 @@ int sata_down_spd_limit(struct ata_port *ap) u32 sstatus, spd, mask; int rc, highbit; + if (!sata_scr_valid(ap)) + return -EOPNOTSUPP; + + /* If SCR can be read, use it to determine the current SPD. + * If not, use cached value in ap->sata_spd. + */ rc = sata_scr_read(ap, SCR_STATUS, &sstatus); - if (rc) - return rc; + if (rc == 0) + spd = (sstatus >> 4) & 0xf; + else + spd = ap->sata_spd; mask = ap->sata_spd_limit; if (mask <= 1) return -EINVAL; + + /* unconditionally mask off the highest bit */ highbit = fls(mask) - 1; mask &= ~(1 << highbit); - spd = (sstatus >> 4) & 0xf; - if (spd <= 1) - return -EINVAL; - spd--; - mask &= (1 << spd) - 1; + /* Mask off all speeds higher than or equal to the current + * one. Force 1.5Gbps if current SPD is not available. + */ + if (spd > 1) + mask &= (1 << (spd - 1)) - 1; + else + mask &= 1; + + /* were we already at the bottom? */ if (!mask) return -EINVAL; @@ -3190,9 +3180,6 @@ void ata_bus_reset(struct ata_port *ap) if ((slave_possible) && (err != 0x81)) ap->device[1].class = ata_dev_try_classify(ap, 1, &err); - /* re-enable interrupts */ - ap->ops->irq_on(ap); - /* is double-select really necessary? */ if (ap->device[1].class != ATA_DEV_NONE) ap->ops->dev_select(ap, 1); @@ -3280,9 +3267,11 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params, last = cur; last_jiffies = jiffies; - /* check deadline */ + /* Check deadline. If debouncing failed, return + * -EPIPE to tell upper layer to lower link speed. + */ if (time_after(jiffies, deadline)) - return -EBUSY; + return -EPIPE; } } @@ -3577,10 +3566,6 @@ void ata_std_postreset(struct ata_port *ap, unsigned int *classes) if (sata_scr_read(ap, SCR_ERROR, &serror) == 0) sata_scr_write(ap, SCR_ERROR, serror); - /* re-enable interrupts */ - if (!ap->ops->error_handler) - ap->ops->irq_on(ap); - /* is double-select really necessary? 
*/ if (classes[0] != ATA_DEV_NONE) ap->ops->dev_select(ap, 1); @@ -3770,6 +3755,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA }, { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */ + { "IOMEGA ZIP 250 ATAPI Floppy", + NULL, ATA_HORKAGE_NODMA }, /* Weird ATAPI devices */ { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, @@ -3783,7 +3770,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, /* NCQ is broken */ { "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ }, + { "Maxtor 6B200M0", "BANC1BM0", ATA_HORKAGE_NONCQ }, { "Maxtor 6B200M0", "BANC1B10", ATA_HORKAGE_NONCQ }, + { "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI", + ATA_HORKAGE_NONCQ }, /* NCQ hard hangs device under heavier load, needs hard power cycle */ { "Maxtor 6B250S0", "BANC1B70", ATA_HORKAGE_NONCQ }, /* Blacklist entries taken from Silicon Image 3124/3132 @@ -3796,6 +3786,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, }, { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, }, { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, + { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, }, + { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, }, /* Devices with NCQ limits */ @@ -3803,7 +3795,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { } }; -unsigned long ata_device_blacklisted(const struct ata_device *dev) +static unsigned long ata_dev_blacklisted(const struct ata_device *dev) { unsigned char model_num[ATA_ID_PROD_LEN + 1]; unsigned char model_rev[ATA_ID_FW_REV_LEN + 1]; @@ -3833,7 +3825,7 @@ static int ata_dma_blacklisted(const struct ata_device *dev) if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) && (dev->flags & ATA_DFLAG_CDB_INTR)) return 1; - return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0; + return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0; } /** @@ -5756,10 +5748,8 @@ int sata_scr_valid(struct ata_port *ap) */ int sata_scr_read(struct ata_port *ap, int reg, u32 *val) { - if (sata_scr_valid(ap)) { - *val = ap->ops->scr_read(ap, reg); - return 0; - } + if (sata_scr_valid(ap)) + return ap->ops->scr_read(ap, reg, val); return -EOPNOTSUPP; } @@ -5781,10 +5771,8 @@ int sata_scr_read(struct ata_port *ap, int reg, u32 *val) */ int sata_scr_write(struct ata_port *ap, int reg, u32 val) { - if (sata_scr_valid(ap)) { - ap->ops->scr_write(ap, reg, val); - return 0; - } + if (sata_scr_valid(ap)) + return ap->ops->scr_write(ap, reg, val); return -EOPNOTSUPP; } @@ -5805,10 +5793,13 @@ int sata_scr_write(struct ata_port *ap, int reg, u32 val) */ int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val) { + int rc; + if (sata_scr_valid(ap)) { - ap->ops->scr_write(ap, reg, val); - ap->ops->scr_read(ap, reg); - return 0; + rc = ap->ops->scr_write(ap, reg, val); + if (rc == 0) + rc = ap->ops->scr_read(ap, reg, &val); + return rc; } return -EOPNOTSUPP; } @@ -6020,6 +6011,7 @@ void ata_dev_init(struct ata_device *dev) /* SATA spd limit is bound to the first device */ ap->sata_spd_limit = ap->hw_sata_spd_limit; + ap->sata_spd = 0; /* High bits of dev->flags are used to record warm plug * requests which occur asynchronously. 
Synchronize using @@ -6085,6 +6077,9 @@ struct ata_port *ata_port_alloc(struct ata_host *host) INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); INIT_LIST_HEAD(&ap->eh_done_q); init_waitqueue_head(&ap->eh_wait_q); + init_timer_deferrable(&ap->fastdrain_timer); + ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn; + ap->fastdrain_timer.data = (unsigned long)ap; ap->cbl = ATA_CBL_NONE; @@ -6461,7 +6456,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; - ata_scsi_scan_host(ap); + ata_scsi_scan_host(ap, 1); } return 0; @@ -6557,13 +6552,7 @@ void ata_port_detach(struct ata_port *ap) spin_unlock_irqrestore(ap->lock, flags); ata_port_wait_eh(ap); - - /* Flush hotplug task. The sequence is similar to - * ata_port_flush_task(). - */ - cancel_work_sync(&ap->hotplug_task.work); /* akpm: why? */ - cancel_delayed_work(&ap->hotplug_task); - cancel_work_sync(&ap->hotplug_task.work); + cancel_rearming_delayed_work(&ap->hotplug_task); skip_eh: /* remove the associated SCSI host */ @@ -6952,7 +6941,6 @@ EXPORT_SYMBOL_GPL(ata_host_resume); EXPORT_SYMBOL_GPL(ata_id_string); EXPORT_SYMBOL_GPL(ata_id_c_string); EXPORT_SYMBOL_GPL(ata_id_to_dma_mode); -EXPORT_SYMBOL_GPL(ata_device_blacklisted); EXPORT_SYMBOL_GPL(ata_scsi_simulate); EXPORT_SYMBOL_GPL(ata_pio_need_iordy); @@ -6961,9 +6949,9 @@ EXPORT_SYMBOL_GPL(ata_timing_merge); #ifdef CONFIG_PCI EXPORT_SYMBOL_GPL(pci_test_config_bits); -EXPORT_SYMBOL_GPL(ata_pci_init_native_host); +EXPORT_SYMBOL_GPL(ata_pci_init_sff_host); EXPORT_SYMBOL_GPL(ata_pci_init_bmdma); -EXPORT_SYMBOL_GPL(ata_pci_prepare_native_host); +EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host); EXPORT_SYMBOL_GPL(ata_pci_init_one); EXPORT_SYMBOL_GPL(ata_pci_remove_one); #ifdef CONFIG_PM @@ -6976,6 +6964,9 @@ EXPORT_SYMBOL_GPL(ata_pci_default_filter); EXPORT_SYMBOL_GPL(ata_pci_clear_simplex); #endif /* CONFIG_PCI */ +EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); +EXPORT_SYMBOL_GPL(ata_ehi_push_desc); +EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); EXPORT_SYMBOL_GPL(ata_eng_timeout); EXPORT_SYMBOL_GPL(ata_port_schedule_eh); EXPORT_SYMBOL_GPL(ata_port_abort); diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 9ee0a8c08d9..ac6ceed4bb6 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -56,6 +56,7 @@ enum { */ enum { ATA_EH_PRERESET_TIMEOUT = 10 * HZ, + ATA_EH_FASTDRAIN_INTERVAL = 3 * HZ, }; /* The following table determines how we sequence resets. Each entry @@ -85,6 +86,71 @@ static void ata_eh_handle_port_resume(struct ata_port *ap) { } #endif /* CONFIG_PM */ +static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt, + va_list args) +{ + ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len, + ATA_EH_DESC_LEN - ehi->desc_len, + fmt, args); +} + +/** + * __ata_ehi_push_desc - push error description without adding separator + * @ehi: target EHI + * @fmt: printf format string + * + * Format string according to @fmt and append it to @ehi->desc. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + __ata_ehi_pushv_desc(ehi, fmt, args); + va_end(args); +} + +/** + * ata_ehi_push_desc - push error description with separator + * @ehi: target EHI + * @fmt: printf format string + * + * Format string according to @fmt and append it to @ehi->desc. + * If @ehi->desc is not empty, ", " is added in-between. 
+ * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) +{ + va_list args; + + if (ehi->desc_len) + __ata_ehi_push_desc(ehi, ", "); + + va_start(args, fmt); + __ata_ehi_pushv_desc(ehi, fmt, args); + va_end(args); +} + +/** + * ata_ehi_clear_desc - clean error description + * @ehi: target EHI + * + * Clear @ehi->desc. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +void ata_ehi_clear_desc(struct ata_eh_info *ehi) +{ + ehi->desc[0] = '\0'; + ehi->desc_len = 0; +} + static void ata_ering_record(struct ata_ering *ering, int is_io, unsigned int err_mask) { @@ -296,6 +362,9 @@ void ata_scsi_error(struct Scsi_Host *host) repeat: /* invoke error handler */ if (ap->ops->error_handler) { + /* kill fast drain timer */ + del_timer_sync(&ap->fastdrain_timer); + /* process port resume request */ ata_eh_handle_port_resume(ap); @@ -511,6 +580,94 @@ void ata_eng_timeout(struct ata_port *ap) DPRINTK("EXIT\n"); } +static int ata_eh_nr_in_flight(struct ata_port *ap) +{ + unsigned int tag; + int nr = 0; + + /* count only non-internal commands */ + for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) + if (ata_qc_from_tag(ap, tag)) + nr++; + + return nr; +} + +void ata_eh_fastdrain_timerfn(unsigned long arg) +{ + struct ata_port *ap = (void *)arg; + unsigned long flags; + int cnt; + + spin_lock_irqsave(ap->lock, flags); + + cnt = ata_eh_nr_in_flight(ap); + + /* are we done? */ + if (!cnt) + goto out_unlock; + + if (cnt == ap->fastdrain_cnt) { + unsigned int tag; + + /* No progress during the last interval, tag all + * in-flight qcs as timed out and freeze the port. + */ + for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) { + struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); + if (qc) + qc->err_mask |= AC_ERR_TIMEOUT; + } + + ata_port_freeze(ap); + } else { + /* some qcs have finished, give it another chance */ + ap->fastdrain_cnt = cnt; + ap->fastdrain_timer.expires = + jiffies + ATA_EH_FASTDRAIN_INTERVAL; + add_timer(&ap->fastdrain_timer); + } + + out_unlock: + spin_unlock_irqrestore(ap->lock, flags); +} + +/** + * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain + * @ap: target ATA port + * @fastdrain: activate fast drain + * + * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain + * is non-zero and EH wasn't pending before. Fast drain ensures + * that EH kicks in in timely manner. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +static void ata_eh_set_pending(struct ata_port *ap, int fastdrain) +{ + int cnt; + + /* already scheduled? */ + if (ap->pflags & ATA_PFLAG_EH_PENDING) + return; + + ap->pflags |= ATA_PFLAG_EH_PENDING; + + if (!fastdrain) + return; + + /* do we have in-flight qcs? */ + cnt = ata_eh_nr_in_flight(ap); + if (!cnt) + return; + + /* activate fast drain */ + ap->fastdrain_cnt = cnt; + ap->fastdrain_timer.expires = jiffies + ATA_EH_FASTDRAIN_INTERVAL; + add_timer(&ap->fastdrain_timer); +} + /** * ata_qc_schedule_eh - schedule qc for error handling * @qc: command to schedule error handling for @@ -528,7 +685,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc) WARN_ON(!ap->ops->error_handler); qc->flags |= ATA_QCFLAG_FAILED; - qc->ap->pflags |= ATA_PFLAG_EH_PENDING; + ata_eh_set_pending(ap, 1); /* The following will fail if timeout has already expired. * ata_scsi_error() takes care of such scmds on EH entry. 
@@ -555,7 +712,7 @@ void ata_port_schedule_eh(struct ata_port *ap) if (ap->pflags & ATA_PFLAG_INITIALIZING) return; - ap->pflags |= ATA_PFLAG_EH_PENDING; + ata_eh_set_pending(ap, 1); scsi_schedule_eh(ap->scsi_host); DPRINTK("port EH scheduled\n"); @@ -579,6 +736,9 @@ int ata_port_abort(struct ata_port *ap) WARN_ON(!ap->ops->error_handler); + /* we're gonna abort all commands, no need for fast drain */ + ata_eh_set_pending(ap, 0); + for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); @@ -1130,7 +1290,7 @@ static void ata_eh_analyze_ncq_error(struct ata_port *ap) /* we've got the perpetrator, condemn it */ qc = __ata_qc_from_tag(ap, tag); memcpy(&qc->result_tf, &tf, sizeof(tf)); - qc->err_mask |= AC_ERR_DEV; + qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; ehc->i.err_mask &= ~AC_ERR_DEV; } @@ -1413,8 +1573,12 @@ static void ata_eh_autopsy(struct ata_port *ap) if (rc == 0) { ehc->i.serror |= serror; ata_eh_analyze_serror(ap); - } else if (rc != -EOPNOTSUPP) + } else if (rc != -EOPNOTSUPP) { + /* SError read failed, force hardreset and probing */ + ata_ehi_schedule_probe(&ehc->i); ehc->i.action |= ATA_EH_HARDRESET; + ehc->i.err_mask |= AC_ERR_OTHER; + } /* analyze NCQ failure */ ata_eh_analyze_ncq_error(ap); @@ -1524,14 +1688,14 @@ static void ata_eh_report(struct ata_port *ap) ehc->i.err_mask, ap->sactive, ehc->i.serror, ehc->i.action, frozen); if (desc) - ata_dev_printk(ehc->i.dev, KERN_ERR, "(%s)\n", desc); + ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc); } else { ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x " "SAct 0x%x SErr 0x%x action 0x%x%s\n", ehc->i.err_mask, ap->sactive, ehc->i.serror, ehc->i.action, frozen); if (desc) - ata_port_printk(ap, KERN_ERR, "(%s)\n", desc); + ata_port_printk(ap, KERN_ERR, "%s\n", desc); } for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { @@ -1551,7 +1715,7 @@ static void ata_eh_report(struct ata_port *ap) "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " "tag %d cdb 0x%x data %u %s\n " "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " - "Emask 0x%x (%s)\n", + "Emask 0x%x (%s)%s\n", cmd->command, cmd->feature, cmd->nsect, cmd->lbal, cmd->lbam, cmd->lbah, cmd->hob_feature, cmd->hob_nsect, @@ -1562,7 +1726,8 @@ static void ata_eh_report(struct ata_port *ap) res->lbal, res->lbam, res->lbah, res->hob_feature, res->hob_nsect, res->hob_lbal, res->hob_lbam, res->hob_lbah, - res->device, qc->err_mask, ata_err_string(qc->err_mask)); + res->device, qc->err_mask, ata_err_string(qc->err_mask), + qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); } } @@ -1648,7 +1813,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify, } else ata_port_printk(ap, KERN_ERR, "prereset failed (errno=%d)\n", rc); - return rc; + goto out; } } @@ -1661,7 +1826,8 @@ static int ata_eh_reset(struct ata_port *ap, int classify, /* prereset told us not to reset, bang classes and return */ for (i = 0; i < ATA_MAX_DEVICES; i++) classes[i] = ATA_DEV_NONE; - return 0; + rc = 0; + goto out; } /* did prereset() screw up? 
if so, fix up to avoid oopsing */ @@ -1697,7 +1863,8 @@ static int ata_eh_reset(struct ata_port *ap, int classify, ata_port_printk(ap, KERN_ERR, "follow-up softreset required " "but no softreset avaliable\n"); - return -EINVAL; + rc = -EINVAL; + goto out; } ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK); @@ -1707,7 +1874,8 @@ static int ata_eh_reset(struct ata_port *ap, int classify, classes[0] == ATA_DEV_UNKNOWN) { ata_port_printk(ap, KERN_ERR, "classification failed\n"); - return -EINVAL; + rc = -EINVAL; + goto out; } } @@ -1724,7 +1892,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify, schedule_timeout_uninterruptible(delta); } - if (reset == hardreset && + if (rc == -EPIPE || try == ARRAY_SIZE(ata_eh_reset_timeouts) - 1) sata_down_spd_limit(ap); if (hardreset) @@ -1733,12 +1901,18 @@ static int ata_eh_reset(struct ata_port *ap, int classify, } if (rc == 0) { + u32 sstatus; + /* After the reset, the device state is PIO 0 and the * controller state is undefined. Record the mode. */ for (i = 0; i < ATA_MAX_DEVICES; i++) ap->device[i].pio_mode = XFER_PIO_0; + /* record current link speed */ + if (sata_scr_read(ap, SCR_STATUS, &sstatus) == 0) + ap->sata_spd = (sstatus >> 4) & 0xf; + if (postreset) postreset(ap, classes); @@ -1746,7 +1920,9 @@ static int ata_eh_reset(struct ata_port *ap, int classify, ata_eh_done(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK); ehc->i.action |= ATA_EH_REVALIDATE; } - + out: + /* clear hotplug flag */ + ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; return rc; } @@ -1897,6 +2073,57 @@ static int ata_eh_skip_recovery(struct ata_port *ap) return 1; } +static void ata_eh_handle_dev_fail(struct ata_device *dev, int err) +{ + struct ata_port *ap = dev->ap; + struct ata_eh_context *ehc = &ap->eh_context; + + ehc->tries[dev->devno]--; + + switch (err) { + case -ENODEV: + /* device missing or wrong IDENTIFY data, schedule probing */ + ehc->i.probe_mask |= (1 << dev->devno); + case -EINVAL: + /* give it just one more chance */ + ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); + case -EIO: + if (ehc->tries[dev->devno] == 1) { + /* This is the last chance, better to slow + * down than lose it. + */ + sata_down_spd_limit(ap); + ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); + } + } + + if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { + /* disable device if it has used up all its chances */ + ata_dev_disable(dev); + + /* detach if offline */ + if (ata_port_offline(ap)) + ata_eh_detach_dev(dev); + + /* probe if requested */ + if ((ehc->i.probe_mask & (1 << dev->devno)) && + !(ehc->did_probe_mask & (1 << dev->devno))) { + ata_eh_detach_dev(dev); + ata_dev_init(dev); + + ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; + ehc->did_probe_mask |= (1 << dev->devno); + ehc->i.action |= ATA_EH_SOFTRESET; + } + } else { + /* soft didn't work? be haaaaard */ + if (ehc->i.flags & ATA_EHI_DID_RESET) + ehc->i.action |= ATA_EH_HARDRESET; + else + ehc->i.action |= ATA_EH_SOFTRESET; + } +} + /** * ata_eh_recover - recover host port after error * @ap: host port to recover @@ -1997,50 +2224,7 @@ static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, goto out; dev_fail: - ehc->tries[dev->devno]--; - - switch (rc) { - case -ENODEV: - /* device missing or wrong IDENTIFY data, schedule probing */ - ehc->i.probe_mask |= (1 << dev->devno); - case -EINVAL: - /* give it just one more chance */ - ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); - case -EIO: - if (ehc->tries[dev->devno] == 1) { - /* This is the last chance, better to slow - * down than lose it. 
- */ - sata_down_spd_limit(ap); - ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); - } - } - - if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { - /* disable device if it has used up all its chances */ - ata_dev_disable(dev); - - /* detach if offline */ - if (ata_port_offline(ap)) - ata_eh_detach_dev(dev); - - /* probe if requested */ - if ((ehc->i.probe_mask & (1 << dev->devno)) && - !(ehc->did_probe_mask & (1 << dev->devno))) { - ata_eh_detach_dev(dev); - ata_dev_init(dev); - - ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; - ehc->did_probe_mask |= (1 << dev->devno); - ehc->i.action |= ATA_EH_SOFTRESET; - } - } else { - /* soft didn't work? be haaaaard */ - if (ehc->i.flags & ATA_EHI_DID_RESET) - ehc->i.action |= ATA_EH_HARDRESET; - else - ehc->i.action |= ATA_EH_SOFTRESET; - } + ata_eh_handle_dev_fail(dev, rc); if (ata_port_nr_enabled(ap)) { ata_port_printk(ap, KERN_WARNING, "failed to recover some " diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index cfde22da07a..12ac0b511f7 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -2947,17 +2947,22 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht) return rc; } -void ata_scsi_scan_host(struct ata_port *ap) +void ata_scsi_scan_host(struct ata_port *ap, int sync) { + int tries = 5; + struct ata_device *last_failed_dev = NULL; + struct ata_device *dev; unsigned int i; if (ap->flags & ATA_FLAG_DISABLED) return; + repeat: for (i = 0; i < ATA_MAX_DEVICES; i++) { - struct ata_device *dev = &ap->device[i]; struct scsi_device *sdev; + dev = &ap->device[i]; + if (!ata_dev_enabled(dev) || dev->sdev) continue; @@ -2967,6 +2972,45 @@ void ata_scsi_scan_host(struct ata_port *ap) scsi_device_put(sdev); } } + + /* If we scanned while EH was in progress or allocation + * failure occurred, scan would have failed silently. Check + * whether all devices are attached. + */ + for (i = 0; i < ATA_MAX_DEVICES; i++) { + dev = &ap->device[i]; + if (ata_dev_enabled(dev) && !dev->sdev) + break; + } + if (i == ATA_MAX_DEVICES) + return; + + /* we're missing some SCSI devices */ + if (sync) { + /* If caller requested synchrnous scan && we've made + * any progress, sleep briefly and repeat. + */ + if (dev != last_failed_dev) { + msleep(100); + last_failed_dev = dev; + goto repeat; + } + + /* We might be failing to detect boot device, give it + * a few more chances. + */ + if (--tries) { + msleep(100); + goto repeat; + } + + ata_port_printk(ap, KERN_ERR, "WARNING: synchronous SCSI scan " + "failed without making any progress,\n" + " switching to async\n"); + } + + queue_delayed_work(ata_aux_wq, &ap->hotplug_task, + round_jiffies_relative(HZ)); } /** @@ -3093,20 +3137,7 @@ void ata_scsi_hotplug(struct work_struct *work) } /* scan for new ones */ - ata_scsi_scan_host(ap); - - /* If we scanned while EH was in progress, scan would have - * failed silently. Requeue if there are enabled but - * unattached devices. 
- */ - for (i = 0; i < ATA_MAX_DEVICES; i++) { - struct ata_device *dev = &ap->device[i]; - if (ata_dev_enabled(dev) && !dev->sdev) { - queue_delayed_work(ata_aux_wq, &ap->hotplug_task, - round_jiffies_relative(HZ)); - break; - } - } + ata_scsi_scan_host(ap, 0); DPRINTK("EXIT\n"); } diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index fa1c22c7b38..6c289c7b132 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -1,5 +1,5 @@ /* - * libata-bmdma.c - helper library for PCI IDE BMDMA + * libata-sff.c - helper library for PCI IDE BMDMA * * Maintained by: Jeff Garzik <jgarzik@pobox.com> * Please ALWAYS copy linux-ide@vger.kernel.org @@ -211,6 +211,8 @@ void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) tf->hob_lbal = ioread8(ioaddr->lbal_addr); tf->hob_lbam = ioread8(ioaddr->lbam_addr); tf->hob_lbah = ioread8(ioaddr->lbah_addr); + iowrite8(tf->ctl, ioaddr->ctl_addr); + ap->last_ctl = tf->ctl; } } @@ -604,13 +606,17 @@ int ata_pci_init_bmdma(struct ata_host *host) } /** - * ata_pci_init_native_host - acquire native ATA resources and init host + * ata_pci_init_sff_host - acquire native PCI ATA resources and init host * @host: target ATA host * * Acquire native PCI ATA resources for @host and initialize the * first two ports of @host accordingly. Ports marked dummy are * skipped and allocation failure makes the port dummy. * + * Note that native PCI resources are valid even for legacy hosts + * as we fix up pdev resources array early in boot, so this + * function can be used for both native and legacy SFF hosts. + * * LOCKING: * Inherited from calling layer (may sleep). * @@ -618,7 +624,7 @@ int ata_pci_init_bmdma(struct ata_host *host) * 0 if at least one port is initialized, -ENODEV if no port is * available. */ -int ata_pci_init_native_host(struct ata_host *host) +int ata_pci_init_sff_host(struct ata_host *host) { struct device *gdev = host->dev; struct pci_dev *pdev = to_pci_dev(gdev); @@ -673,7 +679,7 @@ int ata_pci_init_native_host(struct ata_host *host) } /** - * ata_pci_prepare_native_host - helper to prepare native PCI ATA host + * ata_pci_prepare_sff_host - helper to prepare native PCI ATA host * @pdev: target PCI device * @ppi: array of port_info, must be enough for two ports * @r_host: out argument for the initialized ATA host @@ -687,9 +693,9 @@ int ata_pci_init_native_host(struct ata_host *host) * RETURNS: * 0 on success, -errno otherwise. 
*/ -int ata_pci_prepare_native_host(struct pci_dev *pdev, - const struct ata_port_info * const * ppi, - struct ata_host **r_host) +int ata_pci_prepare_sff_host(struct pci_dev *pdev, + const struct ata_port_info * const * ppi, + struct ata_host **r_host) { struct ata_host *host; int rc; @@ -705,7 +711,7 @@ int ata_pci_prepare_native_host(struct pci_dev *pdev, goto err_out; } - rc = ata_pci_init_native_host(host); + rc = ata_pci_init_sff_host(host); if (rc) goto err_out; @@ -730,221 +736,6 @@ int ata_pci_prepare_native_host(struct pci_dev *pdev, return rc; } -struct ata_legacy_devres { - unsigned int mask; - unsigned long cmd_port[2]; - void __iomem * cmd_addr[2]; - void __iomem * ctl_addr[2]; - unsigned int irq[2]; - void * irq_dev_id[2]; -}; - -static void ata_legacy_free_irqs(struct ata_legacy_devres *legacy_dr) -{ - int i; - - for (i = 0; i < 2; i++) { - if (!legacy_dr->irq[i]) - continue; - - free_irq(legacy_dr->irq[i], legacy_dr->irq_dev_id[i]); - legacy_dr->irq[i] = 0; - legacy_dr->irq_dev_id[i] = NULL; - } -} - -static void ata_legacy_release(struct device *gdev, void *res) -{ - struct ata_legacy_devres *this = res; - int i; - - ata_legacy_free_irqs(this); - - for (i = 0; i < 2; i++) { - if (this->cmd_addr[i]) - ioport_unmap(this->cmd_addr[i]); - if (this->ctl_addr[i]) - ioport_unmap(this->ctl_addr[i]); - if (this->cmd_port[i]) - release_region(this->cmd_port[i], 8); - } -} - -static int ata_init_legacy_port(struct ata_port *ap, - struct ata_legacy_devres *legacy_dr) -{ - struct ata_host *host = ap->host; - int port_no = ap->port_no; - unsigned long cmd_port, ctl_port; - - if (port_no == 0) { - cmd_port = ATA_PRIMARY_CMD; - ctl_port = ATA_PRIMARY_CTL; - } else { - cmd_port = ATA_SECONDARY_CMD; - ctl_port = ATA_SECONDARY_CTL; - } - - /* request cmd_port */ - if (request_region(cmd_port, 8, "libata")) - legacy_dr->cmd_port[port_no] = cmd_port; - else { - dev_printk(KERN_WARNING, host->dev, - "0x%0lX IDE port busy\n", cmd_port); - return -EBUSY; - } - - /* iomap cmd and ctl ports */ - legacy_dr->cmd_addr[port_no] = ioport_map(cmd_port, 8); - legacy_dr->ctl_addr[port_no] = ioport_map(ctl_port, 1); - if (!legacy_dr->cmd_addr[port_no] || !legacy_dr->ctl_addr[port_no]) { - dev_printk(KERN_WARNING, host->dev, - "failed to map cmd/ctl ports\n"); - return -ENOMEM; - } - - /* init IO addresses */ - ap->ioaddr.cmd_addr = legacy_dr->cmd_addr[port_no]; - ap->ioaddr.altstatus_addr = legacy_dr->ctl_addr[port_no]; - ap->ioaddr.ctl_addr = legacy_dr->ctl_addr[port_no]; - ata_std_ports(&ap->ioaddr); - - return 0; -} - -/** - * ata_init_legacy_host - acquire legacy ATA resources and init ATA host - * @host: target ATA host - * @was_busy: out parameter, indicates whether any port was busy - * - * Acquire legacy ATA resources for the first two ports of @host - * and initialize it accordingly. Ports marked dummy are skipped - * and resource acquistion failure makes the port dummy. - * - * LOCKING: - * Inherited from calling layer (may sleep). - * - * RETURNS: - * 0 if at least one port is initialized, -ENODEV if no port is - * available. 
- */ -static int ata_init_legacy_host(struct ata_host *host, int *was_busy) -{ - struct device *gdev = host->dev; - struct ata_legacy_devres *legacy_dr; - int i, rc; - - if (!devres_open_group(gdev, NULL, GFP_KERNEL)) - return -ENOMEM; - - rc = -ENOMEM; - legacy_dr = devres_alloc(ata_legacy_release, sizeof(*legacy_dr), - GFP_KERNEL); - if (!legacy_dr) - goto err_out; - devres_add(gdev, legacy_dr); - - for (i = 0; i < 2; i++) { - if (ata_port_is_dummy(host->ports[i])) - continue; - - rc = ata_init_legacy_port(host->ports[i], legacy_dr); - if (rc == 0) - legacy_dr->mask |= 1 << i; - else { - if (rc == -EBUSY) - (*was_busy)++; - host->ports[i]->ops = &ata_dummy_port_ops; - } - } - - if (!legacy_dr->mask) { - dev_printk(KERN_ERR, gdev, "no available legacy port\n"); - return -ENODEV; - } - - devres_remove_group(gdev, NULL); - return 0; - - err_out: - devres_release_group(gdev, NULL); - return rc; -} - -/** - * ata_request_legacy_irqs - request legacy ATA IRQs - * @host: target ATA host - * @handler: array of IRQ handlers - * @irq_flags: array of IRQ flags - * @dev_id: array of IRQ dev_ids - * - * Request legacy IRQs for non-dummy legacy ports in @host. All - * IRQ parameters are passed as array to allow ports to have - * separate IRQ handlers. - * - * LOCKING: - * Inherited from calling layer (may sleep). - * - * RETURNS: - * 0 on success, -errno otherwise. - */ -static int ata_request_legacy_irqs(struct ata_host *host, - irq_handler_t const *handler, - const unsigned int *irq_flags, - void * const *dev_id) -{ - struct device *gdev = host->dev; - struct ata_legacy_devres *legacy_dr; - int i, rc; - - legacy_dr = devres_find(host->dev, ata_legacy_release, NULL, NULL); - BUG_ON(!legacy_dr); - - for (i = 0; i < 2; i++) { - unsigned int irq; - - /* FIXME: ATA_*_IRQ() should take generic device not pci_dev */ - if (i == 0) - irq = ATA_PRIMARY_IRQ(to_pci_dev(gdev)); - else - irq = ATA_SECONDARY_IRQ(to_pci_dev(gdev)); - - if (!(legacy_dr->mask & (1 << i))) - continue; - - if (!handler[i]) { - dev_printk(KERN_ERR, gdev, - "NULL handler specified for port %d\n", i); - rc = -EINVAL; - goto err_out; - } - - rc = request_irq(irq, handler[i], irq_flags[i], DRV_NAME, - dev_id[i]); - if (rc) { - dev_printk(KERN_ERR, gdev, - "irq %u request failed (errno=%d)\n", irq, rc); - goto err_out; - } - - /* record irq allocation in legacy_dr */ - legacy_dr->irq[i] = irq; - legacy_dr->irq_dev_id[i] = dev_id[i]; - - /* only used to print info */ - if (i == 0) - host->irq = irq; - else - host->irq2 = irq; - } - - return 0; - - err_out: - ata_legacy_free_irqs(legacy_dr); - return rc; -} - /** * ata_pci_init_one - Initialize/register PCI IDE host controller * @pdev: Controller to be initialized @@ -1029,35 +820,11 @@ int ata_pci_init_one(struct pci_dev *pdev, #endif } - /* alloc and init host */ - host = ata_host_alloc_pinfo(dev, ppi, 2); - if (!host) { - dev_printk(KERN_ERR, &pdev->dev, - "failed to allocate ATA host\n"); - rc = -ENOMEM; + /* prepare host */ + rc = ata_pci_prepare_sff_host(pdev, ppi, &host); + if (rc) goto err_out; - } - if (!legacy_mode) { - rc = ata_pci_init_native_host(host); - if (rc) - goto err_out; - } else { - int was_busy = 0; - - rc = ata_init_legacy_host(host, &was_busy); - if (was_busy) - pcim_pin_device(pdev); - if (rc) - goto err_out; - - /* request respective PCI regions, may fail */ - rc = pci_request_region(pdev, 1, DRV_NAME); - rc = pci_request_region(pdev, 3, DRV_NAME); - } - - /* init BMDMA, may fail */ - ata_pci_init_bmdma(host); pci_set_master(pdev); /* start host and request IRQ */ @@ 
-1068,17 +835,28 @@ int ata_pci_init_one(struct pci_dev *pdev, if (!legacy_mode) { rc = devm_request_irq(dev, pdev->irq, pi->port_ops->irq_handler, IRQF_SHARED, DRV_NAME, host); + if (rc) + goto err_out; host->irq = pdev->irq; } else { - irq_handler_t handler[2] = { host->ops->irq_handler, - host->ops->irq_handler }; - unsigned int irq_flags[2] = { IRQF_SHARED, IRQF_SHARED }; - void *dev_id[2] = { host, host }; + if (!ata_port_is_dummy(host->ports[0])) { + host->irq = ATA_PRIMARY_IRQ(pdev); + rc = devm_request_irq(dev, host->irq, + pi->port_ops->irq_handler, + IRQF_SHARED, DRV_NAME, host); + if (rc) + goto err_out; + } - rc = ata_request_legacy_irqs(host, handler, irq_flags, dev_id); + if (!ata_port_is_dummy(host->ports[1])) { + host->irq2 = ATA_SECONDARY_IRQ(pdev); + rc = devm_request_irq(dev, host->irq2, + pi->port_ops->irq_handler, + IRQF_SHARED, DRV_NAME, host); + if (rc) + goto err_out; + } } - if (rc) - goto err_out; /* register */ rc = ata_host_register(host, pi->sht); diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index ba17fc5f2e9..564cd234c80 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h @@ -112,7 +112,7 @@ static inline int ata_acpi_on_devcfg(struct ata_device *adev) { return 0; } /* libata-scsi.c */ extern int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht); -extern void ata_scsi_scan_host(struct ata_port *ap); +extern void ata_scsi_scan_host(struct ata_port *ap, int sync); extern int ata_scsi_offline_dev(struct ata_device *dev); extern void ata_scsi_hotplug(struct work_struct *work); extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, @@ -151,6 +151,7 @@ extern int ata_bus_probe(struct ata_port *ap); extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd); extern void ata_scsi_error(struct Scsi_Host *host); extern void ata_port_wait_eh(struct ata_port *ap); +extern void ata_eh_fastdrain_timerfn(unsigned long arg); extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc); /* libata-sff.c */ diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c index 30c4276ec88..010436795d2 100644 --- a/drivers/ata/pata_ali.c +++ b/drivers/ata/pata_ali.c @@ -455,23 +455,21 @@ static struct ata_port_operations ali_c5_port_ops = { static void ali_init_chipset(struct pci_dev *pdev) { - u8 rev, tmp; + u8 tmp; struct pci_dev *north, *isa_bridge; - pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); - /* * The chipset revision selects the driver operations and * mode data. */ - if (rev >= 0x20 && rev < 0xC2) { + if (pdev->revision >= 0x20 && pdev->revision < 0xC2) { /* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */ pci_read_config_byte(pdev, 0x4B, &tmp); /* Clear CD-ROM DMA write bit */ tmp &= 0x7F; pci_write_config_byte(pdev, 0x4B, tmp); - } else if (rev >= 0xC2) { + } else if (pdev->revision >= 0xC2) { /* Enable cable detection logic */ pci_read_config_byte(pdev, 0x4B, &tmp); pci_write_config_byte(pdev, 0x4B, tmp | 0x08); @@ -483,21 +481,21 @@ static void ali_init_chipset(struct pci_dev *pdev) /* Configure the ALi bridge logic. For non ALi rely on BIOS. Set the south bridge enable bit */ pci_read_config_byte(isa_bridge, 0x79, &tmp); - if (rev == 0xC2) + if (pdev->revision == 0xC2) pci_write_config_byte(isa_bridge, 0x79, tmp | 0x04); - else if (rev > 0xC2 && rev < 0xC5) + else if (pdev->revision > 0xC2 && pdev->revision < 0xC5) pci_write_config_byte(isa_bridge, 0x79, tmp | 0x02); } - if (rev >= 0x20) { + if (pdev->revision >= 0x20) { /* * CD_ROM DMA on (0x53 bit 0). Enable this even if we want * to use PIO. 
0x53 bit 1 (rev 20 only) - enable FIFO control * via 0x54/55. */ pci_read_config_byte(pdev, 0x53, &tmp); - if (rev <= 0x20) + if (pdev->revision <= 0x20) tmp &= ~0x02; - if (rev >= 0xc7) + if (pdev->revision >= 0xc7) tmp |= 0x03; else tmp |= 0x01; /* CD_ROM enable for DMA */ @@ -579,25 +577,23 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) }; const struct ata_port_info *ppi[] = { NULL, NULL }; - u8 rev, tmp; + u8 tmp; struct pci_dev *isa_bridge; - pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); - /* * The chipset revision selects the driver operations and * mode data. */ - if (rev < 0x20) { + if (pdev->revision < 0x20) { ppi[0] = &info_early; - } else if (rev < 0xC2) { + } else if (pdev->revision < 0xC2) { ppi[0] = &info_20; - } else if (rev == 0xC2) { + } else if (pdev->revision == 0xC2) { ppi[0] = &info_c2; - } else if (rev == 0xC3) { + } else if (pdev->revision == 0xC3) { ppi[0] = &info_c3; - } else if (rev == 0xC4) { + } else if (pdev->revision == 0xC4) { ppi[0] = &info_c4; } else ppi[0] = &info_c5; @@ -605,7 +601,7 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) ali_init_chipset(pdev); isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); - if (isa_bridge && rev >= 0x20 && rev < 0xC2) { + if (isa_bridge && pdev->revision >= 0x20 && pdev->revision < 0xC2) { /* Are we paired with a UDMA capable chip */ pci_read_config_byte(isa_bridge, 0x5E, &tmp); if ((tmp & 0x1E) == 0x12) diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c index b9c44c575ce..b09facad63e 100644 --- a/drivers/ata/pata_amd.c +++ b/drivers/ata/pata_amd.c @@ -623,17 +623,15 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) const struct ata_port_info *ppi[] = { NULL, NULL }; static int printed_version; int type = id->driver_data; - u8 rev; u8 fifo; if (!printed_version++) dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); - pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); pci_read_config_byte(pdev, 0x41, &fifo); /* Check for AMD7409 without swdma errata and if found adjust type */ - if (type == 1 && rev > 0x7) + if (type == 1 && pdev->revision > 0x7) type = 2; /* Check for AMD7411 */ diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c index 6bf037d82b5..7dc76e71bd5 100644 --- a/drivers/ata/pata_cs5520.c +++ b/drivers/ata/pata_cs5520.c @@ -275,7 +275,7 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi for (i = 0; i < 2; i++) { static const int irq[] = { 14, 15 }; - struct ata_port *ap = host->ports[0]; + struct ata_port *ap = host->ports[i]; if (ata_port_is_dummy(ap)) continue; diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c index 3fca5898642..68f150a1e2f 100644 --- a/drivers/ata/pata_cs5530.c +++ b/drivers/ata/pata_cs5530.c @@ -266,7 +266,7 @@ static int cs5530_init_chip(void) } pci_set_master(cs5530_0); - pci_set_mwi(cs5530_0); + pci_try_set_mwi(cs5530_0); /* * Set PCI CacheLineSize to 16-bytes: diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c index d928c910503..be0f05efac6 100644 --- a/drivers/ata/pata_hpt3x3.c +++ b/drivers/ata/pata_hpt3x3.c @@ -23,7 +23,7 @@ #include <linux/libata.h> #define DRV_NAME "pata_hpt3x3" -#define DRV_VERSION "0.4.3" +#define DRV_VERSION "0.5.3" /** * hpt3x3_set_piomode - PIO setup @@ -52,6 +52,7 @@ static void hpt3x3_set_piomode(struct ata_port *ap, struct ata_device *adev) pci_write_config_dword(pdev, 0x48, r2); } +#if defined(CONFIG_PATA_HPT3X3_DMA) /** * 
hpt3x3_set_dmamode - DMA timing setup * @ap: ATA interface @@ -59,6 +60,9 @@ static void hpt3x3_set_piomode(struct ata_port *ap, struct ata_device *adev) * * Set up the channel for MWDMA or UDMA modes. Much the same as with * PIO, load the mode number and then set MWDMA or UDMA flag. + * + * 0x44 : bit 0-2 master mode, 3-5 slave mode, etc + * 0x48 : bit 4/0 DMA/UDMA bit 5/1 for slave etc */ static void hpt3x3_set_dmamode(struct ata_port *ap, struct ata_device *adev) @@ -76,13 +80,26 @@ static void hpt3x3_set_dmamode(struct ata_port *ap, struct ata_device *adev) r2 &= ~(0x11 << dn); /* Clear MWDMA and UDMA bits */ if (adev->dma_mode >= XFER_UDMA_0) - r2 |= 0x01 << dn; /* Ultra mode */ + r2 |= (0x10 << dn); /* Ultra mode */ else - r2 |= 0x10 << dn; /* MWDMA */ + r2 |= (0x01 << dn); /* MWDMA */ pci_write_config_dword(pdev, 0x44, r1); pci_write_config_dword(pdev, 0x48, r2); } +#endif /* CONFIG_PATA_HPT3X3_DMA */ + +/** + * hpt3x3_atapi_dma - ATAPI DMA check + * @qc: Queued command + * + * Just say no - we don't do ATAPI DMA + */ + +static int hpt3x3_atapi_dma(struct ata_queued_cmd *qc) +{ + return 1; +} static struct scsi_host_template hpt3x3_sht = { .module = THIS_MODULE, @@ -105,7 +122,9 @@ static struct scsi_host_template hpt3x3_sht = { static struct ata_port_operations hpt3x3_port_ops = { .port_disable = ata_port_disable, .set_piomode = hpt3x3_set_piomode, +#if defined(CONFIG_PATA_HPT3X3_DMA) .set_dmamode = hpt3x3_set_dmamode, +#endif .mode_filter = ata_pci_default_filter, .tf_load = ata_tf_load, @@ -124,6 +143,7 @@ static struct ata_port_operations hpt3x3_port_ops = { .bmdma_start = ata_bmdma_start, .bmdma_stop = ata_bmdma_stop, .bmdma_status = ata_bmdma_status, + .check_atapi_dma= hpt3x3_atapi_dma, .qc_prep = ata_qc_prep, .qc_issue = ata_qc_issue_prot, @@ -158,32 +178,79 @@ static void hpt3x3_init_chipset(struct pci_dev *dev) pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20); } - /** * hpt3x3_init_one - Initialise an HPT343/363 - * @dev: PCI device + * @pdev: PCI device * @id: Entry in match table * - * Perform basic initialisation. The chip has a quirk that it won't - * function unless it is at XX00. The old ATA driver touched this up - * but we leave it for pci quirks to do properly. + * Perform basic initialisation. We set the device up so we access all + * ports via BAR4. This is neccessary to work around errata. 
*/ -static int hpt3x3_init_one(struct pci_dev *dev, const struct pci_device_id *id) +static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { + static int printed_version; static const struct ata_port_info info = { .sht = &hpt3x3_sht, .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = 0x1f, +#if defined(CONFIG_PATA_HPT3X3_DMA) + /* Further debug needed */ .mwdma_mask = 0x07, .udma_mask = 0x07, +#endif .port_ops = &hpt3x3_port_ops }; + /* Register offsets of taskfiles in BAR4 area */ + static const u8 offset_cmd[2] = { 0x20, 0x28 }; + static const u8 offset_ctl[2] = { 0x36, 0x3E }; const struct ata_port_info *ppi[] = { &info, NULL }; - - hpt3x3_init_chipset(dev); - /* Now kick off ATA set up */ - return ata_pci_init_one(dev, ppi); + struct ata_host *host; + int i, rc; + void __iomem *base; + + hpt3x3_init_chipset(pdev); + + if (!printed_version++) + dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); + + host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2); + if (!host) + return -ENOMEM; + /* acquire resources and fill host */ + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + /* Everything is relative to BAR4 if we set up this way */ + rc = pcim_iomap_regions(pdev, 1 << 4, DRV_NAME); + if (rc == -EBUSY) + pcim_pin_device(pdev); + if (rc) + return rc; + host->iomap = pcim_iomap_table(pdev); + rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + return rc; + rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + return rc; + + base = host->iomap[4]; /* Bus mastering base */ + + for (i = 0; i < host->n_ports; i++) { + struct ata_ioports *ioaddr = &host->ports[i]->ioaddr; + + ioaddr->cmd_addr = base + offset_cmd[i]; + ioaddr->altstatus_addr = + ioaddr->ctl_addr = base + offset_ctl[i]; + ioaddr->scr_addr = NULL; + ata_std_ports(ioaddr); + ioaddr->bmdma_addr = base + 8 * i; + } + pci_set_master(pdev); + return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED, + &hpt3x3_sht); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c index b67bbf6516b..430673be1df 100644 --- a/drivers/ata/pata_it821x.c +++ b/drivers/ata/pata_it821x.c @@ -587,8 +587,7 @@ static int it821x_port_start(struct ata_port *ap) itdev->want[1][1] = ATA_ANY; itdev->last_device = -1; - pci_read_config_byte(pdev, PCI_REVISION_ID, &conf); - if (conf == 0x10) { + if (pdev->revision == 0x11) { itdev->timing10 = 1; /* Need to disable ATAPI DMA for this case */ if (!itdev->smart) diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c index 368fac7d168..182e83c9047 100644 --- a/drivers/ata/pata_mpc52xx.c +++ b/drivers/ata/pata_mpc52xx.c @@ -467,13 +467,27 @@ mpc52xx_ata_remove(struct of_device *op) static int mpc52xx_ata_suspend(struct of_device *op, pm_message_t state) { - return 0; /* FIXME : What to do here ? */ + struct ata_host *host = dev_get_drvdata(&op->dev); + + return ata_host_suspend(host, state); } static int mpc52xx_ata_resume(struct of_device *op) { - return 0; /* FIXME : What to do here ? 
*/ + struct ata_host *host = dev_get_drvdata(&op->dev); + struct mpc52xx_ata_priv *priv = host->private_data; + int rv; + + rv = mpc52xx_ata_hw_init(priv); + if (rv) { + printk(KERN_ERR DRV_NAME ": Error during HW init\n"); + return rv; + } + + ata_host_resume(host); + + return 0; } #endif diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c index a56257c98fe..6da23feed03 100644 --- a/drivers/ata/pata_pcmcia.c +++ b/drivers/ata/pata_pcmcia.c @@ -382,6 +382,7 @@ static struct pcmcia_device_id pcmcia_devices[] = { PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178), PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178), PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), + PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e), PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674), diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c index 79f841bca59..a909f793ffc 100644 --- a/drivers/ata/pata_platform.c +++ b/drivers/ata/pata_platform.c @@ -213,8 +213,9 @@ static int __devinit pata_platform_probe(struct platform_device *pdev) pata_platform_setup_port(&ap->ioaddr, pp_info); /* activate */ - return ata_host_activate(host, platform_get_irq(pdev, 0), ata_interrupt, - pp_info->irq_flags, &pata_platform_sht); + return ata_host_activate(host, platform_get_irq(pdev, 0), + ata_interrupt, pp_info ? pp_info->irq_flags + : 0, &pata_platform_sht); } /** diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c index 61502bc7bf1..36cdbd2b0bd 100644 --- a/drivers/ata/pata_scc.c +++ b/drivers/ata/pata_scc.c @@ -258,6 +258,17 @@ static void scc_set_dmamode (struct ata_port *ap, struct ata_device *adev) JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx]); } +unsigned long scc_mode_filter(struct ata_device *adev, unsigned long mask) +{ + /* errata A308 workaround: limit ATAPI UDMA mode to UDMA4 */ + if (adev->class == ATA_DEV_ATAPI && + (mask & (0xE0 << ATA_SHIFT_UDMA))) { + printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME); + mask &= ~(0xE0 << ATA_SHIFT_UDMA); + } + return ata_pci_default_filter(adev, mask); +} + /** * scc_tf_load - send taskfile registers to host controller * @ap: Port to which output is sent @@ -352,6 +363,8 @@ static void scc_tf_read (struct ata_port *ap, struct ata_taskfile *tf) tf->hob_lbal = in_be32(ioaddr->lbal_addr); tf->hob_lbam = in_be32(ioaddr->lbam_addr); tf->hob_lbah = in_be32(ioaddr->lbah_addr); + out_be32(ioaddr->ctl_addr, tf->ctl); + ap->last_ctl = tf->ctl; } } @@ -724,22 +737,36 @@ static void scc_bmdma_stop (struct ata_queued_cmd *qc) static u8 scc_bmdma_status (struct ata_port *ap) { - u8 host_stat; void __iomem *mmio = ap->ioaddr.bmdma_addr; - - host_stat = in_be32(mmio + SCC_DMA_STATUS); - - /* Workaround for PTERADD: emulate DMA_INTR when - * - IDE_STATUS[ERR] = 1 - * - INT_STATUS[INTRQ] = 1 - * - DMA_STATUS[IORACTA] = 1 - */ - if (!(host_stat & ATA_DMA_INTR)) { - u32 int_status = in_be32(mmio + SCC_DMA_INTST); - if (ata_altstatus(ap) & ATA_ERR && - int_status & INTSTS_INTRQ && - host_stat & ATA_DMA_ACTIVE) - host_stat |= ATA_DMA_INTR; + u8 host_stat = in_be32(mmio + SCC_DMA_STATUS); + u32 int_status = in_be32(mmio + SCC_DMA_INTST); + struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag); + static int retry = 0; + + /* return if IOS_SS is cleared */ + if (!(in_be32(mmio + SCC_DMA_CMD) & 
ATA_DMA_START)) + return host_stat; + + /* errata A252,A308 workaround: Step4 */ + if ((ata_altstatus(ap) & ATA_ERR) && (int_status & INTSTS_INTRQ)) + return (host_stat | ATA_DMA_INTR); + + /* errata A308 workaround Step5 */ + if (int_status & INTSTS_IOIRQS) { + host_stat |= ATA_DMA_INTR; + + /* We don't check ATAPI DMA because it is limited to UDMA4 */ + if ((qc->tf.protocol == ATA_PROT_DMA && + qc->dev->xfer_mode > XFER_UDMA_4)) { + if (!(int_status & INTSTS_ACTEINT)) { + printk(KERN_WARNING "ata%u: operation failed (transfer data loss)\n", + ap->print_id); + host_stat |= ATA_DMA_ERR; + if (retry++) + ap->udma_mask &= ~(1 << qc->dev->xfer_mode); + } else + retry = 0; + } } return host_stat; @@ -892,10 +919,6 @@ static void scc_std_postreset (struct ata_port *ap, unsigned int *classes) { DPRINTK("ENTER\n"); - /* re-enable interrupts */ - if (!ap->ops->error_handler) - ap->ops->irq_on(ap); - /* is double-select really necessary? */ if (classes[0] != ATA_DEV_NONE) ap->ops->dev_select(ap, 1); @@ -1000,7 +1023,7 @@ static const struct ata_port_operations scc_pata_ops = { .port_disable = ata_port_disable, .set_piomode = scc_set_piomode, .set_dmamode = scc_set_dmamode, - .mode_filter = ata_pci_default_filter, + .mode_filter = scc_mode_filter, .tf_load = scc_tf_load, .tf_read = scc_tf_read, diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c index 0231aba51ca..89691541fe5 100644 --- a/drivers/ata/pata_serverworks.c +++ b/drivers/ata/pata_serverworks.c @@ -410,11 +410,8 @@ static int serverworks_fixup_osb4(struct pci_dev *pdev) static int serverworks_fixup_csb(struct pci_dev *pdev) { - u8 rev; u8 btr; - pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); - /* Third Channel Test */ if (!(PCI_FUNC(pdev->devfn) & 1)) { struct pci_dev * findev = NULL; @@ -456,7 +453,7 @@ static int serverworks_fixup_csb(struct pci_dev *pdev) if (!(PCI_FUNC(pdev->devfn) & 1)) btr |= 0x2; else - btr |= (rev >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2; + btr |= (pdev->revision >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2; pci_write_config_byte(pdev, 0x5A, btr); return btr; diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c index 2b4508206a6..9a829a7cbc6 100644 --- a/drivers/ata/pata_sis.c +++ b/drivers/ata/pata_sis.c @@ -149,6 +149,9 @@ static int sis_pre_reset(struct ata_port *ap, unsigned long deadline) if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no])) return -ENOENT; + /* Clear the FIFO settings. 
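To make the mask arithmetic of scc_mode_filter in the pata_scc hunks above concrete: each UDMA mode owns one bit above ATA_SHIFT_UDMA, so 0xE0 << ATA_SHIFT_UDMA covers UDMA5-7, and clearing it caps ATAPI devices at UDMA4 as the errata A308 comment says. A stand-alone sketch of the arithmetic only (the UDMA_BIT helper and the ATA_SHIFT_UDMA value are stand-ins for this sketch, not taken from <linux/ata.h>):

#include <stdio.h>

#define ATA_SHIFT_UDMA  16U             /* stand-in value for this sketch only */
#define UDMA_BIT(n)     (1UL << (ATA_SHIFT_UDMA + (n)))

int main(void)
{
    unsigned long mask = 0;

    /* device advertises UDMA0-6 */
    for (int n = 0; n <= 6; n++)
        mask |= UDMA_BIT(n);

    /* errata A308 style workaround: drop UDMA5-7 for ATAPI */
    if (mask & (0xE0UL << ATA_SHIFT_UDMA))
        mask &= ~(0xE0UL << ATA_SHIFT_UDMA);

    for (int n = 0; n <= 6; n++)
        printf("UDMA%d %s\n", n, (mask & UDMA_BIT(n)) ? "allowed" : "filtered");
    return 0;
}
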
We can't enable the FIFO until + we know we are poking at a disk */ + pci_write_config_byte(pdev, 0x4B, 0); return ata_std_prereset(ap, deadline); } @@ -928,9 +931,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) if (host != NULL) { chipset = sets; /* Match found */ if (sets->device == 0x630) { /* SIS630 */ - u8 host_rev; - pci_read_config_byte(host, PCI_REVISION_ID, &host_rev); - if (host_rev >= 0x30) /* 630 ET */ + if (host->revision >= 0x30) /* 630 ET */ chipset = &sis100_early; } break; @@ -974,7 +975,6 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) u16 trueid; u8 prefctl; u8 idecfg; - u8 sbrev; /* Try the second unmasking technique */ pci_read_config_byte(pdev, 0x4a, &idecfg); @@ -987,11 +987,10 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) lpc_bridge = pci_get_slot(pdev->bus, 0x10); /* Bus 0 Dev 2 Fn 0 */ if (lpc_bridge == NULL) break; - pci_read_config_byte(lpc_bridge, PCI_REVISION_ID, &sbrev); pci_read_config_byte(pdev, 0x49, &prefctl); pci_dev_put(lpc_bridge); - if (sbrev == 0x10 && (prefctl & 0x80)) { + if (lpc_bridge->revision == 0x10 && (prefctl & 0x80)) { chipset = &sis133_early; break; } diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c index bde73418962..8c2813aa6cd 100644 --- a/drivers/ata/pata_sl82c105.c +++ b/drivers/ata/pata_sl82c105.c @@ -270,7 +270,6 @@ static struct ata_port_operations sl82c105_port_ops = { static int sl82c105_bridge_revision(struct pci_dev *pdev) { struct pci_dev *bridge; - u8 rev; /* * The bridge should be part of the same device, but function 0. @@ -292,10 +291,8 @@ static int sl82c105_bridge_revision(struct pci_dev *pdev) /* * We need to find function 0's revision, not function 1 */ - pci_read_config_byte(bridge, PCI_REVISION_ID, &rev); - pci_dev_put(bridge); - return rev; + return bridge->revision; } diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index f0cadbe6aa1..f645fe22cd1 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c @@ -506,7 +506,6 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id) struct pci_dev *isa = NULL; const struct via_isa_bridge *config; static int printed_version; - u8 t; u8 enable; u32 timing; @@ -520,9 +519,8 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id) !!(config->flags & VIA_BAD_ID), config->id, NULL))) { - pci_read_config_byte(isa, PCI_REVISION_ID, &t); - if (t >= config->rev_min && - t <= config->rev_max) + if (isa->revision >= config->rev_min && + isa->revision <= config->rev_max) break; pci_dev_put(isa); } diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c index 3de183461c3..a9c948d7604 100644 --- a/drivers/ata/sata_inic162x.c +++ b/drivers/ata/sata_inic162x.c @@ -190,34 +190,34 @@ static void inic_reset_port(void __iomem *port_base) writew(ctl, idma_ctl); } -static u32 inic_scr_read(struct ata_port *ap, unsigned sc_reg) +static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val) { void __iomem *scr_addr = ap->ioaddr.scr_addr; void __iomem *addr; - u32 val; if (unlikely(sc_reg >= ARRAY_SIZE(scr_map))) - return 0xffffffffU; + return -EINVAL; addr = scr_addr + scr_map[sc_reg] * 4; - val = readl(scr_addr + scr_map[sc_reg] * 4); + *val = readl(scr_addr + scr_map[sc_reg] * 4); /* this controller has stuck DIAG.N, ignore it */ if (sc_reg == SCR_ERROR) - val &= ~SERR_PHYRDY_CHG; - return val; + *val &= ~SERR_PHYRDY_CHG; + return 0; } -static void inic_scr_write(struct 
ata_port *ap, unsigned sc_reg, u32 val) +static int inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val) { void __iomem *scr_addr = ap->ioaddr.scr_addr; void __iomem *addr; if (unlikely(sc_reg >= ARRAY_SIZE(scr_map))) - return; + return -EINVAL; addr = scr_addr + scr_map[sc_reg] * 4; writel(val, scr_addr + scr_map[sc_reg] * 4); + return 0; } /* diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 3873b29c80d..8ec520885b9 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -29,19 +29,12 @@ I distinctly remember a couple workarounds (one related to PCI-X) are still needed. - 2) Convert to LibATA new EH. Required for hotplug, NCQ, and sane - probing/error handling in general. MUST HAVE. - - 3) Add hotplug support (easy, once new-EH support appears) - 4) Add NCQ support (easy to intermediate, once new-EH support appears) 5) Investigate problems with PCI Message Signalled Interrupts (MSI). 6) Add port multiplier support (intermediate) - 7) Test and verify 3.0 Gbps support - 8) Develop a low-power-consumption strategy, and implement it. 9) [Experiment, low priority] See if ATAPI can be supported using @@ -108,8 +101,6 @@ enum { MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */ MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ, - MV_USE_Q_DEPTH = ATA_DEF_QUEUE, - MV_MAX_Q_DEPTH = 32, MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1, @@ -133,18 +124,22 @@ enum { /* Host Flags */ MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ - MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | - ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | - ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING), + MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | + ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI | + ATA_FLAG_PIO_POLLING, MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE, CRQB_FLAG_READ = (1 << 0), CRQB_TAG_SHIFT = 1, + CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */ + CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */ CRQB_CMD_ADDR_SHIFT = 8, CRQB_CMD_CS = (0x2 << 11), CRQB_CMD_LAST = (1 << 15), CRPB_FLAG_STATUS_SHIFT = 8, + CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */ + CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */ EPRD_FLAG_END_OF_TBL = (1 << 31), @@ -230,31 +225,53 @@ enum { EDMA_ERR_IRQ_CAUSE_OFS = 0x8, EDMA_ERR_IRQ_MASK_OFS = 0xc, - EDMA_ERR_D_PAR = (1 << 0), - EDMA_ERR_PRD_PAR = (1 << 1), - EDMA_ERR_DEV = (1 << 2), - EDMA_ERR_DEV_DCON = (1 << 3), - EDMA_ERR_DEV_CON = (1 << 4), - EDMA_ERR_SERR = (1 << 5), - EDMA_ERR_SELF_DIS = (1 << 7), - EDMA_ERR_BIST_ASYNC = (1 << 8), - EDMA_ERR_CRBQ_PAR = (1 << 9), - EDMA_ERR_CRPB_PAR = (1 << 10), - EDMA_ERR_INTRL_PAR = (1 << 11), - EDMA_ERR_IORDY = (1 << 12), - EDMA_ERR_LNK_CTRL_RX = (0xf << 13), + EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */ + EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */ + EDMA_ERR_DEV = (1 << 2), /* device error */ + EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */ + EDMA_ERR_DEV_CON = (1 << 4), /* device connected */ + EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */ + EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */ + EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */ + EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */ + EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */ + EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */ + EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */ + EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */ + EDMA_ERR_IORDY = (1 << 12), 
/* IORdy timeout */ + EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */ EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), - EDMA_ERR_LNK_DATA_RX = (0xf << 17), - EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), - EDMA_ERR_LNK_DATA_TX = (0x1f << 26), - EDMA_ERR_TRANS_PROTO = (1 << 31), - EDMA_ERR_FATAL = (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | - EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR | - EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR | - EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 | - EDMA_ERR_LNK_DATA_RX | - EDMA_ERR_LNK_DATA_TX | - EDMA_ERR_TRANS_PROTO), + EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */ + EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */ + EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */ + EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */ + EDMA_ERR_OVERRUN_5 = (1 << 5), + EDMA_ERR_UNDERRUN_5 = (1 << 6), + EDMA_EH_FREEZE = EDMA_ERR_D_PAR | + EDMA_ERR_PRD_PAR | + EDMA_ERR_DEV_DCON | + EDMA_ERR_DEV_CON | + EDMA_ERR_SERR | + EDMA_ERR_SELF_DIS | + EDMA_ERR_CRQB_PAR | + EDMA_ERR_CRPB_PAR | + EDMA_ERR_INTRL_PAR | + EDMA_ERR_IORDY | + EDMA_ERR_LNK_CTRL_RX_2 | + EDMA_ERR_LNK_DATA_RX | + EDMA_ERR_LNK_DATA_TX | + EDMA_ERR_TRANS_PROTO, + EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR | + EDMA_ERR_PRD_PAR | + EDMA_ERR_DEV_DCON | + EDMA_ERR_DEV_CON | + EDMA_ERR_OVERRUN_5 | + EDMA_ERR_UNDERRUN_5 | + EDMA_ERR_SELF_DIS_5 | + EDMA_ERR_CRQB_PAR | + EDMA_ERR_CRPB_PAR | + EDMA_ERR_INTRL_PAR | + EDMA_ERR_IORDY, EDMA_REQ_Q_BASE_HI_OFS = 0x10, EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */ @@ -267,10 +284,10 @@ enum { EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */ EDMA_RSP_Q_PTR_SHIFT = 3, - EDMA_CMD_OFS = 0x28, - EDMA_EN = (1 << 0), - EDMA_DS = (1 << 1), - ATA_RST = (1 << 2), + EDMA_CMD_OFS = 0x28, /* EDMA command register */ + EDMA_EN = (1 << 0), /* enable EDMA */ + EDMA_DS = (1 << 1), /* disable EDMA; self-negated */ + ATA_RST = (1 << 2), /* reset trans/link/phy */ EDMA_IORDY_TMOUT = 0x34, EDMA_ARB_CFG = 0x38, @@ -282,25 +299,28 @@ enum { MV_HP_ERRATA_60X1B2 = (1 << 3), MV_HP_ERRATA_60X1C0 = (1 << 4), MV_HP_ERRATA_XX42A0 = (1 << 5), - MV_HP_50XX = (1 << 6), - MV_HP_GEN_IIE = (1 << 7), + MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */ + MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */ + MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */ /* Port private flags (pp_flags) */ - MV_PP_FLAG_EDMA_EN = (1 << 0), - MV_PP_FLAG_EDMA_DS_ACT = (1 << 1), + MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ + MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? 
*/ }; -#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX) -#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0) -#define IS_GEN_I(hpriv) IS_50XX(hpriv) -#define IS_GEN_II(hpriv) IS_60XX(hpriv) +#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) +#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II) #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE) enum { MV_DMA_BOUNDARY = 0xffffffffU, + /* mask of register bits containing lower 32 bits + * of EDMA request queue DMA address + */ EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U, + /* ditto, for response queue */ EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U, }; @@ -352,6 +372,10 @@ struct mv_port_priv { dma_addr_t crpb_dma; struct mv_sg *sg_tbl; dma_addr_t sg_tbl_dma; + + unsigned int req_idx; + unsigned int resp_idx; + u32 pp_flags; }; @@ -380,18 +404,19 @@ struct mv_host_priv { }; static void mv_irq_clear(struct ata_port *ap); -static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in); -static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); -static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in); -static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); -static void mv_phy_reset(struct ata_port *ap); -static void __mv_phy_reset(struct ata_port *ap, int can_sleep); +static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val); +static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); +static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val); +static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); static int mv_port_start(struct ata_port *ap); static void mv_port_stop(struct ata_port *ap); static void mv_qc_prep(struct ata_queued_cmd *qc); static void mv_qc_prep_iie(struct ata_queued_cmd *qc); static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); -static void mv_eng_timeout(struct ata_port *ap); +static void mv_error_handler(struct ata_port *ap); +static void mv_post_int_cmd(struct ata_queued_cmd *qc); +static void mv_eh_freeze(struct ata_port *ap); +static void mv_eh_thaw(struct ata_port *ap); static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, @@ -415,14 +440,31 @@ static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio); static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port_no); -static void mv_stop_and_reset(struct ata_port *ap); -static struct scsi_host_template mv_sht = { +static struct scsi_host_template mv5_sht = { .module = THIS_MODULE, .name = DRV_NAME, .ioctl = ata_scsi_ioctl, .queuecommand = ata_scsi_queuecmd, - .can_queue = MV_USE_Q_DEPTH, + .can_queue = ATA_DEF_QUEUE, + .this_id = ATA_SHT_THIS_ID, + .sg_tablesize = MV_MAX_SG_CT, + .cmd_per_lun = ATA_SHT_CMD_PER_LUN, + .emulated = ATA_SHT_EMULATED, + .use_clustering = 1, + .proc_name = DRV_NAME, + .dma_boundary = MV_DMA_BOUNDARY, + .slave_configure = ata_scsi_slave_config, + .slave_destroy = ata_scsi_slave_destroy, + .bios_param = ata_std_bios_param, +}; + +static struct scsi_host_template mv6_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .ioctl = ata_scsi_ioctl, + .queuecommand = ata_scsi_queuecmd, + .can_queue = ATA_DEF_QUEUE, .this_id = ATA_SHT_THIS_ID, .sg_tablesize = MV_MAX_SG_CT, .cmd_per_lun = ATA_SHT_CMD_PER_LUN, @@ -444,19 +486,21 @@ static const 
struct ata_port_operations mv5_ops = { .exec_command = ata_exec_command, .dev_select = ata_std_dev_select, - .phy_reset = mv_phy_reset, .cable_detect = ata_cable_sata, .qc_prep = mv_qc_prep, .qc_issue = mv_qc_issue, .data_xfer = ata_data_xfer, - .eng_timeout = mv_eng_timeout, - .irq_clear = mv_irq_clear, .irq_on = ata_irq_on, .irq_ack = ata_irq_ack, + .error_handler = mv_error_handler, + .post_internal_cmd = mv_post_int_cmd, + .freeze = mv_eh_freeze, + .thaw = mv_eh_thaw, + .scr_read = mv5_scr_read, .scr_write = mv5_scr_write, @@ -473,19 +517,21 @@ static const struct ata_port_operations mv6_ops = { .exec_command = ata_exec_command, .dev_select = ata_std_dev_select, - .phy_reset = mv_phy_reset, .cable_detect = ata_cable_sata, .qc_prep = mv_qc_prep, .qc_issue = mv_qc_issue, .data_xfer = ata_data_xfer, - .eng_timeout = mv_eng_timeout, - .irq_clear = mv_irq_clear, .irq_on = ata_irq_on, .irq_ack = ata_irq_ack, + .error_handler = mv_error_handler, + .post_internal_cmd = mv_post_int_cmd, + .freeze = mv_eh_freeze, + .thaw = mv_eh_thaw, + .scr_read = mv_scr_read, .scr_write = mv_scr_write, @@ -502,19 +548,21 @@ static const struct ata_port_operations mv_iie_ops = { .exec_command = ata_exec_command, .dev_select = ata_std_dev_select, - .phy_reset = mv_phy_reset, .cable_detect = ata_cable_sata, .qc_prep = mv_qc_prep_iie, .qc_issue = mv_qc_issue, .data_xfer = ata_data_xfer, - .eng_timeout = mv_eng_timeout, - .irq_clear = mv_irq_clear, .irq_on = ata_irq_on, .irq_ack = ata_irq_ack, + .error_handler = mv_error_handler, + .post_internal_cmd = mv_post_int_cmd, + .freeze = mv_eh_freeze, + .thaw = mv_eh_thaw, + .scr_read = mv_scr_read, .scr_write = mv_scr_write, @@ -530,38 +578,38 @@ static const struct ata_port_info mv_port_info[] = { .port_ops = &mv5_ops, }, { /* chip_508x */ - .flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC), + .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC, .pio_mask = 0x1f, /* pio0-4 */ .udma_mask = ATA_UDMA6, .port_ops = &mv5_ops, }, { /* chip_5080 */ - .flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC), + .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC, .pio_mask = 0x1f, /* pio0-4 */ .udma_mask = ATA_UDMA6, .port_ops = &mv5_ops, }, { /* chip_604x */ - .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS), + .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS, .pio_mask = 0x1f, /* pio0-4 */ .udma_mask = ATA_UDMA6, .port_ops = &mv6_ops, }, { /* chip_608x */ - .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS | - MV_FLAG_DUAL_HC), + .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | + MV_FLAG_DUAL_HC, .pio_mask = 0x1f, /* pio0-4 */ .udma_mask = ATA_UDMA6, .port_ops = &mv6_ops, }, { /* chip_6042 */ - .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS), + .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS, .pio_mask = 0x1f, /* pio0-4 */ .udma_mask = ATA_UDMA6, .port_ops = &mv_iie_ops, }, { /* chip_7042 */ - .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS), + .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS, .pio_mask = 0x1f, /* pio0-4 */ .udma_mask = ATA_UDMA6, .port_ops = &mv_iie_ops, @@ -709,6 +757,46 @@ static void mv_irq_clear(struct ata_port *ap) { } +static void mv_set_edma_ptrs(void __iomem *port_mmio, + struct mv_host_priv *hpriv, + struct mv_port_priv *pp) +{ + u32 index; + + /* + * initialize request queue + */ + index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT; + + WARN_ON(pp->crqb_dma & 0x3ff); + writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS); + writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index, + port_mmio + EDMA_REQ_Q_IN_PTR_OFS); + + if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0) + writelfl((pp->crqb_dma & 
0xffffffff) | index, + port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); + else + writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); + + /* + * initialize response queue + */ + index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT; + + WARN_ON(pp->crpb_dma & 0xff); + writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS); + + if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0) + writelfl((pp->crpb_dma & 0xffffffff) | index, + port_mmio + EDMA_RSP_Q_IN_PTR_OFS); + else + writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); + + writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index, + port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); +} + /** * mv_start_dma - Enable eDMA engine * @base: port base address @@ -720,9 +808,15 @@ static void mv_irq_clear(struct ata_port *ap) * LOCKING: * Inherited from caller. */ -static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp) +static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv, + struct mv_port_priv *pp) { - if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) { + if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { + /* clear EDMA event indicators, if any */ + writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS); + + mv_set_edma_ptrs(base, hpriv, pp); + writelfl(EDMA_EN, base + EDMA_CMD_OFS); pp->pp_flags |= MV_PP_FLAG_EDMA_EN; } @@ -730,7 +824,7 @@ static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp) } /** - * mv_stop_dma - Disable eDMA engine + * __mv_stop_dma - Disable eDMA engine * @ap: ATA channel to manipulate * * Verify the local cache of the eDMA state is accurate with a @@ -739,14 +833,14 @@ static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp) * LOCKING: * Inherited from caller. */ -static void mv_stop_dma(struct ata_port *ap) +static int __mv_stop_dma(struct ata_port *ap) { void __iomem *port_mmio = mv_ap_base(ap); struct mv_port_priv *pp = ap->private_data; u32 reg; - int i; + int i, err = 0; - if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) { + if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { /* Disable EDMA if active. The disable bit auto clears. 
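The queue registers written by mv_set_edma_ptrs above pack a base address and an index into one word: the bits covered by the BASE_LO mask hold the ring base, and the 5-bit producer/consumer index sits at the queue's PTR_SHIFT. A worked sketch using the response-queue constants quoted in the hunks above (the DMA address and running index are invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define MV_MAX_Q_DEPTH          32U
#define MV_MAX_Q_DEPTH_MASK     (MV_MAX_Q_DEPTH - 1)
#define EDMA_RSP_Q_PTR_SHIFT    3
#define EDMA_RSP_Q_BASE_LO_MASK 0xffffff00U

int main(void)
{
    uint32_t crpb_dma = 0x03fe0100;     /* made-up, 256-byte aligned ring base */
    unsigned int resp_idx = 37;         /* free-running software index */

    /* the driver masks the index to the queue depth before using it */
    unsigned int out_index = resp_idx & MV_MAX_Q_DEPTH_MASK;    /* 37 % 32 = 5 */

    /* base and index fields don't overlap: the 0xffffff00 base mask leaves
     * bits 0-7 free, and the largest index value 31 << 3 = 0xf8 fits there */
    uint32_t reg = (crpb_dma & EDMA_RSP_Q_BASE_LO_MASK)
                 | (out_index << EDMA_RSP_Q_PTR_SHIFT);

    printf("out_index=%u reg=0x%08x\n", out_index, reg);        /* 5, 0x03fe0128 */
    return 0;
}
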
*/ writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); @@ -758,16 +852,30 @@ static void mv_stop_dma(struct ata_port *ap) /* now properly wait for the eDMA to stop */ for (i = 1000; i > 0; i--) { reg = readl(port_mmio + EDMA_CMD_OFS); - if (!(EDMA_EN & reg)) { + if (!(reg & EDMA_EN)) break; - } + udelay(100); } - if (EDMA_EN & reg) { + if (reg & EDMA_EN) { ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n"); - /* FIXME: Consider doing a reset here to recover */ + err = -EIO; } + + return err; +} + +static int mv_stop_dma(struct ata_port *ap) +{ + unsigned long flags; + int rc; + + spin_lock_irqsave(&ap->host->lock, flags); + rc = __mv_stop_dma(ap); + spin_unlock_irqrestore(&ap->host->lock, flags); + + return rc; } #ifdef ATA_DEBUG @@ -866,30 +974,35 @@ static unsigned int mv_scr_offset(unsigned int sc_reg_in) return ofs; } -static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in) +static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val) { unsigned int ofs = mv_scr_offset(sc_reg_in); - if (0xffffffffU != ofs) - return readl(mv_ap_base(ap) + ofs); - else - return (u32) ofs; + if (ofs != 0xffffffffU) { + *val = readl(mv_ap_base(ap) + ofs); + return 0; + } else + return -EINVAL; } -static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) +static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) { unsigned int ofs = mv_scr_offset(sc_reg_in); - if (0xffffffffU != ofs) + if (ofs != 0xffffffffU) { writelfl(val, mv_ap_base(ap) + ofs); + return 0; + } else + return -EINVAL; } -static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio) +static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv, + void __iomem *port_mmio) { u32 cfg = readl(port_mmio + EDMA_CFG_OFS); /* set up non-NCQ EDMA configuration */ - cfg &= ~(1 << 9); /* disable equeue */ + cfg &= ~(1 << 9); /* disable eQue */ if (IS_GEN_I(hpriv)) { cfg &= ~0x1f; /* clear queue depth */ @@ -909,7 +1022,7 @@ static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio) cfg |= (1 << 18); /* enab early completion */ cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */ cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */ - cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */ + cfg &= ~(EDMA_CFG_NCQ); /* clear NCQ */ } writelfl(cfg, port_mmio + EDMA_CFG_OFS); @@ -933,6 +1046,7 @@ static int mv_port_start(struct ata_port *ap) void __iomem *port_mmio = mv_ap_base(ap); void *mem; dma_addr_t mem_dma; + unsigned long flags; int rc; pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); @@ -971,28 +1085,13 @@ static int mv_port_start(struct ata_port *ap) pp->sg_tbl = mem; pp->sg_tbl_dma = mem_dma; - mv_edma_cfg(hpriv, port_mmio); - - writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS); - writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK, - port_mmio + EDMA_REQ_Q_IN_PTR_OFS); - - if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0) - writelfl(pp->crqb_dma & 0xffffffff, - port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); - else - writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); + spin_lock_irqsave(&ap->host->lock, flags); - writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS); + mv_edma_cfg(ap, hpriv, port_mmio); - if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0) - writelfl(pp->crpb_dma & 0xffffffff, - port_mmio + EDMA_RSP_Q_IN_PTR_OFS); - else - writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); + mv_set_edma_ptrs(port_mmio, hpriv, pp); - writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK, - port_mmio + 
EDMA_RSP_Q_OUT_PTR_OFS); + spin_unlock_irqrestore(&ap->host->lock, flags); /* Don't turn on EDMA here...do it before DMA commands only. Else * we'll be unable to send non-data, PIO, etc due to restricted access @@ -1013,11 +1112,7 @@ static int mv_port_start(struct ata_port *ap) */ static void mv_port_stop(struct ata_port *ap) { - unsigned long flags; - - spin_lock_irqsave(&ap->host->lock, flags); mv_stop_dma(ap); - spin_unlock_irqrestore(&ap->host->lock, flags); } /** @@ -1055,11 +1150,6 @@ static unsigned int mv_fill_sg(struct ata_queued_cmd *qc) return n_sg; } -static inline unsigned mv_inc_q_index(unsigned index) -{ - return (index + 1) & MV_MAX_Q_DEPTH_MASK; -} - static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) { u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | @@ -1088,7 +1178,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) u16 flags = 0; unsigned in_index; - if (ATA_PROT_DMA != qc->tf.protocol) + if (qc->tf.protocol != ATA_PROT_DMA) return; /* Fill in command request block @@ -1097,10 +1187,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) flags |= CRQB_FLAG_READ; WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); flags |= qc->tag << CRQB_TAG_SHIFT; + flags |= qc->tag << CRQB_IOID_SHIFT; /* 50xx appears to ignore this*/ - /* get current queue index from hardware */ - in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS) - >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; + /* get current queue index from software */ + in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; pp->crqb[in_index].sg_addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); @@ -1180,7 +1270,7 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) unsigned in_index; u32 flags = 0; - if (ATA_PROT_DMA != qc->tf.protocol) + if (qc->tf.protocol != ATA_PROT_DMA) return; /* Fill in Gen IIE command request block @@ -1190,10 +1280,11 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); flags |= qc->tag << CRQB_TAG_SHIFT; + flags |= qc->tag << CRQB_IOID_SHIFT; /* "I/O Id" is -really- + what we use as our tag */ - /* get current queue index from hardware */ - in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS) - >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; + /* get current queue index from software */ + in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); @@ -1241,83 +1332,41 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) */ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) { - void __iomem *port_mmio = mv_ap_base(qc->ap); - struct mv_port_priv *pp = qc->ap->private_data; - unsigned in_index; - u32 in_ptr; + struct ata_port *ap = qc->ap; + void __iomem *port_mmio = mv_ap_base(ap); + struct mv_port_priv *pp = ap->private_data; + struct mv_host_priv *hpriv = ap->host->private_data; + u32 in_index; - if (ATA_PROT_DMA != qc->tf.protocol) { + if (qc->tf.protocol != ATA_PROT_DMA) { /* We're about to send a non-EDMA capable command to the * port. Turn off EDMA so there won't be problems accessing * shadow block, etc registers. 
*/ - mv_stop_dma(qc->ap); + __mv_stop_dma(ap); return ata_qc_issue_prot(qc); } - in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); - in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; + mv_start_dma(port_mmio, hpriv, pp); + + in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; /* until we do queuing, the queue should be empty at this point */ WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); - in_index = mv_inc_q_index(in_index); /* now incr producer index */ + pp->req_idx++; - mv_start_dma(port_mmio, pp); + in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT; /* and write the request in pointer to kick the EDMA to life */ - in_ptr &= EDMA_REQ_Q_BASE_LO_MASK; - in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT; - writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS); + writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index, + port_mmio + EDMA_REQ_Q_IN_PTR_OFS); return 0; } /** - * mv_get_crpb_status - get status from most recently completed cmd - * @ap: ATA channel to manipulate - * - * This routine is for use when the port is in DMA mode, when it - * will be using the CRPB (command response block) method of - * returning command completion information. We check indices - * are good, grab status, and bump the response consumer index to - * prove that we're up to date. - * - * LOCKING: - * Inherited from caller. - */ -static u8 mv_get_crpb_status(struct ata_port *ap) -{ - void __iomem *port_mmio = mv_ap_base(ap); - struct mv_port_priv *pp = ap->private_data; - unsigned out_index; - u32 out_ptr; - u8 ata_status; - - out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); - out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; - - ata_status = le16_to_cpu(pp->crpb[out_index].flags) - >> CRPB_FLAG_STATUS_SHIFT; - - /* increment our consumer index... */ - out_index = mv_inc_q_index(out_index); - - /* and, until we do NCQ, there should only be 1 CRPB waiting */ - WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) - >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); - - /* write out our inc'd consumer index so EDMA knows we're caught up */ - out_ptr &= EDMA_RSP_Q_BASE_LO_MASK; - out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT; - writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); - - /* Return ATA status register for completed CRPB */ - return ata_status; -} - -/** * mv_err_intr - Handle error interrupts on the port * @ap: ATA channel to manipulate * @reset_allowed: bool: 0 == don't trigger from reset here @@ -1331,30 +1380,188 @@ static u8 mv_get_crpb_status(struct ata_port *ap) * LOCKING: * Inherited from caller. */ -static void mv_err_intr(struct ata_port *ap, int reset_allowed) +static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc) { void __iomem *port_mmio = mv_ap_base(ap); - u32 edma_err_cause, serr = 0; + u32 edma_err_cause, eh_freeze_mask, serr = 0; + struct mv_port_priv *pp = ap->private_data; + struct mv_host_priv *hpriv = ap->host->private_data; + unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN); + unsigned int action = 0, err_mask = 0; + struct ata_eh_info *ehi = &ap->eh_info; - edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); + ata_ehi_clear_desc(ehi); - if (EDMA_ERR_SERR & edma_err_cause) { + if (!edma_enabled) { + /* just a guess: do we need to do this? should we + * expand this, and do it in all cases? 
+ */ sata_scr_read(ap, SCR_ERROR, &serr); sata_scr_write_flush(ap, SCR_ERROR, serr); } - if (EDMA_ERR_SELF_DIS & edma_err_cause) { - struct mv_port_priv *pp = ap->private_data; - pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; + + edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); + + ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause); + + /* + * all generations share these EDMA error cause bits + */ + + if (edma_err_cause & EDMA_ERR_DEV) + err_mask |= AC_ERR_DEV; + if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | + EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | + EDMA_ERR_INTRL_PAR)) { + err_mask |= AC_ERR_ATA_BUS; + action |= ATA_EH_HARDRESET; + ata_ehi_push_desc(ehi, "parity error"); + } + if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) { + ata_ehi_hotplugged(ehi); + ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ? + "dev disconnect" : "dev connect"); + } + + if (IS_GEN_I(hpriv)) { + eh_freeze_mask = EDMA_EH_FREEZE_5; + + if (edma_err_cause & EDMA_ERR_SELF_DIS_5) { + struct mv_port_priv *pp = ap->private_data; + pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; + ata_ehi_push_desc(ehi, "EDMA self-disable"); + } + } else { + eh_freeze_mask = EDMA_EH_FREEZE; + + if (edma_err_cause & EDMA_ERR_SELF_DIS) { + struct mv_port_priv *pp = ap->private_data; + pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; + ata_ehi_push_desc(ehi, "EDMA self-disable"); + } + + if (edma_err_cause & EDMA_ERR_SERR) { + sata_scr_read(ap, SCR_ERROR, &serr); + sata_scr_write_flush(ap, SCR_ERROR, serr); + err_mask = AC_ERR_ATA_BUS; + action |= ATA_EH_HARDRESET; + } } - DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x " - "SERR: 0x%08x\n", ap->print_id, edma_err_cause, serr); /* Clear EDMA now that SERR cleanup done */ writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); - /* check for fatal here and recover if needed */ - if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause)) - mv_stop_and_reset(ap); + if (!err_mask) { + err_mask = AC_ERR_OTHER; + action |= ATA_EH_HARDRESET; + } + + ehi->serror |= serr; + ehi->action |= action; + + if (qc) + qc->err_mask |= err_mask; + else + ehi->err_mask |= err_mask; + + if (edma_err_cause & eh_freeze_mask) + ata_port_freeze(ap); + else + ata_port_abort(ap); +} + +static void mv_intr_pio(struct ata_port *ap) +{ + struct ata_queued_cmd *qc; + u8 ata_status; + + /* ignore spurious intr if drive still BUSY */ + ata_status = readb(ap->ioaddr.status_addr); + if (unlikely(ata_status & ATA_BUSY)) + return; + + /* get active ATA command */ + qc = ata_qc_from_tag(ap, ap->active_tag); + if (unlikely(!qc)) /* no active tag */ + return; + if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */ + return; + + /* and finally, complete the ATA command */ + qc->err_mask |= ac_err_mask(ata_status); + ata_qc_complete(qc); +} + +static void mv_intr_edma(struct ata_port *ap) +{ + void __iomem *port_mmio = mv_ap_base(ap); + struct mv_host_priv *hpriv = ap->host->private_data; + struct mv_port_priv *pp = ap->private_data; + struct ata_queued_cmd *qc; + u32 out_index, in_index; + bool work_done = false; + + /* get h/w response queue pointer */ + in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) + >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; + + while (1) { + u16 status; + unsigned int tag; + + /* get s/w response queue last-read pointer, and compare */ + out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK; + if (in_index == out_index) + break; + + /* 50xx: get active ATA command */ + if (IS_GEN_I(hpriv)) + tag = ap->active_tag; + + /* Gen II/IIE: get active ATA command via tag, to 
enable + * support for queueing. this works transparently for + * queued and non-queued modes. + */ + else if (IS_GEN_II(hpriv)) + tag = (le16_to_cpu(pp->crpb[out_index].id) + >> CRPB_IOID_SHIFT_6) & 0x3f; + + else /* IS_GEN_IIE */ + tag = (le16_to_cpu(pp->crpb[out_index].id) + >> CRPB_IOID_SHIFT_7) & 0x3f; + + qc = ata_qc_from_tag(ap, tag); + + /* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS + * bits (WARNING: might not necessarily be associated + * with this command), which -should- be clear + * if all is well + */ + status = le16_to_cpu(pp->crpb[out_index].flags); + if (unlikely(status & 0xff)) { + mv_err_intr(ap, qc); + return; + } + + /* and finally, complete the ATA command */ + if (qc) { + qc->err_mask |= + ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT); + ata_qc_complete(qc); + } + + /* advance software response queue pointer, to + * indicate (after the loop completes) to hardware + * that we have consumed a response queue entry. + */ + work_done = true; + pp->resp_idx++; + } + + if (work_done) + writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | + (out_index << EDMA_RSP_Q_PTR_SHIFT), + port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); } /** @@ -1377,10 +1584,8 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc) { void __iomem *mmio = host->iomap[MV_PRIMARY_BAR]; void __iomem *hc_mmio = mv_hc_base(mmio, hc); - struct ata_queued_cmd *qc; u32 hc_irq_cause; - int shift, port, port0, hard_port, handled; - unsigned int err_mask; + int port, port0; if (hc == 0) port0 = 0; @@ -1389,79 +1594,95 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc) /* we'll need the HC success int register in most cases */ hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); - if (hc_irq_cause) - writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); + if (!hc_irq_cause) + return; + + writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n", hc,relevant,hc_irq_cause); for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { - u8 ata_status = 0; struct ata_port *ap = host->ports[port]; struct mv_port_priv *pp = ap->private_data; + int have_err_bits, hard_port, shift; + + if ((!ap) || (ap->flags & ATA_FLAG_DISABLED)) + continue; + + shift = port << 1; /* (port * 2) */ + if (port >= MV_PORTS_PER_HC) { + shift++; /* skip bit 8 in the HC Main IRQ reg */ + } + have_err_bits = ((PORT0_ERR << shift) & relevant); + + if (unlikely(have_err_bits)) { + struct ata_queued_cmd *qc; + + qc = ata_qc_from_tag(ap, ap->active_tag); + if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) + continue; + + mv_err_intr(ap, qc); + continue; + } hard_port = mv_hardport_from_port(port); /* range 0..3 */ - handled = 0; /* ensure ata_status is set if handled++ */ - /* Note that DEV_IRQ might happen spuriously during EDMA, - * and should be ignored in such cases. - * The cause of this is still under investigation. 
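A small decoding sketch for the completion path in mv_intr_edma above: the 16-bit CRPB flags word carries the EDMA error-cause mirror in its low byte and the ATA status in the byte above CRPB_FLAG_STATUS_SHIFT, while the Gen-II id word carries the command tag at CRPB_IOID_SHIFT_6. The sample words below are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define CRPB_FLAG_STATUS_SHIFT  8
#define CRPB_IOID_SHIFT_6       5       /* Gen-II IO Id field position */

int main(void)
{
    uint16_t flags_ok  = 0x5000;        /* made-up: ATA status 0x50, no error bits */
    uint16_t flags_err = 0x5104;        /* made-up: ATA status 0x51, error bits set */
    uint16_t id        = 7 << CRPB_IOID_SHIFT_6;    /* tag 7, Gen-II encoding */

    /* low byte mirrors EDMA_ERR_IRQ_CAUSE bits; non-zero means take the error path */
    printf("flags 0x%04x: err=0x%02x status=0x%02x\n",
           flags_ok, flags_ok & 0xff, flags_ok >> CRPB_FLAG_STATUS_SHIFT);
    printf("flags 0x%04x: err=0x%02x status=0x%02x\n",
           flags_err, flags_err & 0xff, flags_err >> CRPB_FLAG_STATUS_SHIFT);

    /* recover the command tag, as mv_intr_edma does for Gen-II parts */
    printf("id 0x%04x: tag=%u\n", id, (id >> CRPB_IOID_SHIFT_6) & 0x3f);
    return 0;
}
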
- */ if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { - /* EDMA: check for response queue interrupt */ - if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) { - ata_status = mv_get_crpb_status(ap); - handled = 1; - } + if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) + mv_intr_edma(ap); } else { - /* PIO: check for device (drive) interrupt */ - if ((DEV_IRQ << hard_port) & hc_irq_cause) { - ata_status = readb(ap->ioaddr.status_addr); - handled = 1; - /* ignore spurious intr if drive still BUSY */ - if (ata_status & ATA_BUSY) { - ata_status = 0; - handled = 0; - } - } + if ((DEV_IRQ << hard_port) & hc_irq_cause) + mv_intr_pio(ap); } + } + VPRINTK("EXIT\n"); +} - if (ap && (ap->flags & ATA_FLAG_DISABLED)) - continue; +static void mv_pci_error(struct ata_host *host, void __iomem *mmio) +{ + struct ata_port *ap; + struct ata_queued_cmd *qc; + struct ata_eh_info *ehi; + unsigned int i, err_mask, printed = 0; + u32 err_cause; - err_mask = ac_err_mask(ata_status); + err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS); - shift = port << 1; /* (port * 2) */ - if (port >= MV_PORTS_PER_HC) { - shift++; /* skip bit 8 in the HC Main IRQ reg */ - } - if ((PORT0_ERR << shift) & relevant) { - mv_err_intr(ap, 1); - err_mask |= AC_ERR_OTHER; - handled = 1; - } + dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", + err_cause); + + DPRINTK("All regs @ PCI error\n"); + mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev)); - if (handled) { + writelfl(0, mmio + PCI_IRQ_CAUSE_OFS); + + for (i = 0; i < host->n_ports; i++) { + ap = host->ports[i]; + if (!ata_port_offline(ap)) { + ehi = &ap->eh_info; + ata_ehi_clear_desc(ehi); + if (!printed++) + ata_ehi_push_desc(ehi, + "PCI err cause 0x%08x", err_cause); + err_mask = AC_ERR_HOST_BUS; + ehi->action = ATA_EH_HARDRESET; qc = ata_qc_from_tag(ap, ap->active_tag); - if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) { - VPRINTK("port %u IRQ found for qc, " - "ata_status 0x%x\n", port,ata_status); - /* mark qc status appropriately */ - if (!(qc->tf.flags & ATA_TFLAG_POLLING)) { - qc->err_mask |= err_mask; - ata_qc_complete(qc); - } - } + if (qc) + qc->err_mask |= err_mask; + else + ehi->err_mask |= err_mask; + + ata_port_freeze(ap); } } - VPRINTK("EXIT\n"); } /** - * mv_interrupt - + * mv_interrupt - Main interrupt event handler * @irq: unused * @dev_instance: private data; in this case the host structure - * @regs: unused * * Read the read only register to determine if any host * controllers have pending interrupts. 
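The same two-bits-per-port layout turns up in mv_host_intr above and again in mv_eh_freeze/mv_eh_thaw further down: each port owns an err/done pair at port * 2, with one extra shift past bit 8 for ports on the second host controller. A quick table generator for those bit positions (assuming MV_PORTS_PER_HC is 4, consistent with the 0..3 hardport range noted above):

#include <stdio.h>

#define MV_PORTS_PER_HC 4               /* assumption for this sketch */

int main(void)
{
    for (int port = 0; port < 8; port++) {
        int shift = port << 1;          /* two bits per port */
        if (port >= MV_PORTS_PER_HC)
            shift++;                    /* skip bit 8 in the HC main IRQ reg */
        printf("port %d: err/done pair at bits %2d-%2d\n",
               port, shift, shift + 1);
    }
    return 0;
}

The 0x3 << shift mask used when freezing and thawing a port covers exactly that pair.
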
If so, call lower level @@ -1477,7 +1698,6 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance) struct ata_host *host = dev_instance; unsigned int hc, handled = 0, n_hcs; void __iomem *mmio = host->iomap[MV_PRIMARY_BAR]; - struct mv_host_priv *hpriv; u32 irq_stat; irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); @@ -1491,34 +1711,21 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance) n_hcs = mv_get_hc_count(host->ports[0]->flags); spin_lock(&host->lock); + if (unlikely(irq_stat & PCI_ERR)) { + mv_pci_error(host, mmio); + handled = 1; + goto out_unlock; /* skip all other HC irq handling */ + } + for (hc = 0; hc < n_hcs; hc++) { u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT)); if (relevant) { mv_host_intr(host, relevant, hc); - handled++; - } - } - - hpriv = host->private_data; - if (IS_60XX(hpriv)) { - /* deal with the interrupt coalescing bits */ - if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) { - writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO); - writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI); - writelfl(0, mmio + MV_IRQ_COAL_CAUSE); + handled = 1; } } - if (PCI_ERR & irq_stat) { - printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n", - readl(mmio + PCI_IRQ_CAUSE_OFS)); - - DPRINTK("All regs @ PCI error\n"); - mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev)); - - writelfl(0, mmio + PCI_IRQ_CAUSE_OFS); - handled++; - } +out_unlock: spin_unlock(&host->lock); return IRQ_RETVAL(handled); @@ -1549,36 +1756,37 @@ static unsigned int mv5_scr_offset(unsigned int sc_reg_in) return ofs; } -static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in) +static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val) { void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; void __iomem *addr = mv5_phy_base(mmio, ap->port_no); unsigned int ofs = mv5_scr_offset(sc_reg_in); - if (ofs != 0xffffffffU) - return readl(addr + ofs); - else - return (u32) ofs; + if (ofs != 0xffffffffU) { + *val = readl(addr + ofs); + return 0; + } else + return -EINVAL; } -static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) +static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) { void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; void __iomem *addr = mv5_phy_base(mmio, ap->port_no); unsigned int ofs = mv5_scr_offset(sc_reg_in); - if (ofs != 0xffffffffU) + if (ofs != 0xffffffffU) { writelfl(val, addr + ofs); + return 0; + } else + return -EINVAL; } static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio) { - u8 rev_id; int early_5080; - pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); - - early_5080 = (pdev->device == 0x5080) && (rev_id == 0); + early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0); if (!early_5080) { u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL); @@ -1907,7 +2115,7 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio, writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS); - if (IS_60XX(hpriv)) { + if (IS_GEN_II(hpriv)) { u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); ifctl |= (1 << 7); /* enable gen2i speed */ ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */ @@ -1923,32 +2131,12 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio, hpriv->ops->phy_errata(hpriv, mmio, port_no); - if (IS_50XX(hpriv)) + if (IS_GEN_I(hpriv)) mdelay(1); } -static void mv_stop_and_reset(struct ata_port *ap) -{ - struct mv_host_priv *hpriv = ap->host->private_data; - void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; - - 
mv_stop_dma(ap); - - mv_channel_reset(hpriv, mmio, ap->port_no); - - __mv_phy_reset(ap, 0); -} - -static inline void __msleep(unsigned int msec, int can_sleep) -{ - if (can_sleep) - msleep(msec); - else - mdelay(msec); -} - /** - * __mv_phy_reset - Perform eDMA reset followed by COMRESET + * mv_phy_reset - Perform eDMA reset followed by COMRESET * @ap: ATA channel to manipulate * * Part of this is taken from __sata_phy_reset and modified to @@ -1958,57 +2146,65 @@ static inline void __msleep(unsigned int msec, int can_sleep) * Inherited from caller. This is coded to safe to call at * interrupt level, i.e. it does not sleep. */ -static void __mv_phy_reset(struct ata_port *ap, int can_sleep) +static void mv_phy_reset(struct ata_port *ap, unsigned int *class, + unsigned long deadline) { struct mv_port_priv *pp = ap->private_data; struct mv_host_priv *hpriv = ap->host->private_data; void __iomem *port_mmio = mv_ap_base(ap); - struct ata_taskfile tf; - struct ata_device *dev = &ap->device[0]; - unsigned long timeout; int retry = 5; u32 sstatus; VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio); - DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x " - "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), - mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); +#ifdef DEBUG + { + u32 sstatus, serror, scontrol; + + mv_scr_read(ap, SCR_STATUS, &sstatus); + mv_scr_read(ap, SCR_ERROR, &serror); + mv_scr_read(ap, SCR_CONTROL, &scontrol); + DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x " + "SCtrl 0x%08x\n", status, serror, scontrol); + } +#endif /* Issue COMRESET via SControl */ comreset_retry: sata_scr_write_flush(ap, SCR_CONTROL, 0x301); - __msleep(1, can_sleep); + msleep(1); sata_scr_write_flush(ap, SCR_CONTROL, 0x300); - __msleep(20, can_sleep); + msleep(20); - timeout = jiffies + msecs_to_jiffies(200); do { sata_scr_read(ap, SCR_STATUS, &sstatus); if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0)) break; - __msleep(1, can_sleep); - } while (time_before(jiffies, timeout)); + msleep(1); + } while (time_before(jiffies, deadline)); /* work around errata */ - if (IS_60XX(hpriv) && + if (IS_GEN_II(hpriv) && (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) && (retry-- > 0)) goto comreset_retry; - DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x " - "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), - mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); +#ifdef DEBUG + { + u32 sstatus, serror, scontrol; - if (ata_port_online(ap)) { - ata_port_probe(ap); - } else { - sata_scr_read(ap, SCR_STATUS, &sstatus); - ata_port_printk(ap, KERN_INFO, - "no device found (phy stat %08x)\n", sstatus); - ata_port_disable(ap); + mv_scr_read(ap, SCR_STATUS, &sstatus); + mv_scr_read(ap, SCR_ERROR, &serror); + mv_scr_read(ap, SCR_CONTROL, &scontrol); + DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x " + "SCtrl 0x%08x\n", sstatus, serror, scontrol); + } +#endif + + if (ata_port_offline(ap)) { + *class = ATA_DEV_NONE; return; } @@ -2022,68 +2218,152 @@ comreset_retry: u8 drv_stat = ata_check_status(ap); if ((drv_stat != 0x80) && (drv_stat != 0x7f)) break; - __msleep(500, can_sleep); + msleep(500); if (retry-- <= 0) break; + if (time_after(jiffies, deadline)) + break; } - tf.lbah = readb(ap->ioaddr.lbah_addr); - tf.lbam = readb(ap->ioaddr.lbam_addr); - tf.lbal = readb(ap->ioaddr.lbal_addr); - tf.nsect = readb(ap->ioaddr.nsect_addr); + /* FIXME: if we passed the deadline, the following + * code probably produces an invalid result + */ - dev->class = 
ata_dev_classify(&tf); - if (!ata_dev_enabled(dev)) { - VPRINTK("Port disabled post-sig: No device present.\n"); - ata_port_disable(ap); - } + /* finally, read device signature from TF registers */ + *class = ata_dev_try_classify(ap, 0, NULL); writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); - pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; + WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN); VPRINTK("EXIT\n"); } -static void mv_phy_reset(struct ata_port *ap) +static int mv_prereset(struct ata_port *ap, unsigned long deadline) { - __mv_phy_reset(ap, 1); + struct mv_port_priv *pp = ap->private_data; + struct ata_eh_context *ehc = &ap->eh_context; + int rc; + + rc = mv_stop_dma(ap); + if (rc) + ehc->i.action |= ATA_EH_HARDRESET; + + if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) { + pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET; + ehc->i.action |= ATA_EH_HARDRESET; + } + + /* if we're about to do hardreset, nothing more to do */ + if (ehc->i.action & ATA_EH_HARDRESET) + return 0; + + if (ata_port_online(ap)) + rc = ata_wait_ready(ap, deadline); + else + rc = -ENODEV; + + return rc; } -/** - * mv_eng_timeout - Routine called by libata when SCSI times out I/O - * @ap: ATA channel to manipulate - * - * Intent is to clear all pending error conditions, reset the - * chip/bus, fail the command, and move on. - * - * LOCKING: - * This routine holds the host lock while failing the command. - */ -static void mv_eng_timeout(struct ata_port *ap) +static int mv_hardreset(struct ata_port *ap, unsigned int *class, + unsigned long deadline) { + struct mv_host_priv *hpriv = ap->host->private_data; void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; - struct ata_queued_cmd *qc; - unsigned long flags; - ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n"); - DPRINTK("All regs @ start of eng_timeout\n"); - mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev)); + mv_stop_dma(ap); - qc = ata_qc_from_tag(ap, ap->active_tag); - printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n", - mmio, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd); + mv_channel_reset(hpriv, mmio, ap->port_no); - spin_lock_irqsave(&ap->host->lock, flags); - mv_err_intr(ap, 0); - mv_stop_and_reset(ap); - spin_unlock_irqrestore(&ap->host->lock, flags); + mv_phy_reset(ap, class, deadline); - WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); - if (qc->flags & ATA_QCFLAG_ACTIVE) { - qc->err_mask |= AC_ERR_TIMEOUT; - ata_eh_qc_complete(qc); + return 0; +} + +static void mv_postreset(struct ata_port *ap, unsigned int *classes) +{ + u32 serr; + + /* print link status */ + sata_print_link_status(ap); + + /* clear SError */ + sata_scr_read(ap, SCR_ERROR, &serr); + sata_scr_write_flush(ap, SCR_ERROR, serr); + + /* bail out if no device is present */ + if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { + DPRINTK("EXIT, no device\n"); + return; + } + + /* set up device control */ + iowrite8(ap->ctl, ap->ioaddr.ctl_addr); +} + +static void mv_error_handler(struct ata_port *ap) +{ + ata_do_eh(ap, mv_prereset, ata_std_softreset, + mv_hardreset, mv_postreset); +} + +static void mv_post_int_cmd(struct ata_queued_cmd *qc) +{ + mv_stop_dma(qc->ap); +} + +static void mv_eh_freeze(struct ata_port *ap) +{ + void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; + unsigned int hc = (ap->port_no > 3) ? 
1 : 0; + u32 tmp, mask; + unsigned int shift; + + /* FIXME: handle coalescing completion events properly */ + + shift = ap->port_no * 2; + if (hc > 0) + shift++; + + mask = 0x3 << shift; + + /* disable assertion of portN err, done events */ + tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS); + writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS); +} + +static void mv_eh_thaw(struct ata_port *ap) +{ + void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; + unsigned int hc = (ap->port_no > 3) ? 1 : 0; + void __iomem *hc_mmio = mv_hc_base(mmio, hc); + void __iomem *port_mmio = mv_ap_base(ap); + u32 tmp, mask, hc_irq_cause; + unsigned int shift, hc_port_no = ap->port_no; + + /* FIXME: handle coalescing completion events properly */ + + shift = ap->port_no * 2; + if (hc > 0) { + shift++; + hc_port_no -= 4; } + + mask = 0x3 << shift; + + /* clear EDMA errors on this port */ + writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); + + /* clear pending irq events */ + hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); + hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */ + hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */ + writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); + + /* enable assertion of portN err, done events */ + tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS); + writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS); } /** @@ -2139,17 +2419,14 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) { struct pci_dev *pdev = to_pci_dev(host->dev); struct mv_host_priv *hpriv = host->private_data; - u8 rev_id; u32 hp_flags = hpriv->hp_flags; - pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); - switch(board_idx) { case chip_5080: hpriv->ops = &mv5xxx_ops; - hp_flags |= MV_HP_50XX; + hp_flags |= MV_HP_GEN_I; - switch (rev_id) { + switch (pdev->revision) { case 0x1: hp_flags |= MV_HP_ERRATA_50XXB0; break; @@ -2167,9 +2444,9 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) case chip_504x: case chip_508x: hpriv->ops = &mv5xxx_ops; - hp_flags |= MV_HP_50XX; + hp_flags |= MV_HP_GEN_I; - switch (rev_id) { + switch (pdev->revision) { case 0x0: hp_flags |= MV_HP_ERRATA_50XXB0; break; @@ -2187,8 +2464,9 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) case chip_604x: case chip_608x: hpriv->ops = &mv6xxx_ops; + hp_flags |= MV_HP_GEN_II; - switch (rev_id) { + switch (pdev->revision) { case 0x7: hp_flags |= MV_HP_ERRATA_60X1B2; break; @@ -2206,10 +2484,9 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) case chip_7042: case chip_6042: hpriv->ops = &mv6xxx_ops; - hp_flags |= MV_HP_GEN_IIE; - switch (rev_id) { + switch (pdev->revision) { case 0x0: hp_flags |= MV_HP_ERRATA_XX42A0; break; @@ -2273,7 +2550,7 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx) hpriv->ops->enable_leds(hpriv, mmio); for (port = 0; port < host->n_ports; port++) { - if (IS_60XX(hpriv)) { + if (IS_GEN_II(hpriv)) { void __iomem *port_mmio = mv_port_base(mmio, port); u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); @@ -2308,7 +2585,7 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx) /* and unmask interrupt generation for host regs */ writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS); - if (IS_50XX(hpriv)) + if (IS_GEN_I(hpriv)) writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS); else writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS); @@ -2337,14 +2614,12 @@ static void mv_print_info(struct ata_host *host) { struct pci_dev *pdev = to_pci_dev(host->dev); struct mv_host_priv *hpriv 
= host->private_data; - u8 rev_id, scc; + u8 scc; const char *scc_s, *gen; /* Use this to determine the HW stepping of the chip so we know * what errata to workaround */ - pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); - pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc); if (scc == 0) scc_s = "SCSI"; @@ -2426,8 +2701,9 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) mv_print_info(host); pci_set_master(pdev); + pci_try_set_mwi(pdev); return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED, - &mv_sht); + IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht); } static int __init mv_init(void) diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index b2656867c64..0b58c4df6fd 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -236,8 +236,8 @@ static void nv_ck804_host_stop(struct ata_host *host); static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance); static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance); static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance); -static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg); -static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); +static int nv_scr_read (struct ata_port *ap, unsigned int sc_reg, u32 *val); +static int nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); static void nv_nf2_freeze(struct ata_port *ap); static void nv_nf2_thaw(struct ata_port *ap); @@ -715,19 +715,20 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err) int freeze = 0; ata_ehi_clear_desc(ehi); - ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x", flags ); + __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags ); if (flags & NV_CPB_RESP_ATA_ERR) { - ata_ehi_push_desc(ehi, ": ATA error"); + ata_ehi_push_desc(ehi, "ATA error"); ehi->err_mask |= AC_ERR_DEV; } else if (flags & NV_CPB_RESP_CMD_ERR) { - ata_ehi_push_desc(ehi, ": CMD error"); + ata_ehi_push_desc(ehi, "CMD error"); ehi->err_mask |= AC_ERR_DEV; } else if (flags & NV_CPB_RESP_CPB_ERR) { - ata_ehi_push_desc(ehi, ": CPB error"); + ata_ehi_push_desc(ehi, "CPB error"); ehi->err_mask |= AC_ERR_SYSTEM; freeze = 1; } else { /* notifier error, but no error in CPB flags? 
*/ + ata_ehi_push_desc(ehi, "unknown"); ehi->err_mask |= AC_ERR_OTHER; freeze = 1; } @@ -854,20 +855,21 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) struct ata_eh_info *ehi = &ap->eh_info; ata_ehi_clear_desc(ehi); - ata_ehi_push_desc(ehi, "ADMA status 0x%08x", status ); + __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status ); if (status & NV_ADMA_STAT_TIMEOUT) { ehi->err_mask |= AC_ERR_SYSTEM; - ata_ehi_push_desc(ehi, ": timeout"); + ata_ehi_push_desc(ehi, "timeout"); } else if (status & NV_ADMA_STAT_HOTPLUG) { ata_ehi_hotplugged(ehi); - ata_ehi_push_desc(ehi, ": hotplug"); + ata_ehi_push_desc(ehi, "hotplug"); } else if (status & NV_ADMA_STAT_HOTUNPLUG) { ata_ehi_hotplugged(ehi); - ata_ehi_push_desc(ehi, ": hot unplug"); + ata_ehi_push_desc(ehi, "hot unplug"); } else if (status & NV_ADMA_STAT_SERROR) { /* let libata analyze SError and figure out the cause */ - ata_ehi_push_desc(ehi, ": SError"); - } + ata_ehi_push_desc(ehi, "SError"); + } else + ata_ehi_push_desc(ehi, "unknown"); ata_port_freeze(ap); continue; } @@ -1391,20 +1393,22 @@ static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance) return ret; } -static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg) +static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) { if (sc_reg > SCR_CONTROL) - return 0xffffffffU; + return -EINVAL; - return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4)); + *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4)); + return 0; } -static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) +static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) { if (sc_reg > SCR_CONTROL) - return; + return -EINVAL; iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4)); + return 0; } static void nv_nf2_freeze(struct ata_port *ap) @@ -1560,7 +1564,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) } ppi[0] = &nv_port_info[type]; - rc = ata_pci_prepare_native_host(pdev, ppi, &host); + rc = ata_pci_prepare_sff_host(pdev, ppi, &host); if (rc) return rc; diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c index 2ad5872fe90..d39ebc23c4a 100644 --- a/drivers/ata/sata_promise.c +++ b/drivers/ata/sata_promise.c @@ -45,7 +45,7 @@ #include "sata_promise.h" #define DRV_NAME "sata_promise" -#define DRV_VERSION "2.08" +#define DRV_VERSION "2.09" enum { PDC_MAX_PORTS = 4, @@ -128,8 +128,8 @@ struct pdc_port_priv { dma_addr_t pkt_dma; }; -static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg); -static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); +static int pdc_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); +static int pdc_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); static int pdc_common_port_start(struct ata_port *ap); static int pdc_sata_port_start(struct ata_port *ap); @@ -427,19 +427,20 @@ static int pdc_sata_cable_detect(struct ata_port *ap) return ATA_CBL_SATA; } -static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg) +static int pdc_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) { if (sc_reg > SCR_CONTROL) - return 0xffffffffU; - return readl(ap->ioaddr.scr_addr + (sc_reg * 4)); + return -EINVAL; + *val = readl(ap->ioaddr.scr_addr + (sc_reg * 4)); + return 0; } -static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, - u32 val) +static int pdc_sata_scr_write(struct 
ata_port *ap, unsigned int sc_reg, u32 val) { if (sc_reg > SCR_CONTROL) - return; + return -EINVAL; writel(val, ap->ioaddr.scr_addr + (sc_reg * 4)); + return 0; } static void pdc_atapi_pkt(struct ata_queued_cmd *qc) @@ -642,8 +643,12 @@ static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc, | PDC_PCI_SYS_ERR | PDC1_PCI_PARITY_ERR)) ac_err_mask |= AC_ERR_HOST_BUS; - if (sata_scr_valid(ap)) - ehi->serror |= pdc_sata_scr_read(ap, SCR_ERROR); + if (sata_scr_valid(ap)) { + u32 serror; + + pdc_sata_scr_read(ap, SCR_ERROR, &serror); + ehi->serror |= serror; + } qc->err_mask |= ac_err_mask; @@ -716,6 +721,9 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance) unsigned int i, tmp; unsigned int handled = 0; void __iomem *mmio_base; + unsigned int hotplug_offset, ata_no; + u32 hotplug_status; + int is_sataii_tx4; VPRINTK("ENTER\n"); @@ -726,10 +734,20 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance) mmio_base = host->iomap[PDC_MMIO_BAR]; + /* read and clear hotplug flags for all ports */ + if (host->ports[0]->flags & PDC_FLAG_GEN_II) + hotplug_offset = PDC2_SATA_PLUG_CSR; + else + hotplug_offset = PDC_SATA_PLUG_CSR; + hotplug_status = readl(mmio_base + hotplug_offset); + if (hotplug_status & 0xff) + writel(hotplug_status | 0xff, mmio_base + hotplug_offset); + hotplug_status &= 0xff; /* clear uninteresting bits */ + /* reading should also clear interrupts */ mask = readl(mmio_base + PDC_INT_SEQMASK); - if (mask == 0xffffffff) { + if (mask == 0xffffffff && hotplug_status == 0) { VPRINTK("QUICK EXIT 2\n"); return IRQ_NONE; } @@ -737,16 +755,34 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance) spin_lock(&host->lock); mask &= 0xffff; /* only 16 tags possible */ - if (!mask) { + if (mask == 0 && hotplug_status == 0) { VPRINTK("QUICK EXIT 3\n"); goto done_irq; } writel(mask, mmio_base + PDC_INT_SEQMASK); + is_sataii_tx4 = pdc_is_sataii_tx4(host->ports[0]->flags); + for (i = 0; i < host->n_ports; i++) { VPRINTK("port %u\n", i); ap = host->ports[i]; + + /* check for a plug or unplug event */ + ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4); + tmp = hotplug_status & (0x11 << ata_no); + if (tmp && ap && + !(ap->flags & ATA_FLAG_DISABLED)) { + struct ata_eh_info *ehi = &ap->eh_info; + ata_ehi_clear_desc(ehi); + ata_ehi_hotplugged(ehi); + ata_ehi_push_desc(ehi, "hotplug_status %#x", tmp); + ata_port_freeze(ap); + ++handled; + continue; + } + + /* check for a packet interrupt */ tmp = mask & (1 << (i + 1)); if (tmp && ap && !(ap->flags & ATA_FLAG_DISABLED)) { @@ -902,9 +938,9 @@ static void pdc_host_init(struct ata_host *host) tmp = readl(mmio + hotplug_offset); writel(tmp | 0xff, mmio + hotplug_offset); - /* mask plug/unplug ints */ + /* unmask plug/unplug ints */ tmp = readl(mmio + hotplug_offset); - writel(tmp | 0xff0000, mmio + hotplug_offset); + writel(tmp & ~0xff0000, mmio + hotplug_offset); /* don't initialise TBG or SLEW on 2nd generation chips */ if (is_gen2) diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c index 9ab554da89b..c8f9242e7f4 100644 --- a/drivers/ata/sata_qstor.c +++ b/drivers/ata/sata_qstor.c @@ -111,8 +111,8 @@ struct qs_port_priv { qs_state_t state; }; -static u32 qs_scr_read (struct ata_port *ap, unsigned int sc_reg); -static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); +static int qs_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); +static int qs_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); static int qs_ata_init_one (struct pci_dev *pdev, const 
struct pci_device_id *ent); static int qs_port_start(struct ata_port *ap); static void qs_host_stop(struct ata_host *host); @@ -255,18 +255,20 @@ static void qs_eng_timeout(struct ata_port *ap) ata_eng_timeout(ap); } -static u32 qs_scr_read (struct ata_port *ap, unsigned int sc_reg) +static int qs_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) { if (sc_reg > SCR_CONTROL) - return ~0U; - return readl(ap->ioaddr.scr_addr + (sc_reg * 8)); + return -EINVAL; + *val = readl(ap->ioaddr.scr_addr + (sc_reg * 8)); + return 0; } -static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) +static int qs_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) { if (sc_reg > SCR_CONTROL) - return; + return -EINVAL; writel(val, ap->ioaddr.scr_addr + (sc_reg * 8)); + return 0; } static unsigned int qs_fill_sg(struct ata_queued_cmd *qc) @@ -337,7 +339,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc) buf[28] = dflags; /* frame information structure (FIS) */ - ata_tf_to_fis(&qc->tf, &buf[32], 0); + ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]); } static inline void qs_packet_start(struct ata_queued_cmd *qc) diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c index 2a86dc4598d..db676375895 100644 --- a/drivers/ata/sata_sil.c +++ b/drivers/ata/sata_sil.c @@ -115,8 +115,8 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); static int sil_pci_device_resume(struct pci_dev *pdev); #endif static void sil_dev_config(struct ata_device *dev); -static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg); -static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); +static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); +static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); static int sil_set_mode (struct ata_port *ap, struct ata_device **r_failed); static void sil_freeze(struct ata_port *ap); static void sil_thaw(struct ata_port *ap); @@ -350,19 +350,26 @@ static inline void __iomem *sil_scr_addr(struct ata_port *ap, unsigned int sc_re return NULL; } -static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg) +static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) { void __iomem *mmio = sil_scr_addr(ap, sc_reg); - if (mmio) - return readl(mmio); - return 0xffffffffU; + + if (mmio) { + *val = readl(mmio); + return 0; + } + return -EINVAL; } -static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) +static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) { void __iomem *mmio = sil_scr_addr(ap, sc_reg); - if (mmio) + + if (mmio) { writel(val, mmio); + return 0; + } + return -EINVAL; } static void sil_host_intr(struct ata_port *ap, u32 bmdma2) @@ -378,7 +385,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2) * controllers continue to assert IRQ as long as * SError bits are pending. Clear SError immediately. 
*/ - serror = sil_scr_read(ap, SCR_ERROR); + sil_scr_read(ap, SCR_ERROR, &serror); sil_scr_write(ap, SCR_ERROR, serror); /* Trigger hotplug and accumulate SError only if the diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index ac43a30ebe2..46fbbe7f121 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c @@ -326,8 +326,8 @@ struct sil24_port_priv { static void sil24_dev_config(struct ata_device *dev); static u8 sil24_check_status(struct ata_port *ap); -static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg); -static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val); +static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val); +static int sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val); static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf); static void sil24_qc_prep(struct ata_queued_cmd *qc); static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc); @@ -464,15 +464,15 @@ static void sil24_dev_config(struct ata_device *dev) writel(PORT_CS_CDB16, port + PORT_CTRL_CLR); } -static inline void sil24_update_tf(struct ata_port *ap) +static void sil24_read_tf(struct ata_port *ap, int tag, struct ata_taskfile *tf) { - struct sil24_port_priv *pp = ap->private_data; void __iomem *port = ap->ioaddr.cmd_addr; - struct sil24_prb __iomem *prb = port; + struct sil24_prb __iomem *prb; u8 fis[6 * 4]; - memcpy_fromio(fis, prb->fis, 6 * 4); - ata_tf_from_fis(fis, &pp->tf); + prb = port + PORT_LRAM + sil24_tag(tag) * PORT_LRAM_SLOT_SZ; + memcpy_fromio(fis, prb->fis, sizeof(fis)); + ata_tf_from_fis(fis, tf); } static u8 sil24_check_status(struct ata_port *ap) @@ -488,25 +488,30 @@ static int sil24_scr_map[] = { [SCR_ACTIVE] = 3, }; -static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg) +static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val) { void __iomem *scr_addr = ap->ioaddr.scr_addr; + if (sc_reg < ARRAY_SIZE(sil24_scr_map)) { void __iomem *addr; addr = scr_addr + sil24_scr_map[sc_reg] * 4; - return readl(scr_addr + sil24_scr_map[sc_reg] * 4); + *val = readl(scr_addr + sil24_scr_map[sc_reg] * 4); + return 0; } - return 0xffffffffU; + return -EINVAL; } -static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val) +static int sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val) { void __iomem *scr_addr = ap->ioaddr.scr_addr; + if (sc_reg < ARRAY_SIZE(sil24_scr_map)) { void __iomem *addr; addr = scr_addr + sil24_scr_map[sc_reg] * 4; writel(val, scr_addr + sil24_scr_map[sc_reg] * 4); + return 0; } + return -EINVAL; } static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf) @@ -531,15 +536,60 @@ static int sil24_init_port(struct ata_port *ap) return 0; } -static int sil24_softreset(struct ata_port *ap, unsigned int *class, - unsigned long deadline) +static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp, + const struct ata_taskfile *tf, + int is_cmd, u32 ctrl, + unsigned long timeout_msec) { void __iomem *port = ap->ioaddr.cmd_addr; struct sil24_port_priv *pp = ap->private_data; struct sil24_prb *prb = &pp->cmd_block[0].ata.prb; dma_addr_t paddr = pp->cmd_block_dma; - u32 mask, irq_stat; + u32 irq_enabled, irq_mask, irq_stat; + int rc; + + prb->ctrl = cpu_to_le16(ctrl); + ata_tf_to_fis(tf, pmp, is_cmd, prb->fis); + + /* temporarily plug completion and error interrupts */ + irq_enabled = readl(port + PORT_IRQ_ENABLE_SET); + writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR, port + PORT_IRQ_ENABLE_CLR); + + writel((u32)paddr, port + 
PORT_CMD_ACTIVATE); + writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4); + + irq_mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT; + irq_stat = ata_wait_register(port + PORT_IRQ_STAT, irq_mask, 0x0, + 10, timeout_msec); + + writel(irq_mask, port + PORT_IRQ_STAT); /* clear IRQs */ + irq_stat >>= PORT_IRQ_RAW_SHIFT; + + if (irq_stat & PORT_IRQ_COMPLETE) + rc = 0; + else { + /* force port into known state */ + sil24_init_port(ap); + + if (irq_stat & PORT_IRQ_ERROR) + rc = -EIO; + else + rc = -EBUSY; + } + + /* restore IRQ enabled */ + writel(irq_enabled, port + PORT_IRQ_ENABLE_SET); + + return rc; +} + +static int sil24_do_softreset(struct ata_port *ap, unsigned int *class, + int pmp, unsigned long deadline) +{ + unsigned long timeout_msec = 0; + struct ata_taskfile tf; const char *reason; + int rc; DPRINTK("ENTER\n"); @@ -556,29 +606,22 @@ static int sil24_softreset(struct ata_port *ap, unsigned int *class, } /* do SRST */ - prb->ctrl = cpu_to_le16(PRB_CTRL_SRST); - prb->fis[1] = 0; /* no PMP yet */ - - writel((u32)paddr, port + PORT_CMD_ACTIVATE); - writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4); - - mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT; - irq_stat = ata_wait_register(port + PORT_IRQ_STAT, mask, 0x0, - 100, jiffies_to_msecs(deadline - jiffies)); - - writel(irq_stat, port + PORT_IRQ_STAT); /* clear IRQs */ - irq_stat >>= PORT_IRQ_RAW_SHIFT; - - if (!(irq_stat & PORT_IRQ_COMPLETE)) { - if (irq_stat & PORT_IRQ_ERROR) - reason = "SRST command error"; - else - reason = "timeout"; + if (time_after(deadline, jiffies)) + timeout_msec = jiffies_to_msecs(deadline - jiffies); + + ata_tf_init(ap->device, &tf); /* doesn't really matter */ + rc = sil24_exec_polled_cmd(ap, pmp, &tf, 0, PRB_CTRL_SRST, + timeout_msec); + if (rc == -EBUSY) { + reason = "timeout"; + goto err; + } else if (rc) { + reason = "SRST command error"; goto err; } - sil24_update_tf(ap); - *class = ata_dev_classify(&pp->tf); + sil24_read_tf(ap, 0, &tf); + *class = ata_dev_classify(&tf); if (*class == ATA_DEV_UNKNOWN) *class = ATA_DEV_NONE; @@ -592,6 +635,12 @@ static int sil24_softreset(struct ata_port *ap, unsigned int *class, return -EIO; } +static int sil24_softreset(struct ata_port *ap, unsigned int *class, + unsigned long deadline) +{ + return sil24_do_softreset(ap, class, 0, deadline); +} + static int sil24_hardreset(struct ata_port *ap, unsigned int *class, unsigned long deadline) { @@ -699,7 +748,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc) } prb->ctrl = cpu_to_le16(ctrl); - ata_tf_to_fis(&qc->tf, prb->fis, 0); + ata_tf_to_fis(&qc->tf, 0, 1, prb->fis); if (qc->flags & ATA_QCFLAG_DMAMAP) sil24_fill_sg(qc, sge); @@ -754,6 +803,7 @@ static void sil24_thaw(struct ata_port *ap) static void sil24_error_intr(struct ata_port *ap) { void __iomem *port = ap->ioaddr.cmd_addr; + struct sil24_port_priv *pp = ap->private_data; struct ata_eh_info *ehi = &ap->eh_info; int freeze = 0; u32 irq_stat; @@ -769,16 +819,16 @@ static void sil24_error_intr(struct ata_port *ap) if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) { ata_ehi_hotplugged(ehi); - ata_ehi_push_desc(ehi, ", %s", - irq_stat & PORT_IRQ_PHYRDY_CHG ? - "PHY RDY changed" : "device exchanged"); + ata_ehi_push_desc(ehi, "%s", + irq_stat & PORT_IRQ_PHYRDY_CHG ? 
+ "PHY RDY changed" : "device exchanged"); freeze = 1; } if (irq_stat & PORT_IRQ_UNK_FIS) { ehi->err_mask |= AC_ERR_HSM; ehi->action |= ATA_EH_SOFTRESET; - ata_ehi_push_desc(ehi , ", unknown FIS"); + ata_ehi_push_desc(ehi, "unknown FIS"); freeze = 1; } @@ -797,18 +847,18 @@ static void sil24_error_intr(struct ata_port *ap) if (ci && ci->desc) { err_mask |= ci->err_mask; action |= ci->action; - ata_ehi_push_desc(ehi, ", %s", ci->desc); + ata_ehi_push_desc(ehi, "%s", ci->desc); } else { err_mask |= AC_ERR_OTHER; action |= ATA_EH_SOFTRESET; - ata_ehi_push_desc(ehi, ", unknown command error %d", + ata_ehi_push_desc(ehi, "unknown command error %d", cerr); } /* record error info */ qc = ata_qc_from_tag(ap, ap->active_tag); if (qc) { - sil24_update_tf(ap); + sil24_read_tf(ap, qc->tag, &pp->tf); qc->err_mask |= err_mask; } else ehi->err_mask |= err_mask; @@ -825,8 +875,11 @@ static void sil24_error_intr(struct ata_port *ap) static void sil24_finish_qc(struct ata_queued_cmd *qc) { + struct ata_port *ap = qc->ap; + struct sil24_port_priv *pp = ap->private_data; + if (qc->flags & ATA_QCFLAG_RESULT_TF) - sil24_update_tf(qc->ap); + sil24_read_tf(ap, qc->tag, &pp->tf); } static inline void sil24_host_intr(struct ata_port *ap) diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c index fd80bcf1b23..31a2f55aae6 100644 --- a/drivers/ata/sata_sis.c +++ b/drivers/ata/sata_sis.c @@ -64,8 +64,8 @@ enum { }; static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); -static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg); -static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); +static int sis_scr_read (struct ata_port *ap, unsigned int sc_reg, u32 *val); +static int sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); static const struct pci_device_id sis_pci_tbl[] = { { PCI_VDEVICE(SI, 0x0180), sis_180 }, /* SiS 964/180 */ @@ -207,36 +207,37 @@ static void sis_scr_cfg_write (struct ata_port *ap, unsigned int sc_reg, u32 val pci_write_config_dword(pdev, cfg_addr+0x10, val); } -static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg) +static int sis_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); - u32 val, val2 = 0; u8 pmr; if (sc_reg > SCR_CONTROL) - return 0xffffffffU; + return -EINVAL; if (ap->flags & SIS_FLAG_CFGSCR) return sis_scr_cfg_read(ap, sc_reg); pci_read_config_byte(pdev, SIS_PMR, &pmr); - val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4)); + *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4)); if ((pdev->device == 0x0182) || (pdev->device == 0x0183) || (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED)) - val2 = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10); + *val |= ioread32(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10); + + *val &= 0xfffffffb; - return (val | val2) & 0xfffffffb; + return 0; } -static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) +static int sis_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u8 pmr; if (sc_reg > SCR_CONTROL) - return; + return -EINVAL; pci_read_config_byte(pdev, SIS_PMR, &pmr); @@ -248,6 +249,7 @@ static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED)) iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4)+0x10); } + return 0; } static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) @@ -334,7 +336,7 @@ static int 
sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) break; } - rc = ata_pci_prepare_native_host(pdev, ppi, &host); + rc = ata_pci_prepare_sff_host(pdev, ppi, &host); if (rc) return rc; diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c index 63fe99afd59..92e87707503 100644 --- a/drivers/ata/sata_svw.c +++ b/drivers/ata/sata_svw.c @@ -103,20 +103,21 @@ static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc) return 0; } -static u32 k2_sata_scr_read (struct ata_port *ap, unsigned int sc_reg) +static int k2_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) { if (sc_reg > SCR_CONTROL) - return 0xffffffffU; - return readl(ap->ioaddr.scr_addr + (sc_reg * 4)); + return -EINVAL; + *val = readl(ap->ioaddr.scr_addr + (sc_reg * 4)); + return 0; } -static void k2_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, - u32 val) +static int k2_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) { if (sc_reg > SCR_CONTROL) - return; + return -EINVAL; writel(val, ap->ioaddr.scr_addr + (sc_reg * 4)); + return 0; } diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c index aca71819f6e..78c28512f01 100644 --- a/drivers/ata/sata_uli.c +++ b/drivers/ata/sata_uli.c @@ -57,8 +57,8 @@ struct uli_priv { }; static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); -static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg); -static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); +static int uli_scr_read (struct ata_port *ap, unsigned int sc_reg, u32 *val); +static int uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); static const struct pci_device_id uli_pci_tbl[] = { { PCI_VDEVICE(AL, 0x5289), uli_5289 }, @@ -164,20 +164,22 @@ static void uli_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val) pci_write_config_dword(pdev, cfg_addr, val); } -static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg) +static int uli_scr_read (struct ata_port *ap, unsigned int sc_reg, u32 *val) { if (sc_reg > SCR_CONTROL) - return 0xffffffffU; + return -EINVAL; - return uli_scr_cfg_read(ap, sc_reg); + *val = uli_scr_cfg_read(ap, sc_reg); + return 0; } -static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) +static int uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) { if (sc_reg > SCR_CONTROL) //SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0 - return; + return -EINVAL; uli_scr_cfg_write(ap, sc_reg, val); + return 0; } static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) @@ -213,7 +215,7 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) host->private_data = hpriv; /* the first two ports are standard SFF */ - rc = ata_pci_init_native_host(host); + rc = ata_pci_init_sff_host(host); if (rc) return rc; diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c index a4c0832033d..86b7bfc1732 100644 --- a/drivers/ata/sata_via.c +++ b/drivers/ata/sata_via.c @@ -72,8 +72,8 @@ enum { }; static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); -static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg); -static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); +static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); +static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); static void svia_noop_freeze(struct ata_port *ap); static void vt6420_error_handler(struct ata_port *ap); static int 
vt6421_pata_cable_detect(struct ata_port *ap); @@ -249,18 +249,20 @@ MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, svia_pci_tbl); MODULE_VERSION(DRV_VERSION); -static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg) +static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) { if (sc_reg > SCR_CONTROL) - return 0xffffffffU; - return ioread32(ap->ioaddr.scr_addr + (4 * sc_reg)); + return -EINVAL; + *val = ioread32(ap->ioaddr.scr_addr + (4 * sc_reg)); + return 0; } -static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) +static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) { if (sc_reg > SCR_CONTROL) - return; + return -EINVAL; iowrite32(val, ap->ioaddr.scr_addr + (4 * sc_reg)); + return 0; } static void svia_noop_freeze(struct ata_port *ap) @@ -305,18 +307,19 @@ static int vt6420_prereset(struct ata_port *ap, unsigned long deadline) /* Resume phy. This is the old SATA resume sequence */ svia_scr_write(ap, SCR_CONTROL, 0x300); - svia_scr_read(ap, SCR_CONTROL); /* flush */ + svia_scr_read(ap, SCR_CONTROL, &scontrol); /* flush */ /* wait for phy to become ready, if necessary */ do { msleep(200); - if ((svia_scr_read(ap, SCR_STATUS) & 0xf) != 1) + svia_scr_read(ap, SCR_STATUS, &sstatus); + if ((sstatus & 0xf) != 1) break; } while (time_before(jiffies, timeout)); /* open code sata_print_link_status() */ - sstatus = svia_scr_read(ap, SCR_STATUS); - scontrol = svia_scr_read(ap, SCR_CONTROL); + svia_scr_read(ap, SCR_STATUS, &sstatus); + svia_scr_read(ap, SCR_CONTROL, &scontrol); online = (sstatus & 0xf) == 0x3; @@ -325,7 +328,7 @@ static int vt6420_prereset(struct ata_port *ap, unsigned long deadline) online ? "up" : "down", sstatus, scontrol); /* SStatus is read one more time */ - svia_scr_read(ap, SCR_STATUS); + svia_scr_read(ap, SCR_STATUS, &sstatus); if (!online) { /* tell EH to bail */ @@ -412,7 +415,7 @@ static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host) struct ata_host *host; int rc; - rc = ata_pci_prepare_native_host(pdev, ppi, &host); + rc = ata_pci_prepare_sff_host(pdev, ppi, &host); if (rc) return rc; *r_host = host; diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c index 1b5d81faa10..24344d0d057 100644 --- a/drivers/ata/sata_vsc.c +++ b/drivers/ata/sata_vsc.c @@ -98,20 +98,21 @@ enum { VSC_SATA_INT_PHY_CHANGE), }; -static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg) +static int vsc_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) { if (sc_reg > SCR_CONTROL) - return 0xffffffffU; - return readl(ap->ioaddr.scr_addr + (sc_reg * 4)); + return -EINVAL; + *val = readl(ap->ioaddr.scr_addr + (sc_reg * 4)); + return 0; } -static void vsc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, - u32 val) +static int vsc_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) { if (sc_reg > SCR_CONTROL) - return; + return -EINVAL; writel(val, ap->ioaddr.scr_addr + (sc_reg * 4)); + return 0; } |
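The change repeated across sata_nv, sata_promise, sata_qstor, sata_sil, sata_sil24, sata_sis, sata_svw, sata_uli, sata_via and sata_vsc above is one API conversion: the ->scr_read/->scr_write callbacks no longer return a raw u32 (with 0xffffffff standing in for failure) but an int error code, and the register value travels back through a u32 pointer. A minimal sketch of the resulting pattern, assuming the common 4-byte register stride and using made-up names (example_scr_read/example_scr_write) rather than any particular driver's; kernel headers such as <linux/libata.h> are assumed to be in scope, as in the drivers above:

static int example_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;		/* failure is now an error code */
	*val = readl(ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;			/* the value comes back through *val */
}

static int example_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

/* Callers test the return code instead of comparing against ~0U, e.g. to
 * clear SError the way sil_host_intr() and vt6420_prereset() above do:
 */
static void example_clear_serror(struct ata_port *ap)
{
	u32 serr;

	if (example_scr_read(ap, SCR_ERROR, &serr) == 0)
		example_scr_write(ap, SCR_ERROR, serr);
}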
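In the sata_mv mv_eh_freeze()/mv_eh_thaw() hunks above, the per-port err/done bits in HC_MAIN_IRQ_MASK_OFS are derived from the port number: two bits per port, with one extra bit of offset for ports behind the second host controller (port_no > 3). A small sketch of that calculation, with mv_port_irq_bits as a hypothetical helper name; the driver open-codes the same arithmetic in both handlers:

static u32 mv_port_irq_bits(unsigned int port_no)
{
	unsigned int shift = port_no * 2;	/* err + done bit per port */

	if (port_no > 3)			/* ports 4-7 sit on the second HC */
		shift++;

	return 0x3u << shift;			/* e.g. port 5: 0x3 << 11 = 0x1800 */
}

mv_eh_freeze() clears these bits in the main IRQ mask to silence the port; mv_eh_thaw() first clears the port's pending EDMA and HC cause bits and then sets the same mask bits again.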
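The new hotplug path in pdc_interrupt() above keeps only the low byte of the hotplug CSR and tests tmp = hotplug_status & (0x11 << ata_no), i.e. one bit from each nibble of that byte per ATA port number. A tiny illustration of the mask, with pdc_hotplug_bits as a hypothetical name:

static u32 pdc_hotplug_bits(unsigned int ata_no)
{
	/* e.g. ata_no == 2 selects bits 2 and 6: 0x11 << 2 == 0x44 */
	return 0x11u << ata_no;
}

A non-zero result on an enabled port is treated as a plug/unplug event: the handler records it with ata_ehi_hotplugged() and freezes the port, as the hunk shows.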