Diffstat (limited to 'drivers/ata/libata-sff.c')
| -rw-r--r-- | drivers/ata/libata-sff.c | 301 |
1 file changed, 150 insertions, 151 deletions
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index efa4a18cfb9..1121153f1ec 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -1,7 +1,7 @@ /* * libata-sff.c - helper library for PCI IDE BMDMA * - * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * @@ -35,6 +35,7 @@ #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/pci.h> +#include <linux/module.h> #include <linux/libata.h> #include <linux/highmem.h> @@ -222,19 +223,19 @@ int ata_sff_busy_sleep(struct ata_port *ap, timeout = ata_deadline(timer_start, tmout_pat); while (status != 0xff && (status & ATA_BUSY) && time_before(jiffies, timeout)) { - msleep(50); + ata_msleep(ap, 50); status = ata_sff_busy_wait(ap, ATA_BUSY, 3); } if (status != 0xff && (status & ATA_BUSY)) - ata_port_printk(ap, KERN_WARNING, - "port is slow to respond, please be patient " - "(Status 0x%x)\n", status); + ata_port_warn(ap, + "port is slow to respond, please be patient (Status 0x%x)\n", + status); timeout = ata_deadline(timer_start, tmout); while (status != 0xff && (status & ATA_BUSY) && time_before(jiffies, timeout)) { - msleep(50); + ata_msleep(ap, 50); status = ap->ops->sff_check_status(ap); } @@ -242,9 +243,9 @@ int ata_sff_busy_sleep(struct ata_port *ap, return -ENODEV; if (status & ATA_BUSY) { - ata_port_printk(ap, KERN_ERR, "port failed to respond " - "(%lu secs, Status 0x%x)\n", - DIV_ROUND_UP(tmout, 1000), status); + ata_port_err(ap, + "port failed to respond (%lu secs, Status 0x%x)\n", + DIV_ROUND_UP(tmout, 1000), status); return -EBUSY; } @@ -350,8 +351,8 @@ static void ata_dev_select(struct ata_port *ap, unsigned int device, unsigned int wait, unsigned int can_sleep) { if (ata_msg_probe(ap)) - ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, " - "device %u, wait %u\n", device, wait); + ata_port_info(ap, "ata_dev_select: ENTER, device %u, wait %u\n", + device, wait); if (wait) ata_wait_idle(ap); @@ -360,7 +361,7 @@ static void ata_dev_select(struct ata_port *ap, unsigned int device, if (wait) { if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI) - msleep(150); + ata_msleep(ap, 150); ata_wait_idle(ap); } } @@ -418,6 +419,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) if (ioaddr->ctl_addr) iowrite8(tf->ctl, ioaddr->ctl_addr); ap->last_ctl = tf->ctl; + ata_wait_idle(ap); } if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { @@ -453,6 +455,8 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) iowrite8(tf->device, ioaddr->device_addr); VPRINTK("device 0x%X\n", tf->device); } + + ata_wait_idle(ap); } EXPORT_SYMBOL_GPL(ata_sff_tf_load); @@ -566,7 +570,7 @@ unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf, /* Transfer trailing byte, if any. 
*/ if (unlikely(buflen & 0x01)) { - unsigned char pad[2]; + unsigned char pad[2] = { }; /* Point buf to the tail of buffer */ buf += buflen - 1; @@ -625,7 +629,7 @@ unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf, /* Transfer trailing bytes, if any */ if (unlikely(slop)) { - unsigned char pad[4]; + unsigned char pad[4] = { }; /* Point buf to the tail of buffer */ buf += buflen - slop; @@ -675,7 +679,7 @@ unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf, unsigned int consumed; local_irq_save(flags); - consumed = ata_sff_data_xfer(dev, buf, buflen, rw); + consumed = ata_sff_data_xfer32(dev, buf, buflen, rw); local_irq_restore(flags); return consumed; @@ -716,13 +720,13 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) /* FIXME: use a bounce buffer */ local_irq_save(flags); - buf = kmap_atomic(page, KM_IRQ0); + buf = kmap_atomic(page); /* do the actual data transfer */ ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, do_write); - kunmap_atomic(buf, KM_IRQ0); + kunmap_atomic(buf); local_irq_restore(flags); } else { buf = page_address(page); @@ -861,13 +865,13 @@ next_sg: /* FIXME: use bounce buffer */ local_irq_save(flags); - buf = kmap_atomic(page, KM_IRQ0); + buf = kmap_atomic(page); /* do the actual data transfer */ consumed = ap->ops->sff_data_xfer(dev, buf + offset, count, rw); - kunmap_atomic(buf, KM_IRQ0); + kunmap_atomic(buf); local_irq_restore(flags); } else { buf = page_address(page); @@ -925,11 +929,11 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc) bytes = (bc_hi << 8) | bc_lo; /* shall be cleared to zero, indicating xfer of data */ - if (unlikely(ireason & (1 << 0))) + if (unlikely(ireason & ATAPI_COD)) goto atapi_check; /* make sure transfer direction matches expected */ - i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0; + i_write = ((ireason & ATAPI_IO) == 0) ? 
1 : 0; if (unlikely(do_write != i_write)) goto atapi_check; @@ -1042,7 +1046,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, u8 status, int in_wq) { - struct ata_eh_info *ehi = &ap->link.eh_info; + struct ata_link *link = qc->dev->link; + struct ata_eh_info *ehi = &link->eh_info; unsigned long flags = 0; int poll_next; @@ -1298,11 +1303,28 @@ fsm_start: } EXPORT_SYMBOL_GPL(ata_sff_hsm_move); -void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay) +void ata_sff_queue_work(struct work_struct *work) +{ + queue_work(ata_sff_wq, work); +} +EXPORT_SYMBOL_GPL(ata_sff_queue_work); + +void ata_sff_queue_delayed_work(struct delayed_work *dwork, unsigned long delay) +{ + queue_delayed_work(ata_sff_wq, dwork, delay); +} +EXPORT_SYMBOL_GPL(ata_sff_queue_delayed_work); + +void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay) { + struct ata_port *ap = link->ap; + + WARN_ON((ap->sff_pio_task_link != NULL) && + (ap->sff_pio_task_link != link)); + ap->sff_pio_task_link = link; + /* may fail if ata_sff_flush_pio_task() in progress */ - queue_delayed_work(ata_sff_wq, &ap->sff_pio_task, - msecs_to_jiffies(delay)); + ata_sff_queue_delayed_work(&ap->sff_pio_task, msecs_to_jiffies(delay)); } EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task); @@ -1310,25 +1332,30 @@ void ata_sff_flush_pio_task(struct ata_port *ap) { DPRINTK("ENTER\n"); - cancel_rearming_delayed_work(&ap->sff_pio_task); + cancel_delayed_work_sync(&ap->sff_pio_task); ap->hsm_task_state = HSM_ST_IDLE; + ap->sff_pio_task_link = NULL; if (ata_msg_ctl(ap)) - ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__); + ata_port_dbg(ap, "%s: EXIT\n", __func__); } static void ata_sff_pio_task(struct work_struct *work) { struct ata_port *ap = container_of(work, struct ata_port, sff_pio_task.work); + struct ata_link *link = ap->sff_pio_task_link; struct ata_queued_cmd *qc; u8 status; int poll_next; + BUG_ON(ap->sff_pio_task_link == NULL); /* qc can be NULL if timeout occurred */ - qc = ata_qc_from_tag(ap, ap->link.active_tag); - if (!qc) + qc = ata_qc_from_tag(ap, link->active_tag); + if (!qc) { + ap->sff_pio_task_link = NULL; return; + } fsm_start: WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE); @@ -1342,14 +1369,19 @@ fsm_start: */ status = ata_sff_busy_wait(ap, ATA_BUSY, 5); if (status & ATA_BUSY) { - msleep(2); + ata_msleep(ap, 2); status = ata_sff_busy_wait(ap, ATA_BUSY, 10); if (status & ATA_BUSY) { - ata_sff_queue_pio_task(ap, ATA_SHORT_PAUSE); + ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE); return; } } + /* + * hsm_move() may trigger another command to be processed. + * clean the link beforehand. + */ + ap->sff_pio_task_link = NULL; /* move the HSM */ poll_next = ata_sff_hsm_move(ap, qc, status, 1); @@ -1376,6 +1408,7 @@ fsm_start: unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; + struct ata_link *link = qc->dev->link; /* Use polling pio if the LLD doesn't handle * interrupt driven pio and atapi CDB interrupt. 
@@ -1396,7 +1429,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) ap->hsm_task_state = HSM_ST_LAST; if (qc->tf.flags & ATA_TFLAG_POLLING) - ata_sff_queue_pio_task(ap, 0); + ata_sff_queue_pio_task(link, 0); break; @@ -1409,7 +1442,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) if (qc->tf.flags & ATA_TFLAG_WRITE) { /* PIO data out protocol */ ap->hsm_task_state = HSM_ST_FIRST; - ata_sff_queue_pio_task(ap, 0); + ata_sff_queue_pio_task(link, 0); /* always send first data block using the * ata_sff_pio_task() codepath. @@ -1419,7 +1452,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) ap->hsm_task_state = HSM_ST; if (qc->tf.flags & ATA_TFLAG_POLLING) - ata_sff_queue_pio_task(ap, 0); + ata_sff_queue_pio_task(link, 0); /* if polling, ata_sff_pio_task() handles the * rest. otherwise, interrupt handler takes @@ -1441,7 +1474,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) /* send cdb by polling if no cdb interrupt */ if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || (qc->tf.flags & ATA_TFLAG_POLLING)) - ata_sff_queue_pio_task(ap, 0); + ata_sff_queue_pio_task(link, 0); break; default: @@ -1482,7 +1515,7 @@ static unsigned int ata_sff_idle_irq(struct ata_port *ap) ap->ops->sff_check_status(ap); if (ap->ops->sff_irq_clear) ap->ops->sff_irq_clear(ap); - ata_port_printk(ap, KERN_WARNING, "irq trap\n"); + ata_port_warn(ap, "irq trap\n"); return 1; } #endif @@ -1512,11 +1545,10 @@ static unsigned int __ata_sff_port_intr(struct ata_port *ap, if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) return ata_sff_idle_irq(ap); break; - case HSM_ST: - case HSM_ST_LAST: - break; - default: + case HSM_ST_IDLE: return ata_sff_idle_irq(ap); + default: + break; } /* check main status, clearing INTRQ if needed */ @@ -1681,7 +1713,7 @@ void ata_sff_lost_interrupt(struct ata_port *ap) /* There was a command running, we are no longer busy and we have no interrupt. 
*/ - ata_port_printk(ap, KERN_WARNING, "lost interrupt (Status 0x%x)\n", + ata_port_warn(ap, "lost interrupt (Status 0x%x)\n", status); /* Run the host interrupt logic as if the interrupt had not been lost */ @@ -1768,8 +1800,9 @@ int ata_sff_prereset(struct ata_link *link, unsigned long deadline) if (!ata_link_offline(link)) { rc = ata_sff_wait_ready(link, deadline); if (rc && rc != -ENODEV) { - ata_link_printk(link, KERN_WARNING, "device not ready " - "(errno=%d), forcing hardreset\n", rc); + ata_link_warn(link, + "device not ready (errno=%d), forcing hardreset\n", + rc); ehc->i.action |= ATA_EH_HARDRESET; } } @@ -1917,7 +1950,7 @@ int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask, unsigned int dev1 = devmask & (1 << 1); int rc, ret = 0; - msleep(ATA_WAIT_AFTER_RESET); + ata_msleep(ap, ATA_WAIT_AFTER_RESET); /* always check readiness of the master device */ rc = ata_sff_wait_ready(link, deadline); @@ -1946,7 +1979,7 @@ int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask, lbal = ioread8(ioaddr->lbal_addr); if ((nsect == 1) && (lbal == 1)) break; - msleep(50); /* give drive a breather */ + ata_msleep(ap, 50); /* give drive a breather */ } rc = ata_sff_wait_ready(link, deadline); @@ -2026,7 +2059,7 @@ int ata_sff_softreset(struct ata_link *link, unsigned int *classes, rc = ata_bus_softreset(ap, devmask, deadline); /* if link is occupied, -ENODEV too is an error */ if (rc && (rc != -ENODEV || sata_scr_valid(link))) { - ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc); + ata_link_err(link, "SRST failed (errno=%d)\n", rc); return rc; } @@ -2140,8 +2173,7 @@ void ata_sff_drain_fifo(struct ata_queued_cmd *qc) /* Can become DEBUG later */ if (count) - ata_port_printk(ap, KERN_DEBUG, - "drained %d bytes to clear DRQ.\n", count); + ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count); } EXPORT_SYMBOL_GPL(ata_sff_drain_fifo); @@ -2286,9 +2318,9 @@ int ata_pci_sff_init_host(struct ata_host *host) rc = pcim_iomap_regions(pdev, 0x3 << base, dev_driver_string(gdev)); if (rc) { - dev_printk(KERN_WARNING, gdev, - "failed to request/iomap BARs for port %d " - "(errno=%d)\n", i, rc); + dev_warn(gdev, + "failed to request/iomap BARs for port %d (errno=%d)\n", + i, rc); if (rc == -EBUSY) pcim_pin_device(pdev); ap->ops = &ata_dummy_port_ops; @@ -2310,7 +2342,7 @@ int ata_pci_sff_init_host(struct ata_host *host) } if (!mask) { - dev_printk(KERN_ERR, gdev, "no available native port\n"); + dev_err(gdev, "no available native port\n"); return -ENODEV; } @@ -2345,8 +2377,7 @@ int ata_pci_sff_prepare_host(struct pci_dev *pdev, host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2); if (!host) { - dev_printk(KERN_ERR, &pdev->dev, - "failed to allocate ATA host\n"); + dev_err(&pdev->dev, "failed to allocate ATA host\n"); rc = -ENOMEM; goto err_out; } @@ -2402,28 +2433,24 @@ int ata_pci_sff_activate_host(struct ata_host *host, mask = (1 << 2) | (1 << 0); if ((tmp8 & mask) != mask) legacy_mode = 1; -#if defined(CONFIG_NO_ATA_LEGACY) - /* Some platforms with PCI limits cannot address compat - port space. 
In that case we punt if their firmware has - left a device in compatibility mode */ - if (legacy_mode) { - printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n"); - return -EOPNOTSUPP; - } -#endif } if (!devres_open_group(dev, NULL, GFP_KERNEL)) return -ENOMEM; if (!legacy_mode && pdev->irq) { + int i; + rc = devm_request_irq(dev, pdev->irq, irq_handler, IRQF_SHARED, drv_name, host); if (rc) goto out; - ata_port_desc(host->ports[0], "irq %d", pdev->irq); - ata_port_desc(host->ports[1], "irq %d", pdev->irq); + for (i = 0; i < 2; i++) { + if (ata_port_is_dummy(host->ports[i])) + continue; + ata_port_desc(host->ports[i], "irq %d", pdev->irq); + } } else if (legacy_mode) { if (!ata_port_is_dummy(host->ports[0])) { rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev), @@ -2472,31 +2499,10 @@ static const struct ata_port_info *ata_sff_find_valid_pi( return NULL; } -/** - * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller - * @pdev: Controller to be initialized - * @ppi: array of port_info, must be enough for two ports - * @sht: scsi_host_template to use when registering the host - * @host_priv: host private_data - * @hflag: host flags - * - * This is a helper function which can be called from a driver's - * xxx_init_one() probe function if the hardware uses traditional - * IDE taskfile registers and is PIO only. - * - * ASSUMPTION: - * Nobody makes a single channel controller that appears solely as - * the secondary legacy port on PCI. - * - * LOCKING: - * Inherited from PCI layer (may sleep). - * - * RETURNS: - * Zero on success, negative on errno-based value on error. - */ -int ata_pci_sff_init_one(struct pci_dev *pdev, - const struct ata_port_info * const *ppi, - struct scsi_host_template *sht, void *host_priv, int hflag) +static int ata_pci_init_one(struct pci_dev *pdev, + const struct ata_port_info * const *ppi, + struct scsi_host_template *sht, void *host_priv, + int hflags, bool bmdma) { struct device *dev = &pdev->dev; const struct ata_port_info *pi; @@ -2507,8 +2513,7 @@ int ata_pci_sff_init_one(struct pci_dev *pdev, pi = ata_sff_find_valid_pi(ppi); if (!pi) { - dev_printk(KERN_ERR, &pdev->dev, - "no valid port_info specified\n"); + dev_err(&pdev->dev, "no valid port_info specified\n"); return -EINVAL; } @@ -2519,14 +2524,26 @@ int ata_pci_sff_init_one(struct pci_dev *pdev, if (rc) goto out; - /* prepare and activate SFF host */ - rc = ata_pci_sff_prepare_host(pdev, ppi, &host); +#ifdef CONFIG_ATA_BMDMA + if (bmdma) + /* prepare and activate BMDMA host */ + rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); + else +#endif + /* prepare and activate SFF host */ + rc = ata_pci_sff_prepare_host(pdev, ppi, &host); if (rc) goto out; host->private_data = host_priv; - host->flags |= hflag; + host->flags |= hflags; - rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); +#ifdef CONFIG_ATA_BMDMA + if (bmdma) { + pci_set_master(pdev); + rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht); + } else +#endif + rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); out: if (rc == 0) devres_remove_group(&pdev->dev, NULL); @@ -2535,6 +2552,35 @@ out: return rc; } + +/** + * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller + * @pdev: Controller to be initialized + * @ppi: array of port_info, must be enough for two ports + * @sht: scsi_host_template to use when registering the host + * @host_priv: host private_data + * @hflag: host flags + * + * This is a helper function which can be called from a 
driver's + * xxx_init_one() probe function if the hardware uses traditional + * IDE taskfile registers and is PIO only. + * + * ASSUMPTION: + * Nobody makes a single channel controller that appears solely as + * the secondary legacy port on PCI. + * + * LOCKING: + * Inherited from PCI layer (may sleep). + * + * RETURNS: + * Zero on success, negative on errno-based value on error. + */ +int ata_pci_sff_init_one(struct pci_dev *pdev, + const struct ata_port_info * const *ppi, + struct scsi_host_template *sht, void *host_priv, int hflag) +{ + return ata_pci_init_one(pdev, ppi, sht, host_priv, hflag, 0); +} EXPORT_SYMBOL_GPL(ata_pci_sff_init_one); #endif /* CONFIG_PCI */ @@ -2734,10 +2780,7 @@ EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep); unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; - - /* see ata_dma_blacklisted() */ - BUG_ON((ap->flags & ATA_FLAG_PIO_POLLING) && - qc->tf.protocol == ATAPI_PROT_DMA); + struct ata_link *link = qc->dev->link; /* defer PIO handling to sff_qc_issue */ if (!ata_is_dma(qc->tf.protocol)) @@ -2766,7 +2809,7 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) /* send cdb by polling if no cdb interrupt */ if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) - ata_sff_queue_pio_task(ap, 0); + ata_sff_queue_pio_task(link, 0); break; default: @@ -2812,7 +2855,7 @@ unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) bmdma_stopped = true; if (unlikely(host_stat & ATA_DMA_ERR)) { - /* error when transfering data to/from memory */ + /* error when transferring data to/from memory */ qc->err_mask |= AC_ERR_HOST_BUS; ap->hsm_task_state = HSM_ST_ERR; } @@ -3005,7 +3048,7 @@ void ata_bmdma_start(struct ata_queued_cmd *qc) * Or maybe I'm just being paranoid. * * FIXME: The posting of this write means I/O starts are - * unneccessarily delayed for MMIO + * unnecessarily delayed for MMIO */ } EXPORT_SYMBOL_GPL(ata_bmdma_start); @@ -3132,8 +3175,7 @@ static void ata_bmdma_nodma(struct ata_host *host, const char *reason) { int i; - dev_printk(KERN_ERR, host->dev, "BMDMA: %s, falling back to PIO\n", - reason); + dev_err(host->dev, "BMDMA: %s, falling back to PIO\n", reason); for (i = 0; i < 2; i++) { host->ports[i]->mwdma_mask = 0; @@ -3256,43 +3298,7 @@ int ata_pci_bmdma_init_one(struct pci_dev *pdev, struct scsi_host_template *sht, void *host_priv, int hflags) { - struct device *dev = &pdev->dev; - const struct ata_port_info *pi; - struct ata_host *host = NULL; - int rc; - - DPRINTK("ENTER\n"); - - pi = ata_sff_find_valid_pi(ppi); - if (!pi) { - dev_printk(KERN_ERR, &pdev->dev, - "no valid port_info specified\n"); - return -EINVAL; - } - - if (!devres_open_group(dev, NULL, GFP_KERNEL)) - return -ENOMEM; - - rc = pcim_enable_device(pdev); - if (rc) - goto out; - - /* prepare and activate BMDMA host */ - rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); - if (rc) - goto out; - host->private_data = host_priv; - host->flags |= hflags; - - pci_set_master(pdev); - rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht); - out: - if (rc == 0) - devres_remove_group(&pdev->dev, NULL); - else - devres_release_group(&pdev->dev, NULL); - - return rc; + return ata_pci_init_one(pdev, ppi, sht, host_priv, hflags, 1); } EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one); @@ -3318,21 +3324,14 @@ void ata_sff_port_init(struct ata_port *ap) int __init ata_sff_init(void) { - /* - * FIXME: In UP case, there is only one workqueue thread and if you - * have more than one PIO device, latency is bloody awful, with - * occasional 
multi-second "hiccups" as one PIO device waits for - * another. It's an ugly wart that users DO occasionally complain - * about; luckily most users have at most one PIO polled device. - */ - ata_sff_wq = create_workqueue("ata_sff"); + ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE); if (!ata_sff_wq) return -ENOMEM; return 0; } -void __exit ata_sff_exit(void) +void ata_sff_exit(void) { destroy_workqueue(ata_sff_wq); } |
