Diffstat (limited to 'drivers/spi/spi.c')
 -rw-r--r--  drivers/spi/spi.c | 683
 1 file changed, 628 insertions(+), 55 deletions(-)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 32b7bb111eb..d4f9670b51b 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -24,6 +24,8 @@
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/cache.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
 #include <linux/mutex.h>
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
@@ -39,6 +41,9 @@
 #include <linux/ioport.h>
 #include <linux/acpi.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/spi.h>
+
 static void spidev_release(struct device *dev)
 {
 	struct spi_device *spi = to_spi_device(dev);
@@ -55,14 +60,21 @@ static ssize_t
 modalias_show(struct device *dev, struct device_attribute *a, char *buf)
 {
 	const struct spi_device *spi = to_spi_device(dev);
+	int len;
+
+	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
+	if (len != -ENODEV)
+		return len;
 
 	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
 }
+static DEVICE_ATTR_RO(modalias);
 
-static struct device_attribute spi_dev_attrs[] = {
-	__ATTR_RO(modalias),
-	__ATTR_NULL,
+static struct attribute *spi_dev_attrs[] = {
+	&dev_attr_modalias.attr,
+	NULL,
 };
+ATTRIBUTE_GROUPS(spi_dev);
 
 /* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
  * and the sysfs version makes coldplug work too.
@@ -109,6 +121,11 @@ static int spi_match_device(struct device *dev, struct device_driver *drv)
 static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
 {
 	const struct spi_device *spi = to_spi_device(dev);
+	int rc;
+
+	rc = acpi_device_uevent_modalias(dev, env);
+	if (rc != -ENODEV)
+		return rc;
 
 	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
 	return 0;
@@ -223,13 +240,13 @@ static const struct dev_pm_ops spi_pm = {
 	SET_RUNTIME_PM_OPS(
 		pm_generic_runtime_suspend,
 		pm_generic_runtime_resume,
-		pm_generic_runtime_idle
+		NULL
 	)
 };
 
 struct bus_type spi_bus_type = {
 	.name		= "spi",
-	.dev_attrs	= spi_dev_attrs,
+	.dev_groups	= spi_dev_groups,
 	.match		= spi_match_device,
 	.uevent		= spi_uevent,
 	.pm		= &spi_pm,
@@ -240,15 +257,25 @@ EXPORT_SYMBOL_GPL(spi_bus_type);
 static int spi_drv_probe(struct device *dev)
 {
 	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
+	int ret;
+
+	acpi_dev_pm_attach(dev, true);
+	ret = sdrv->probe(to_spi_device(dev));
+	if (ret)
+		acpi_dev_pm_detach(dev, true);
 
-	return sdrv->probe(to_spi_device(dev));
+	return ret;
 }
 
 static int spi_drv_remove(struct device *dev)
 {
 	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
+	int ret;
 
-	return sdrv->remove(to_spi_device(dev));
+	ret = sdrv->remove(to_spi_device(dev));
+	acpi_dev_pm_detach(dev, true);
+
+	return ret;
 }
 
 static void spi_drv_shutdown(struct device *dev)
@@ -323,7 +350,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
 	if (!spi_master_get(master))
 		return NULL;
 
-	spi = kzalloc(sizeof *spi, GFP_KERNEL);
+	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
 	if (!spi) {
 		dev_err(dev, "cannot alloc spi_device\n");
 		spi_master_put(master);
@@ -340,6 +367,30 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
 }
 EXPORT_SYMBOL_GPL(spi_alloc_device);
 
+static void spi_dev_set_name(struct spi_device *spi)
+{
+	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
+
+	if (adev) {
+		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
+		return;
+	}
+
+	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
+		     spi->chip_select);
+}
+
+static int spi_dev_check(struct device *dev, void *data)
+{
+	struct spi_device *spi = to_spi_device(dev);
+	struct spi_device *new_spi = data;
+
+	if (spi->master == new_spi->master &&
+	    spi->chip_select == new_spi->chip_select)
+		return -EBUSY;
+	return 0;
+}
+
 /**
  * spi_add_device - Add spi_device allocated with spi_alloc_device
  * @spi: spi_device to register
@@ -354,7 +405,6 @@ int spi_add_device(struct spi_device *spi)
 	static DEFINE_MUTEX(spi_add_lock);
 	struct spi_master *master = spi->master;
 	struct device *dev = master->dev.parent;
-	struct device *d;
 	int status;
 
 	/* Chipselects are numbered 0..max; validate. */
@@ -366,9 +416,7 @@ int spi_add_device(struct spi_device *spi)
 	}
 
 	/* Set the bus ID string */
-	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
-			spi->chip_select);
-
+	spi_dev_set_name(spi);
 
 	/* We need to make sure there's no other device with this
 	 * chipselect **BEFORE** we call setup(), else we'll trash
@@ -376,12 +424,10 @@ int spi_add_device(struct spi_device *spi)
 	 */
 	mutex_lock(&spi_add_lock);
 
-	d = bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev));
-	if (d != NULL) {
+	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
+	if (status) {
 		dev_err(dev, "chipselect %d already in use\n",
 				spi->chip_select);
-		put_device(d);
-		status = -EBUSY;
 		goto done;
 	}
 
@@ -523,6 +569,291 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
 
 /*-------------------------------------------------------------------------*/
 
+static void spi_set_cs(struct spi_device *spi, bool enable)
+{
+	if (spi->mode & SPI_CS_HIGH)
+		enable = !enable;
+
+	if (spi->cs_gpio >= 0)
+		gpio_set_value(spi->cs_gpio, !enable);
+	else if (spi->master->set_cs)
+		spi->master->set_cs(spi, !enable);
+}
+
+#ifdef CONFIG_HAS_DMA
+static int spi_map_buf(struct spi_master *master, struct device *dev,
+		       struct sg_table *sgt, void *buf, size_t len,
+		       enum dma_data_direction dir)
+{
+	const bool vmalloced_buf = is_vmalloc_addr(buf);
+	const int desc_len = vmalloced_buf ? PAGE_SIZE : master->max_dma_len;
+	const int sgs = DIV_ROUND_UP(len, desc_len);
+	struct page *vm_page;
+	void *sg_buf;
+	size_t min;
+	int i, ret;
+
+	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
+	if (ret != 0)
+		return ret;
+
+	for (i = 0; i < sgs; i++) {
+		min = min_t(size_t, len, desc_len);
+
+		if (vmalloced_buf) {
+			vm_page = vmalloc_to_page(buf);
+			if (!vm_page) {
+				sg_free_table(sgt);
+				return -ENOMEM;
+			}
+			sg_buf = page_address(vm_page) +
+				((size_t)buf & ~PAGE_MASK);
+		} else {
+			sg_buf = buf;
+		}
+
+		sg_set_buf(&sgt->sgl[i], sg_buf, min);
+
+		buf += min;
+		len -= min;
+	}
+
+	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
+	if (ret < 0) {
+		sg_free_table(sgt);
+		return ret;
+	}
+
+	sgt->nents = ret;
+
+	return 0;
+}
+
+static void spi_unmap_buf(struct spi_master *master, struct device *dev,
+			  struct sg_table *sgt, enum dma_data_direction dir)
+{
+	if (sgt->orig_nents) {
+		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
+		sg_free_table(sgt);
+	}
+}
+
+static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
+{
+	struct device *tx_dev, *rx_dev;
+	struct spi_transfer *xfer;
+	int ret;
+
+	if (!master->can_dma)
+		return 0;
+
+	tx_dev = &master->dma_tx->dev->device;
+	rx_dev = &master->dma_rx->dev->device;
+
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		if (!master->can_dma(master, msg->spi, xfer))
+			continue;
+
+		if (xfer->tx_buf != NULL) {
+			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
+					  (void *)xfer->tx_buf, xfer->len,
+					  DMA_TO_DEVICE);
+			if (ret != 0)
+				return ret;
+		}
+
+		if (xfer->rx_buf != NULL) {
+			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
+					  xfer->rx_buf, xfer->len,
+					  DMA_FROM_DEVICE);
+			if (ret != 0) {
+				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
+					      DMA_TO_DEVICE);
+				return ret;
+			}
+		}
+	}
+
+	master->cur_msg_mapped = true;
+
+	return 0;
+}
+
+static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
+{
+	struct spi_transfer *xfer;
+	struct device *tx_dev, *rx_dev;
+
+	if (!master->cur_msg_mapped || !master->can_dma)
+		return 0;
+
+	tx_dev = &master->dma_tx->dev->device;
+	rx_dev = &master->dma_rx->dev->device;
+
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		if (!master->can_dma(master, msg->spi, xfer))
+			continue;
+
+		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+	}
+
+	return 0;
+}
+#else /* !CONFIG_HAS_DMA */
+static inline int __spi_map_msg(struct spi_master *master,
+				struct spi_message *msg)
+{
+	return 0;
+}
+
+static inline int spi_unmap_msg(struct spi_master *master,
+				struct spi_message *msg)
+{
+	return 0;
+}
+#endif /* !CONFIG_HAS_DMA */
+
+static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+{
+	struct spi_transfer *xfer;
+	void *tmp;
+	unsigned int max_tx, max_rx;
+
+	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
+		max_tx = 0;
+		max_rx = 0;
+
+		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+			if ((master->flags & SPI_MASTER_MUST_TX) &&
+			    !xfer->tx_buf)
+				max_tx = max(xfer->len, max_tx);
+			if ((master->flags & SPI_MASTER_MUST_RX) &&
+			    !xfer->rx_buf)
+				max_rx = max(xfer->len, max_rx);
+		}
+
+		if (max_tx) {
+			tmp = krealloc(master->dummy_tx, max_tx,
+				       GFP_KERNEL | GFP_DMA);
+			if (!tmp)
+				return -ENOMEM;
+			master->dummy_tx = tmp;
+			memset(tmp, 0, max_tx);
+		}
+
+		if (max_rx) {
+			tmp = krealloc(master->dummy_rx, max_rx,
+				       GFP_KERNEL | GFP_DMA);
+			if (!tmp)
+				return -ENOMEM;
+			master->dummy_rx = tmp;
+		}
+
+		if (max_tx || max_rx) {
+			list_for_each_entry(xfer, &msg->transfers,
+					    transfer_list) {
+				if (!xfer->tx_buf)
+					xfer->tx_buf = master->dummy_tx;
+				if (!xfer->rx_buf)
+					xfer->rx_buf = master->dummy_rx;
+			}
+		}
+	}
+
+	return __spi_map_msg(master, msg);
+}
+
+/*
+ * spi_transfer_one_message - Default implementation of transfer_one_message()
+ *
+ * This is a standard implementation of transfer_one_message() for
+ * drivers which impelment a transfer_one() operation.  It provides
+ * standard handling of delays and chip select management.
+ */
+static int spi_transfer_one_message(struct spi_master *master,
+				    struct spi_message *msg)
+{
+	struct spi_transfer *xfer;
+	bool keep_cs = false;
+	int ret = 0;
+	int ms = 1;
+
+	spi_set_cs(msg->spi, true);
+
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		trace_spi_transfer_start(msg, xfer);
+
+		reinit_completion(&master->xfer_completion);
+
+		ret = master->transfer_one(master, msg->spi, xfer);
+		if (ret < 0) {
+			dev_err(&msg->spi->dev,
+				"SPI transfer failed: %d\n", ret);
+			goto out;
+		}
+
+		if (ret > 0) {
+			ret = 0;
+			ms = xfer->len * 8 * 1000 / xfer->speed_hz;
+			ms += ms + 100; /* some tolerance */
+
+			ms = wait_for_completion_timeout(&master->xfer_completion,
+							 msecs_to_jiffies(ms));
+		}
+
+		if (ms == 0) {
+			dev_err(&msg->spi->dev, "SPI transfer timed out\n");
+			msg->status = -ETIMEDOUT;
+		}
+
+		trace_spi_transfer_stop(msg, xfer);
+
+		if (msg->status != -EINPROGRESS)
+			goto out;
+
+		if (xfer->delay_usecs)
+			udelay(xfer->delay_usecs);
+
+		if (xfer->cs_change) {
+			if (list_is_last(&xfer->transfer_list,
+					 &msg->transfers)) {
+				keep_cs = true;
+			} else {
+				spi_set_cs(msg->spi, false);
+				udelay(10);
+				spi_set_cs(msg->spi, true);
+			}
+		}
+
+		msg->actual_length += xfer->len;
+	}
+
+out:
+	if (ret != 0 || !keep_cs)
+		spi_set_cs(msg->spi, false);
+
+	if (msg->status == -EINPROGRESS)
+		msg->status = ret;
+
+	spi_finalize_current_message(master);
+
+	return ret;
+}
+
+/**
+ * spi_finalize_current_transfer - report completion of a transfer
+ *
+ * Called by SPI drivers using the core transfer_one_message()
+ * implementation to notify it that the current interrupt driven
+ * transfer has finished and the next one may be scheduled.
+ */
+void spi_finalize_current_transfer(struct spi_master *master)
+{
+	complete(&master->xfer_completion);
+}
+EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
+
 /**
  * spi_pump_messages - kthread work function which processes spi message queue
  * @work: pointer to kthread work struct contained in the master struct
@@ -549,10 +880,19 @@ static void spi_pump_messages(struct kthread_work *work)
 		}
 		master->busy = false;
 		spin_unlock_irqrestore(&master->queue_lock, flags);
+		kfree(master->dummy_rx);
+		master->dummy_rx = NULL;
+		kfree(master->dummy_tx);
+		master->dummy_tx = NULL;
 		if (master->unprepare_transfer_hardware &&
 		    master->unprepare_transfer_hardware(master))
 			dev_err(&master->dev,
 				"failed to unprepare transfer hardware\n");
+		if (master->auto_runtime_pm) {
+			pm_runtime_mark_last_busy(master->dev.parent);
+			pm_runtime_put_autosuspend(master->dev.parent);
+		}
+		trace_spi_master_idle(master);
 		return;
 	}
 
@@ -563,7 +903,7 @@ static void spi_pump_messages(struct kthread_work *work)
 	}
 
 	/* Extract head of queue */
 	master->cur_msg =
-	    list_entry(master->queue.next, struct spi_message, queue);
+		list_first_entry(&master->queue, struct spi_message, queue);
 
 	list_del_init(&master->cur_msg->queue);
 	if (master->busy)
@@ -572,15 +912,51 @@ static void spi_pump_messages(struct kthread_work *work)
 		master->busy = true;
 	spin_unlock_irqrestore(&master->queue_lock, flags);
 
+	if (!was_busy && master->auto_runtime_pm) {
+		ret = pm_runtime_get_sync(master->dev.parent);
+		if (ret < 0) {
+			dev_err(&master->dev, "Failed to power device: %d\n",
+				ret);
+			return;
+		}
+	}
+
+	if (!was_busy)
+		trace_spi_master_busy(master);
+
 	if (!was_busy && master->prepare_transfer_hardware) {
 		ret = master->prepare_transfer_hardware(master);
 		if (ret) {
 			dev_err(&master->dev,
 				"failed to prepare transfer hardware\n");
+
+			if (master->auto_runtime_pm)
+				pm_runtime_put(master->dev.parent);
 			return;
 		}
 	}
 
+	trace_spi_message_start(master->cur_msg);
+
+	if (master->prepare_message) {
+		ret = master->prepare_message(master, master->cur_msg);
+		if (ret) {
+			dev_err(&master->dev,
+				"failed to prepare message: %d\n", ret);
+			master->cur_msg->status = ret;
+			spi_finalize_current_message(master);
+			return;
+		}
+		master->cur_msg_prepared = true;
+	}
+
+	ret = spi_map_msg(master, master->cur_msg);
+	if (ret) {
+		master->cur_msg->status = ret;
+		spi_finalize_current_message(master);
+		return;
+	}
+
 	ret = master->transfer_one_message(master, master->cur_msg);
 	if (ret) {
 		dev_err(&master->dev,
@@ -601,7 +977,7 @@ static int spi_init_queue(struct spi_master *master)
 
 	init_kthread_worker(&master->kworker);
 	master->kworker_task = kthread_run(kthread_worker_fn,
-					   &master->kworker,
+					   &master->kworker, "%s",
 					   dev_name(&master->dev));
 	if (IS_ERR(master->kworker_task)) {
 		dev_err(&master->dev, "failed to create message pump task\n");
@@ -640,11 +1016,8 @@ struct spi_message *spi_get_next_queued_message(struct spi_master *master)
 
 	/* get a pointer to the next message, if any */
 	spin_lock_irqsave(&master->queue_lock, flags);
-	if (list_empty(&master->queue))
-		next = NULL;
-	else
-		next = list_entry(master->queue.next,
-				  struct spi_message, queue);
+	next = list_first_entry_or_null(&master->queue, struct spi_message,
+					queue);
 	spin_unlock_irqrestore(&master->queue_lock, flags);
 
 	return next;
@@ -662,6 +1035,7 @@ void spi_finalize_current_message(struct spi_master *master)
 {
 	struct spi_message *mesg;
 	unsigned long flags;
+	int ret;
 
 	spin_lock_irqsave(&master->queue_lock, flags);
 	mesg = master->cur_msg;
@@ -670,9 +1044,22 @@ void spi_finalize_current_message(struct spi_master *master)
 	queue_kthread_work(&master->kworker, &master->pump_messages);
 	spin_unlock_irqrestore(&master->queue_lock, flags);
 
+	spi_unmap_msg(master, mesg);
+
+	if (master->cur_msg_prepared && master->unprepare_message) {
+		ret = master->unprepare_message(master, mesg);
+		if (ret) {
+			dev_err(&master->dev,
+				"failed to unprepare message: %d\n", ret);
+		}
+	}
+	master->cur_msg_prepared = false;
+
 	mesg->state = NULL;
 	if (mesg->complete)
 		mesg->complete(mesg->context);
+
+	trace_spi_message_done(mesg);
 }
 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
 
@@ -712,7 +1099,7 @@ static int spi_stop_queue(struct spi_master *master)
 	 */
 	while ((!list_empty(&master->queue) || master->busy) && limit--) {
 		spin_unlock_irqrestore(&master->queue_lock, flags);
-		msleep(10);
+		usleep_range(10000, 11000);
 		spin_lock_irqsave(&master->queue_lock, flags);
 	}
 
@@ -774,7 +1161,7 @@ static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
 	msg->status = -EINPROGRESS;
 
 	list_add_tail(&msg->queue, &master->queue);
-	if (master->running && !master->busy)
+	if (!master->busy)
 		queue_kthread_work(&master->kworker, &master->pump_messages);
 
 	spin_unlock_irqrestore(&master->queue_lock, flags);
@@ -785,8 +1172,9 @@ static int spi_master_initialize_queue(struct spi_master *master)
 {
 	int ret;
 
-	master->queued = true;
 	master->transfer = spi_queued_transfer;
+	if (!master->transfer_one_message)
+		master->transfer_one_message = spi_transfer_one_message;
 
 	/* Initialize and start queue */
 	ret = spi_init_queue(master);
@@ -794,6 +1182,7 @@ static int spi_master_initialize_queue(struct spi_master *master)
 		dev_err(&master->dev, "problem initializing queue\n");
 		goto err_init_queue;
 	}
+	master->queued = true;
 	ret = spi_start_queue(master);
 	if (ret) {
 		dev_err(&master->dev, "problem starting queue\n");
@@ -803,8 +1192,8 @@ static int spi_master_initialize_queue(struct spi_master *master)
 	return 0;
 
 err_start_queue:
-err_init_queue:
 	spi_destroy_queue(master);
+err_init_queue:
 	return ret;
 }
 
@@ -822,10 +1211,8 @@ static void of_register_spi_devices(struct spi_master *master)
 {
 	struct spi_device *spi;
 	struct device_node *nc;
-	const __be32 *prop;
-	char modalias[SPI_NAME_SIZE + 4];
 	int rc;
-	int len;
+	u32 value;
 
 	if (!master->dev.of_node)
 		return;
@@ -850,14 +1237,14 @@ static void of_register_spi_devices(struct spi_master *master)
 		}
 
 		/* Device address */
-		prop = of_get_property(nc, "reg", &len);
-		if (!prop || len < sizeof(*prop)) {
-			dev_err(&master->dev, "%s has no 'reg' property\n",
-				nc->full_name);
+		rc = of_property_read_u32(nc, "reg", &value);
+		if (rc) {
+			dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
+				nc->full_name, rc);
 			spi_dev_put(spi);
 			continue;
 		}
-		spi->chip_select = be32_to_cpup(prop);
+		spi->chip_select = value;
 
 		/* Mode (clock phase/polarity/etc.) */
 		if (of_find_property(nc, "spi-cpha", NULL))
@@ -868,16 +1255,55 @@
 			spi->mode |= SPI_CS_HIGH;
 		if (of_find_property(nc, "spi-3wire", NULL))
 			spi->mode |= SPI_3WIRE;
+		if (of_find_property(nc, "spi-lsb-first", NULL))
+			spi->mode |= SPI_LSB_FIRST;
+
+		/* Device DUAL/QUAD mode */
+		if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
+			switch (value) {
+			case 1:
+				break;
+			case 2:
+				spi->mode |= SPI_TX_DUAL;
+				break;
+			case 4:
+				spi->mode |= SPI_TX_QUAD;
+				break;
+			default:
+				dev_warn(&master->dev,
+					 "spi-tx-bus-width %d not supported\n",
+					 value);
+				break;
+			}
+		}
+
+		if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
+			switch (value) {
+			case 1:
+				break;
+			case 2:
+				spi->mode |= SPI_RX_DUAL;
+				break;
+			case 4:
+				spi->mode |= SPI_RX_QUAD;
+				break;
+			default:
+				dev_warn(&master->dev,
					 "spi-rx-bus-width %d not supported\n",
+					 value);
+				break;
+			}
+		}
 
 		/* Device speed */
-		prop = of_get_property(nc, "spi-max-frequency", &len);
-		if (!prop || len < sizeof(*prop)) {
-			dev_err(&master->dev, "%s has no 'spi-max-frequency' property\n",
-				nc->full_name);
+		rc = of_property_read_u32(nc, "spi-max-frequency", &value);
+		if (rc) {
+			dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
+				nc->full_name, rc);
 			spi_dev_put(spi);
 			continue;
 		}
-		spi->max_speed_hz = be32_to_cpup(prop);
+		spi->max_speed_hz = value;
 
 		/* IRQ */
 		spi->irq = irq_of_parse_and_map(nc, 0);
@@ -887,9 +1313,7 @@ static void of_register_spi_devices(struct spi_master *master)
 		spi->dev.of_node = nc;
 
 		/* Register the new device */
-		snprintf(modalias, sizeof(modalias), "%s%s", SPI_MODULE_PREFIX,
-			 spi->modalias);
-		request_module(modalias);
+		request_module("%s%s", SPI_MODULE_PREFIX, spi->modalias);
 		rc = spi_add_device(spi);
 		if (rc) {
 			dev_err(&master->dev, "spi_device register error %s\n",
@@ -955,7 +1379,7 @@ static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
 		return AE_NO_MEMORY;
 	}
 
-	ACPI_HANDLE_SET(&spi->dev, handle);
+	ACPI_COMPANION_SET(&spi->dev, adev);
 	spi->irq = -1;
 
 	INIT_LIST_HEAD(&resource_list);
@@ -968,8 +1392,10 @@ static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
 		return AE_OK;
 	}
 
-	strlcpy(spi->modalias, dev_name(&adev->dev), sizeof(spi->modalias));
+	adev->power.flags.ignore_parent = true;
+	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
 	if (spi_add_device(spi)) {
+		adev->power.flags.ignore_parent = false;
 		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
 			dev_name(&adev->dev));
 		spi_dev_put(spi);
@@ -1040,7 +1466,7 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
 	if (!dev)
 		return NULL;
 
-	master = kzalloc(size + sizeof *master, GFP_KERNEL);
+	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
 	if (!master)
 		return NULL;
 
@@ -1065,7 +1491,7 @@ static int of_spi_register_master(struct spi_master *master)
 		return 0;
 
 	nb = of_gpio_named_count(np, "cs-gpios");
-	master->num_chipselect = max(nb, (int)master->num_chipselect);
+	master->num_chipselect = max_t(int, nb, master->num_chipselect);
 
 	/* Return error only for an incorrectly formed cs-gpios property */
 	if (nb == 0 || nb == -ENOENT)
@@ -1152,6 +1578,9 @@ int spi_register_master(struct spi_master *master)
 	spin_lock_init(&master->bus_lock_spinlock);
 	mutex_init(&master->bus_lock_mutex);
 	master->bus_lock_flag = 0;
+	init_completion(&master->xfer_completion);
+	if (!master->max_dma_len)
+		master->max_dma_len = INT_MAX;
 
 	/* register the device, then userspace will see it.
 	 * registration fails if the bus ID is in use.
@@ -1169,7 +1598,7 @@ int spi_register_master(struct spi_master *master)
 	else {
 		status = spi_master_initialize_queue(master);
 		if (status) {
-			device_unregister(&master->dev);
+			device_del(&master->dev);
 			goto done;
 		}
 	}
@@ -1188,6 +1617,41 @@ done:
 }
 EXPORT_SYMBOL_GPL(spi_register_master);
 
+static void devm_spi_unregister(struct device *dev, void *res)
+{
+	spi_unregister_master(*(struct spi_master **)res);
+}
+
+/**
+ * dev_spi_register_master - register managed SPI master controller
+ * @dev:    device managing SPI master
+ * @master: initialized master, originally from spi_alloc_master()
+ * Context: can sleep
+ *
+ * Register a SPI device as with spi_register_master() which will
+ * automatically be unregister
+ */
+int devm_spi_register_master(struct device *dev, struct spi_master *master)
+{
+	struct spi_master **ptr;
+	int ret;
+
+	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	ret = spi_register_master(master);
+	if (!ret) {
+		*ptr = master;
+		devres_add(dev, ptr);
+	} else {
+		devres_free(ptr);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(devm_spi_register_master);
+
 static int __unregister(struct device *dev, void *null)
 {
 	spi_unregister_device(to_spi_device(dev));
@@ -1313,13 +1777,35 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master);
  */
 int spi_setup(struct spi_device *spi)
 {
-	unsigned	bad_bits;
+	unsigned	bad_bits, ugly_bits;
 	int		status = 0;
 
+	/* check mode to prevent that DUAL and QUAD set at the same time
+	 */
+	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
+		((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
+		dev_err(&spi->dev,
+		"setup: can not select dual and quad at the same time\n");
+		return -EINVAL;
+	}
+	/* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
+	 */
+	if ((spi->mode & SPI_3WIRE) && (spi->mode &
+		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
+		return -EINVAL;
 	/* help drivers fail *cleanly* when they need options
 	 * that aren't supported with their current master
 	 */
 	bad_bits = spi->mode & ~spi->master->mode_bits;
+	ugly_bits = bad_bits &
+		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
+	if (ugly_bits) {
+		dev_warn(&spi->dev,
+			 "setup: ignoring unsupported mode bits %x\n",
+			 ugly_bits);
+		spi->mode &= ~ugly_bits;
+		bad_bits &= ~ugly_bits;
+	}
 	if (bad_bits) {
 		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
 			bad_bits);
@@ -1329,11 +1815,13 @@ int spi_setup(struct spi_device *spi)
 	if (!spi->bits_per_word)
 		spi->bits_per_word = 8;
 
+	if (!spi->max_speed_hz)
+		spi->max_speed_hz = spi->master->max_speed_hz;
+
 	if (spi->master->setup)
 		status = spi->master->setup(spi);
 
-	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s"
-			"%u bits/w, %u Hz max --> %d\n",
+	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
 		(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
 		(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
 		(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
@@ -1346,10 +1834,14 @@ int spi_setup(struct spi_device *spi)
 }
 EXPORT_SYMBOL_GPL(spi_setup);
 
-static int __spi_async(struct spi_device *spi, struct spi_message *message)
+static int __spi_validate(struct spi_device *spi, struct spi_message *message)
 {
 	struct spi_master *master = spi->master;
 	struct spi_transfer *xfer;
+	int w_size;
+
+	if (list_empty(&message->transfers))
+		return -EINVAL;
 
 	/* Half-duplex links include original MicroWire, and ones with
 	 * only one data pin like SPI_3WIRE (switches direction) or where
@@ -1373,12 +1865,21 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
 	/**
 	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
+	 * Set transfer tx_nbits and rx_nbits as single transfer default
+	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 */
 	list_for_each_entry(xfer, &message->transfers, transfer_list) {
+		message->frame_length += xfer->len;
 		if (!xfer->bits_per_word)
 			xfer->bits_per_word = spi->bits_per_word;
+
 		if (!xfer->speed_hz)
 			xfer->speed_hz = spi->max_speed_hz;
+
+		if (master->max_speed_hz &&
+		    xfer->speed_hz > master->max_speed_hz)
+			xfer->speed_hz = master->max_speed_hz;
+
 		if (master->bits_per_word_mask) {
 			/* Only 32 bits fit in the mask */
 			if (xfer->bits_per_word > 32)
@@ -1387,10 +1888,74 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
 			    BIT(xfer->bits_per_word - 1)))
 				return -EINVAL;
 		}
+
+		/*
+		 * SPI transfer length should be multiple of SPI word size
+		 * where SPI word size should be power-of-two multiple
+		 */
+		if (xfer->bits_per_word <= 8)
+			w_size = 1;
+		else if (xfer->bits_per_word <= 16)
+			w_size = 2;
+		else
+			w_size = 4;
+
+		/* No partial transfers accepted */
+		if (xfer->len % w_size)
+			return -EINVAL;
+
+		if (xfer->speed_hz && master->min_speed_hz &&
+		    xfer->speed_hz < master->min_speed_hz)
+			return -EINVAL;
+
+		if (xfer->tx_buf && !xfer->tx_nbits)
+			xfer->tx_nbits = SPI_NBITS_SINGLE;
+		if (xfer->rx_buf && !xfer->rx_nbits)
+			xfer->rx_nbits = SPI_NBITS_SINGLE;
+		/* check transfer tx/rx_nbits:
+		 * 1. check the value matches one of single, dual and quad
+		 * 2. check tx/rx_nbits match the mode in spi_device
+		 */
+		if (xfer->tx_buf) {
+			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
+				xfer->tx_nbits != SPI_NBITS_DUAL &&
+				xfer->tx_nbits != SPI_NBITS_QUAD)
+				return -EINVAL;
+			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
+				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
+				return -EINVAL;
+			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
+				!(spi->mode & SPI_TX_QUAD))
+				return -EINVAL;
+		}
+		/* check transfer rx_nbits */
+		if (xfer->rx_buf) {
+			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
+				xfer->rx_nbits != SPI_NBITS_DUAL &&
+				xfer->rx_nbits != SPI_NBITS_QUAD)
+				return -EINVAL;
+			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
+				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
+				return -EINVAL;
+			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
+				!(spi->mode & SPI_RX_QUAD))
+				return -EINVAL;
+		}
 	}
 
-	message->spi = spi;
 	message->status = -EINPROGRESS;
+
+	return 0;
+}
+
+static int __spi_async(struct spi_device *spi, struct spi_message *message)
+{
+	struct spi_master *master = spi->master;
+
+	message->spi = spi;
+
+	trace_spi_message_submit(message);
+
 	return master->transfer(spi, message);
 }
 
@@ -1429,6 +1994,10 @@ int spi_async(struct spi_device *spi, struct spi_message *message)
 	int ret;
 	unsigned long flags;
 
+	ret = __spi_validate(spi, message);
+	if (ret != 0)
+		return ret;
+
 	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
 
 	if (master->bus_lock_flag)
@@ -1477,6 +2046,10 @@ int spi_async_locked(struct spi_device *spi, struct spi_message *message)
 	int ret;
 	unsigned long flags;
 
+	ret = __spi_validate(spi, message);
+	if (ret != 0)
+		return ret;
+
 	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
 
 	ret = __spi_async(spi, message);
@@ -1631,7 +2204,7 @@ int spi_bus_unlock(struct spi_master *master)
 EXPORT_SYMBOL_GPL(spi_bus_unlock);
 
 /* portable code must never pass more than 32 bytes */
-#define	SPI_BUFSIZ	max(32,SMP_CACHE_BYTES)
+#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)
 
 static u8	*buf;
 
@@ -1680,7 +2253,7 @@ int spi_write_then_read(struct spi_device *spi,
 	}
 
 	spi_message_init(&message);
-	memset(x, 0, sizeof x);
+	memset(x, 0, sizeof(x));
 	if (n_tx) {
 		x[0].len = n_tx;
 		spi_message_add_tail(&x[0], &message);
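
The transfer_one()/spi_finalize_current_transfer() split introduced above lets a controller driver hand per-transfer work to the core's spi_transfer_one_message(). Below is a minimal sketch of how a driver might hook into it; the foo_spi names, register layout and IRQ wiring are placeholders invented for illustration, not part of this patch.

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/spi/spi.h>

#define FOO_SPI_CTRL		0x00	/* made-up register layout */
#define FOO_SPI_CTRL_START	BIT(0)

struct foo_spi {
	struct spi_master *master;
	void __iomem *regs;
};

static int foo_spi_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct foo_spi *fs = spi_master_get_devdata(master);

	/* Queue xfer->tx_buf/rx_buf to the hardware and start it with the
	 * completion interrupt enabled (hardware specifics omitted). */
	writel(FOO_SPI_CTRL_START, fs->regs + FOO_SPI_CTRL);

	/* A positive return tells spi_transfer_one_message() the transfer is
	 * still in flight; the core then waits on master->xfer_completion
	 * with a timeout derived from xfer->len and xfer->speed_hz. */
	return 1;
}

static irqreturn_t foo_spi_irq(int irq, void *dev_id)
{
	struct foo_spi *fs = dev_id;

	/* ...acknowledge the interrupt, drain FIFOs, latch error bits... */
	spi_finalize_current_transfer(fs->master);

	return IRQ_HANDLED;
}

The driver would set master->transfer_one = foo_spi_transfer_one (and optionally master->set_cs) before registering; spi_master_initialize_queue() installs spi_transfer_one_message() as transfer_one_message when the driver leaves that hook unset. For the timeout math in the core: a 4096-byte transfer at 1 MHz gives 4096 * 8 * 1000 / 1000000 = 32 ms, and ms += ms + 100 raises the budget to roughly 164 ms before -ETIMEDOUT is reported.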
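
The generic DMA mapping (spi_map_msg()/spi_unmap_msg()) and devm_spi_register_master() are reached from a driver through a few spi_master fields. The probe sketch below continues the foo_spi example and is only an assumption-laden illustration: the "tx"/"rx" dmaengine channel names and the 64 KiB segment cap are invented, and error handling is trimmed.

#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/spi/spi.h>

static bool foo_spi_can_dma(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* Only transfers worth the mapping overhead go through dmaengine. */
	return xfer->len > 64;
}

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;

	master = spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
	if (!master)
		return -ENOMEM;

	master->dma_tx = dma_request_slave_channel(&pdev->dev, "tx");
	master->dma_rx = dma_request_slave_channel(&pdev->dev, "rx");
	if (master->dma_tx && master->dma_rx) {
		/* For transfers accepted here, spi_map_msg() fills
		 * xfer->tx_sg/rx_sg before transfer_one() runs and
		 * spi_unmap_msg() tears the mapping down afterwards, so the
		 * scatterlists can be fed straight to
		 * dmaengine_prep_slave_sg(). */
		master->can_dma = foo_spi_can_dma;
		master->max_dma_len = SZ_64K;	/* cap per sg segment */
	}

	return devm_spi_register_master(&pdev->dev, master);
}

devm_spi_register_master() attaches a devres action that calls spi_unregister_master() on driver detach, so the sketch above needs no explicit remove()-side unregistration.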
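
spi_pump_messages() now takes a runtime PM reference on the controller's parent device before touching the hardware and drops it with autosuspend once the queue drains, provided the driver opts in through auto_runtime_pm. A hedged sketch of the driver-side setup follows; the 50 ms delay and the helper name are illustrative only.

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

static void foo_spi_setup_runtime_pm(struct platform_device *pdev,
				     struct spi_master *master)
{
	/* Let the core grab/release the parent device around queue activity. */
	master->auto_runtime_pm = true;

	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);	/* ms, illustrative */
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
}

With this in place the driver no longer needs its own pm_runtime_get()/put() calls in the message path; the core pairs pm_runtime_get_sync() when the queue becomes busy with pm_runtime_mark_last_busy()/pm_runtime_put_autosuspend() when it goes idle.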
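
On the peripheral side, the dual/quad plumbing added to spi_setup() and __spi_validate() is driven through spi->mode (for example SPI_RX_QUAD, typically set from the spi-rx-bus-width device tree property parsed above) and the per-transfer tx_nbits/rx_nbits fields. A rough sketch of a quad-output read from a hypothetical flash-like device; the opcode and framing are illustrative only.

#include <linux/spi/spi.h>
#include <linux/types.h>

static int foo_flash_quad_read(struct spi_device *spi, u32 addr,
			       void *buf, size_t len)
{
	u8 cmd[4] = { 0x6b, addr >> 16, addr >> 8, addr };
	struct spi_transfer xfers[2] = {
		{ .tx_buf = cmd, .len = sizeof(cmd) },
		{ .rx_buf = buf, .len = len, .rx_nbits = SPI_NBITS_QUAD },
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);

	/* __spi_validate() fills unset tx_nbits/rx_nbits with
	 * SPI_NBITS_SINGLE and rejects the message unless spi->mode carries
	 * the matching SPI_RX_QUAD capability. */
	return spi_sync(spi, &msg);
}

Note that spi_setup() only warns about and strips unsupported dual/quad mode bits (the ugly_bits path) rather than failing, so a transfer that still asks for SPI_NBITS_QUAD on such a bus is refused by __spi_validate() with -EINVAL instead of being silently downgraded.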
