Diffstat (limited to 'drivers/spi/spi.c')
-rw-r--r--  drivers/spi/spi.c | 1389
1 file changed, 1322 insertions, 67 deletions
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 709c836607d..d4f9670b51b 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -1,7 +1,8 @@  /* - * spi.c - SPI init/core code + * SPI init/core code   *   * Copyright (C) 2005 David Brownell + * Copyright (C) 2008 Secret Lab Technologies Ltd.   *   * This program is free software; you can redistribute it and/or modify   * it under the terms of the GNU General Public License as published by @@ -19,15 +20,29 @@   */  #include <linux/kernel.h> +#include <linux/kmod.h>  #include <linux/device.h>  #include <linux/init.h>  #include <linux/cache.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h>  #include <linux/mutex.h>  #include <linux/of_device.h> +#include <linux/of_irq.h>  #include <linux/slab.h>  #include <linux/mod_devicetable.h>  #include <linux/spi/spi.h> -#include <linux/of_spi.h> +#include <linux/of_gpio.h> +#include <linux/pm_runtime.h> +#include <linux/export.h> +#include <linux/sched/rt.h> +#include <linux/delay.h> +#include <linux/kthread.h> +#include <linux/ioport.h> +#include <linux/acpi.h> + +#define CREATE_TRACE_POINTS +#include <trace/events/spi.h>  static void spidev_release(struct device *dev)  { @@ -45,14 +60,21 @@ static ssize_t  modalias_show(struct device *dev, struct device_attribute *a, char *buf)  {  	const struct spi_device	*spi = to_spi_device(dev); +	int len; -	return sprintf(buf, "%s\n", spi->modalias); +	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); +	if (len != -ENODEV) +		return len; + +	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);  } +static DEVICE_ATTR_RO(modalias); -static struct device_attribute spi_dev_attrs[] = { -	__ATTR_RO(modalias), -	__ATTR_NULL, +static struct attribute *spi_dev_attrs[] = { +	&dev_attr_modalias.attr, +	NULL,  }; +ATTRIBUTE_GROUPS(spi_dev);  /* modalias support makes "modprobe $MODALIAS" new-style hotplug work,   * and the sysfs version makes coldplug work too. @@ -86,6 +108,10 @@ static int spi_match_device(struct device *dev, struct device_driver *drv)  	if (of_driver_match_device(dev, drv))  		return 1; +	/* Then try ACPI */ +	if (acpi_driver_match_device(dev, drv)) +		return 1; +  	if (sdrv->id_table)  		return !!spi_match_id(sdrv->id_table, spi); @@ -95,14 +121,18 @@ static int spi_match_device(struct device *dev, struct device_driver *drv)  static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)  {  	const struct spi_device		*spi = to_spi_device(dev); +	int rc; + +	rc = acpi_device_uevent_modalias(dev, env); +	if (rc != -ENODEV) +		return rc;  	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);  	return 0;  } -#ifdef	CONFIG_PM - -static int spi_suspend(struct device *dev, pm_message_t message) +#ifdef CONFIG_PM_SLEEP +static int spi_legacy_suspend(struct device *dev, pm_message_t message)  {  	int			value = 0;  	struct spi_driver	*drv = to_spi_driver(dev->driver); @@ -117,7 +147,7 @@ static int spi_suspend(struct device *dev, pm_message_t message)  	return value;  } -static int spi_resume(struct device *dev) +static int spi_legacy_resume(struct device *dev)  {  	int			value = 0;  	struct spi_driver	*drv = to_spi_driver(dev->driver); @@ -132,18 +162,94 @@ static int spi_resume(struct device *dev)  	return value;  } +static int spi_pm_suspend(struct device *dev) +{ +	const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; + +	if (pm) +		return pm_generic_suspend(dev); +	else +		return spi_legacy_suspend(dev, PMSG_SUSPEND); +} + +static int spi_pm_resume(struct device *dev) +{ +	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + +	if (pm) +		return pm_generic_resume(dev); +	else +		return spi_legacy_resume(dev); +} + +static int spi_pm_freeze(struct device *dev) +{ +	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + +	if (pm) +		return pm_generic_freeze(dev); +	else +		return spi_legacy_suspend(dev, PMSG_FREEZE); +} + +static int spi_pm_thaw(struct device *dev) +{ +	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + +	if (pm) +		return pm_generic_thaw(dev); +	else +		return spi_legacy_resume(dev); +} + +static int spi_pm_poweroff(struct device *dev) +{ +	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + +	if (pm) +		return pm_generic_poweroff(dev); +	else +		return spi_legacy_suspend(dev, PMSG_HIBERNATE); +} + +static int spi_pm_restore(struct device *dev) +{ +	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + +	if (pm) +		return pm_generic_restore(dev); +	else +		return spi_legacy_resume(dev); +}  #else -#define spi_suspend	NULL -#define spi_resume	NULL +#define spi_pm_suspend	NULL +#define spi_pm_resume	NULL +#define spi_pm_freeze	NULL +#define spi_pm_thaw	NULL +#define spi_pm_poweroff	NULL +#define spi_pm_restore	NULL  #endif +static const struct dev_pm_ops spi_pm = { +	.suspend = spi_pm_suspend, +	.resume = spi_pm_resume, +	.freeze = spi_pm_freeze, +	.thaw = spi_pm_thaw, +	.poweroff = spi_pm_poweroff, +	.restore = spi_pm_restore, +	SET_RUNTIME_PM_OPS( +		pm_generic_runtime_suspend, +		pm_generic_runtime_resume, +		NULL +	) +}; +  struct bus_type spi_bus_type = {  	.name		= "spi", -	.dev_attrs	= spi_dev_attrs, +	.dev_groups	= spi_dev_groups,  	.match		= spi_match_device,  	.uevent		= spi_uevent, -	.suspend	= spi_suspend, -	.resume		= spi_resume, +	.pm		= &spi_pm,  };  EXPORT_SYMBOL_GPL(spi_bus_type); @@ -151,15 +257,25 @@ EXPORT_SYMBOL_GPL(spi_bus_type);  static int spi_drv_probe(struct device *dev)  {  	const struct spi_driver		*sdrv = to_spi_driver(dev->driver); +	int ret; -	return sdrv->probe(to_spi_device(dev)); +	acpi_dev_pm_attach(dev, true); +	ret = sdrv->probe(to_spi_device(dev)); +	if (ret) +		acpi_dev_pm_detach(dev, true); + +	return ret;  }  static int spi_drv_remove(struct device *dev)  {  	const struct spi_driver		*sdrv = to_spi_driver(dev->driver); +	int ret; + +	ret = sdrv->remove(to_spi_device(dev)); +	acpi_dev_pm_detach(dev, true); -	return sdrv->remove(to_spi_device(dev)); +	return ret;  }  static void spi_drv_shutdown(struct device *dev) @@ -234,7 +350,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master)  	if (!spi_master_get(master))  		return NULL; -	spi = kzalloc(sizeof *spi, GFP_KERNEL); +	spi = kzalloc(sizeof(*spi), GFP_KERNEL);  	if (!spi) {  		dev_err(dev, "cannot alloc spi_device\n");  		spi_master_put(master); @@ -242,14 +358,39 @@ struct spi_device *spi_alloc_device(struct spi_master *master)  	}  	spi->master = master; -	spi->dev.parent = dev; +	spi->dev.parent = &master->dev;  	spi->dev.bus = &spi_bus_type;  	spi->dev.release = spidev_release; +	spi->cs_gpio = -ENOENT;  	device_initialize(&spi->dev);  	return spi;  }  EXPORT_SYMBOL_GPL(spi_alloc_device); +static void spi_dev_set_name(struct spi_device *spi) +{ +	struct acpi_device *adev = ACPI_COMPANION(&spi->dev); + +	if (adev) { +		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev)); +	
	return; +	} + +	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev), +		     spi->chip_select); +} + +static int spi_dev_check(struct device *dev, void *data) +{ +	struct spi_device *spi = to_spi_device(dev); +	struct spi_device *new_spi = data; + +	if (spi->master == new_spi->master && +	    spi->chip_select == new_spi->chip_select) +		return -EBUSY; +	return 0; +} +  /**   * spi_add_device - Add spi_device allocated with spi_alloc_device   * @spi: spi_device to register @@ -262,22 +403,20 @@ EXPORT_SYMBOL_GPL(spi_alloc_device);  int spi_add_device(struct spi_device *spi)  {  	static DEFINE_MUTEX(spi_add_lock); -	struct device *dev = spi->master->dev.parent; -	struct device *d; +	struct spi_master *master = spi->master; +	struct device *dev = master->dev.parent;  	int status;  	/* Chipselects are numbered 0..max; validate. */ -	if (spi->chip_select >= spi->master->num_chipselect) { +	if (spi->chip_select >= master->num_chipselect) {  		dev_err(dev, "cs%d >= max %d\n",  			spi->chip_select, -			spi->master->num_chipselect); +			master->num_chipselect);  		return -EINVAL;  	}  	/* Set the bus ID string */ -	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev), -			spi->chip_select); - +	spi_dev_set_name(spi);  	/* We need to make sure there's no other device with this  	 * chipselect **BEFORE** we call setup(), else we'll trash @@ -285,15 +424,16 @@ int spi_add_device(struct spi_device *spi)  	 */  	mutex_lock(&spi_add_lock); -	d = bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev)); -	if (d != NULL) { +	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check); +	if (status) {  		dev_err(dev, "chipselect %d already in use\n",  				spi->chip_select); -		put_device(d); -		status = -EBUSY;  		goto done;  	} +	if (master->cs_gpios) +		spi->cs_gpio = master->cs_gpios[spi->chip_select]; +  	/* Drivers may modify this initial i/o setup, but will  	 * normally rely on the device being setup.  Devices  	 * using SPI_CS_HIGH can't coexist well otherwise... @@ -404,8 +544,7 @@ static void spi_match_master_to_boardinfo(struct spi_master *master,   * The board info passed can safely be __initdata ... but be careful of   * any embedded pointers (platform_data, etc), they're copied as-is.   */ -int __init -spi_register_board_info(struct spi_board_info const *info, unsigned n) +int spi_register_board_info(struct spi_board_info const *info, unsigned n)  {  	struct boardinfo *bi;  	int i; @@ -430,6 +569,860 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)  /*-------------------------------------------------------------------------*/ +static void spi_set_cs(struct spi_device *spi, bool enable) +{ +	if (spi->mode & SPI_CS_HIGH) +		enable = !enable; + +	if (spi->cs_gpio >= 0) +		gpio_set_value(spi->cs_gpio, !enable); +	else if (spi->master->set_cs) +		spi->master->set_cs(spi, !enable); +} + +#ifdef CONFIG_HAS_DMA +static int spi_map_buf(struct spi_master *master, struct device *dev, +		       struct sg_table *sgt, void *buf, size_t len, +		       enum dma_data_direction dir) +{ +	const bool vmalloced_buf = is_vmalloc_addr(buf); +	const int desc_len = vmalloced_buf ? 
PAGE_SIZE : master->max_dma_len; +	const int sgs = DIV_ROUND_UP(len, desc_len); +	struct page *vm_page; +	void *sg_buf; +	size_t min; +	int i, ret; + +	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL); +	if (ret != 0) +		return ret; + +	for (i = 0; i < sgs; i++) { +		min = min_t(size_t, len, desc_len); + +		if (vmalloced_buf) { +			vm_page = vmalloc_to_page(buf); +			if (!vm_page) { +				sg_free_table(sgt); +				return -ENOMEM; +			} +			sg_buf = page_address(vm_page) + +				((size_t)buf & ~PAGE_MASK); +		} else { +			sg_buf = buf; +		} + +		sg_set_buf(&sgt->sgl[i], sg_buf, min); + +		buf += min; +		len -= min; +	} + +	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir); +	if (ret < 0) { +		sg_free_table(sgt); +		return ret; +	} + +	sgt->nents = ret; + +	return 0; +} + +static void spi_unmap_buf(struct spi_master *master, struct device *dev, +			  struct sg_table *sgt, enum dma_data_direction dir) +{ +	if (sgt->orig_nents) { +		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir); +		sg_free_table(sgt); +	} +} + +static int __spi_map_msg(struct spi_master *master, struct spi_message *msg) +{ +	struct device *tx_dev, *rx_dev; +	struct spi_transfer *xfer; +	int ret; + +	if (!master->can_dma) +		return 0; + +	tx_dev = &master->dma_tx->dev->device; +	rx_dev = &master->dma_rx->dev->device; + +	list_for_each_entry(xfer, &msg->transfers, transfer_list) { +		if (!master->can_dma(master, msg->spi, xfer)) +			continue; + +		if (xfer->tx_buf != NULL) { +			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg, +					  (void *)xfer->tx_buf, xfer->len, +					  DMA_TO_DEVICE); +			if (ret != 0) +				return ret; +		} + +		if (xfer->rx_buf != NULL) { +			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg, +					  xfer->rx_buf, xfer->len, +					  DMA_FROM_DEVICE); +			if (ret != 0) { +				spi_unmap_buf(master, tx_dev, &xfer->tx_sg, +					      DMA_TO_DEVICE); +				return ret; +			} +		} +	} + +	master->cur_msg_mapped = true; + +	return 0; +} + +static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg) +{ +	struct spi_transfer *xfer; +	struct device *tx_dev, *rx_dev; + +	if (!master->cur_msg_mapped || !master->can_dma) +		return 0; + +	tx_dev = &master->dma_tx->dev->device; +	rx_dev = &master->dma_rx->dev->device; + +	list_for_each_entry(xfer, &msg->transfers, transfer_list) { +		if (!master->can_dma(master, msg->spi, xfer)) +			continue; + +		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); +		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); +	} + +	return 0; +} +#else /* !CONFIG_HAS_DMA */ +static inline int __spi_map_msg(struct spi_master *master, +				struct spi_message *msg) +{ +	return 0; +} + +static inline int spi_unmap_msg(struct spi_master *master, +				struct spi_message *msg) +{ +	return 0; +} +#endif /* !CONFIG_HAS_DMA */ + +static int spi_map_msg(struct spi_master *master, struct spi_message *msg) +{ +	struct spi_transfer *xfer; +	void *tmp; +	unsigned int max_tx, max_rx; + +	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) { +		max_tx = 0; +		max_rx = 0; + +		list_for_each_entry(xfer, &msg->transfers, transfer_list) { +			if ((master->flags & SPI_MASTER_MUST_TX) && +			    !xfer->tx_buf) +				max_tx = max(xfer->len, max_tx); +			if ((master->flags & SPI_MASTER_MUST_RX) && +			    !xfer->rx_buf) +				max_rx = max(xfer->len, max_rx); +		} + +		if (max_tx) { +			tmp = krealloc(master->dummy_tx, max_tx, +				       GFP_KERNEL | GFP_DMA); +			if (!tmp) +				return -ENOMEM; +			master->dummy_tx = tmp; +			memset(tmp, 0, max_tx); +		} + +		if (max_rx) { +			tmp = 
krealloc(master->dummy_rx, max_rx, +				       GFP_KERNEL | GFP_DMA); +			if (!tmp) +				return -ENOMEM; +			master->dummy_rx = tmp; +		} + +		if (max_tx || max_rx) { +			list_for_each_entry(xfer, &msg->transfers, +					    transfer_list) { +				if (!xfer->tx_buf) +					xfer->tx_buf = master->dummy_tx; +				if (!xfer->rx_buf) +					xfer->rx_buf = master->dummy_rx; +			} +		} +	} + +	return __spi_map_msg(master, msg); +} + +/* + * spi_transfer_one_message - Default implementation of transfer_one_message() + * + * This is a standard implementation of transfer_one_message() for + * drivers which implement a transfer_one() operation.  It provides + * standard handling of delays and chip select management. + */ +static int spi_transfer_one_message(struct spi_master *master, +				    struct spi_message *msg) +{ +	struct spi_transfer *xfer; +	bool keep_cs = false; +	int ret = 0; +	int ms = 1; + +	spi_set_cs(msg->spi, true); + +	list_for_each_entry(xfer, &msg->transfers, transfer_list) { +		trace_spi_transfer_start(msg, xfer); + +		reinit_completion(&master->xfer_completion); + +		ret = master->transfer_one(master, msg->spi, xfer); +		if (ret < 0) { +			dev_err(&msg->spi->dev, +				"SPI transfer failed: %d\n", ret); +			goto out; +		} + +		if (ret > 0) { +			ret = 0; +			ms = xfer->len * 8 * 1000 / xfer->speed_hz; +			ms += ms + 100; /* some tolerance */ + +			ms = wait_for_completion_timeout(&master->xfer_completion, +							 msecs_to_jiffies(ms)); +		} + +		if (ms == 0) { +			dev_err(&msg->spi->dev, "SPI transfer timed out\n"); +			msg->status = -ETIMEDOUT; +		} + +		trace_spi_transfer_stop(msg, xfer); + +		if (msg->status != -EINPROGRESS) +			goto out; + +		if (xfer->delay_usecs) +			udelay(xfer->delay_usecs); + +		if (xfer->cs_change) { +			if (list_is_last(&xfer->transfer_list, +					 &msg->transfers)) { +				keep_cs = true; +			} else { +				spi_set_cs(msg->spi, false); +				udelay(10); +				spi_set_cs(msg->spi, true); +			} +		} + +		msg->actual_length += xfer->len; +	} + +out: +	if (ret != 0 || !keep_cs) +		spi_set_cs(msg->spi, false); + +	if (msg->status == -EINPROGRESS) +		msg->status = ret; + +	spi_finalize_current_message(master); + +	return ret; +} + +/** + * spi_finalize_current_transfer - report completion of a transfer + * + * Called by SPI drivers using the core transfer_one_message() + * implementation to notify it that the current interrupt driven + * transfer has finished and the next one may be scheduled. + */ +void spi_finalize_current_transfer(struct spi_master *master) +{ +	complete(&master->xfer_completion); +} +EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); + +/** + * spi_pump_messages - kthread work function which processes spi message queue + * @work: pointer to kthread work struct contained in the master struct + * + * This function checks if there is any spi message in the queue that + * needs processing and if so calls out to the driver to initialize hardware + * and transfer each message. 
+ * + */ +static void spi_pump_messages(struct kthread_work *work) +{ +	struct spi_master *master = +		container_of(work, struct spi_master, pump_messages); +	unsigned long flags; +	bool was_busy = false; +	int ret; + +	/* Lock queue and check for queue work */ +	spin_lock_irqsave(&master->queue_lock, flags); +	if (list_empty(&master->queue) || !master->running) { +		if (!master->busy) { +			spin_unlock_irqrestore(&master->queue_lock, flags); +			return; +		} +		master->busy = false; +		spin_unlock_irqrestore(&master->queue_lock, flags); +		kfree(master->dummy_rx); +		master->dummy_rx = NULL; +		kfree(master->dummy_tx); +		master->dummy_tx = NULL; +		if (master->unprepare_transfer_hardware && +		    master->unprepare_transfer_hardware(master)) +			dev_err(&master->dev, +				"failed to unprepare transfer hardware\n"); +		if (master->auto_runtime_pm) { +			pm_runtime_mark_last_busy(master->dev.parent); +			pm_runtime_put_autosuspend(master->dev.parent); +		} +		trace_spi_master_idle(master); +		return; +	} + +	/* Make sure we are not already running a message */ +	if (master->cur_msg) { +		spin_unlock_irqrestore(&master->queue_lock, flags); +		return; +	} +	/* Extract head of queue */ +	master->cur_msg = +		list_first_entry(&master->queue, struct spi_message, queue); + +	list_del_init(&master->cur_msg->queue); +	if (master->busy) +		was_busy = true; +	else +		master->busy = true; +	spin_unlock_irqrestore(&master->queue_lock, flags); + +	if (!was_busy && master->auto_runtime_pm) { +		ret = pm_runtime_get_sync(master->dev.parent); +		if (ret < 0) { +			dev_err(&master->dev, "Failed to power device: %d\n", +				ret); +			return; +		} +	} + +	if (!was_busy) +		trace_spi_master_busy(master); + +	if (!was_busy && master->prepare_transfer_hardware) { +		ret = master->prepare_transfer_hardware(master); +		if (ret) { +			dev_err(&master->dev, +				"failed to prepare transfer hardware\n"); + +			if (master->auto_runtime_pm) +				pm_runtime_put(master->dev.parent); +			return; +		} +	} + +	trace_spi_message_start(master->cur_msg); + +	if (master->prepare_message) { +		ret = master->prepare_message(master, master->cur_msg); +		if (ret) { +			dev_err(&master->dev, +				"failed to prepare message: %d\n", ret); +			master->cur_msg->status = ret; +			spi_finalize_current_message(master); +			return; +		} +		master->cur_msg_prepared = true; +	} + +	ret = spi_map_msg(master, master->cur_msg); +	if (ret) { +		master->cur_msg->status = ret; +		spi_finalize_current_message(master); +		return; +	} + +	ret = master->transfer_one_message(master, master->cur_msg); +	if (ret) { +		dev_err(&master->dev, +			"failed to transfer one message from queue\n"); +		return; +	} +} + +static int spi_init_queue(struct spi_master *master) +{ +	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; + +	INIT_LIST_HEAD(&master->queue); +	spin_lock_init(&master->queue_lock); + +	master->running = false; +	master->busy = false; + +	init_kthread_worker(&master->kworker); +	master->kworker_task = kthread_run(kthread_worker_fn, +					   &master->kworker, "%s", +					   dev_name(&master->dev)); +	if (IS_ERR(master->kworker_task)) { +		dev_err(&master->dev, "failed to create message pump task\n"); +		return -ENOMEM; +	} +	init_kthread_work(&master->pump_messages, spi_pump_messages); + +	/* +	 * Master config will indicate if this controller should run the +	 * message pump with high (realtime) priority to reduce the transfer +	 * latency on the bus by minimising the delay between a transfer +	 * request and the scheduling of the 
message pump thread. Without this +	 * setting the message pump thread will remain at default priority. +	 */ +	if (master->rt) { +		dev_info(&master->dev, +			"will run message pump with realtime priority\n"); +		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param); +	} + +	return 0; +} + +/** + * spi_get_next_queued_message() - called by driver to check for queued + * messages + * @master: the master to check for queued messages + * + * If there are more messages in the queue, the next message is returned from + * this call. + */ +struct spi_message *spi_get_next_queued_message(struct spi_master *master) +{ +	struct spi_message *next; +	unsigned long flags; + +	/* get a pointer to the next message, if any */ +	spin_lock_irqsave(&master->queue_lock, flags); +	next = list_first_entry_or_null(&master->queue, struct spi_message, +					queue); +	spin_unlock_irqrestore(&master->queue_lock, flags); + +	return next; +} +EXPORT_SYMBOL_GPL(spi_get_next_queued_message); + +/** + * spi_finalize_current_message() - the current message is complete + * @master: the master to return the message to + * + * Called by the driver to notify the core that the message in the front of the + * queue is complete and can be removed from the queue. + */ +void spi_finalize_current_message(struct spi_master *master) +{ +	struct spi_message *mesg; +	unsigned long flags; +	int ret; + +	spin_lock_irqsave(&master->queue_lock, flags); +	mesg = master->cur_msg; +	master->cur_msg = NULL; + +	queue_kthread_work(&master->kworker, &master->pump_messages); +	spin_unlock_irqrestore(&master->queue_lock, flags); + +	spi_unmap_msg(master, mesg); + +	if (master->cur_msg_prepared && master->unprepare_message) { +		ret = master->unprepare_message(master, mesg); +		if (ret) { +			dev_err(&master->dev, +				"failed to unprepare message: %d\n", ret); +		} +	} +	master->cur_msg_prepared = false; + +	mesg->state = NULL; +	if (mesg->complete) +		mesg->complete(mesg->context); + +	trace_spi_message_done(mesg); +} +EXPORT_SYMBOL_GPL(spi_finalize_current_message); + +static int spi_start_queue(struct spi_master *master) +{ +	unsigned long flags; + +	spin_lock_irqsave(&master->queue_lock, flags); + +	if (master->running || master->busy) { +		spin_unlock_irqrestore(&master->queue_lock, flags); +		return -EBUSY; +	} + +	master->running = true; +	master->cur_msg = NULL; +	spin_unlock_irqrestore(&master->queue_lock, flags); + +	queue_kthread_work(&master->kworker, &master->pump_messages); + +	return 0; +} + +static int spi_stop_queue(struct spi_master *master) +{ +	unsigned long flags; +	unsigned limit = 500; +	int ret = 0; + +	spin_lock_irqsave(&master->queue_lock, flags); + +	/* +	 * This is a bit lame, but is optimized for the common execution path. +	 * A wait_queue on the master->busy could be used, but then the common +	 * execution path (pump_messages) would be required to call wake_up or +	 * friends on every SPI message. Do this instead. 
+	 */ +	while ((!list_empty(&master->queue) || master->busy) && limit--) { +		spin_unlock_irqrestore(&master->queue_lock, flags); +		usleep_range(10000, 11000); +		spin_lock_irqsave(&master->queue_lock, flags); +	} + +	if (!list_empty(&master->queue) || master->busy) +		ret = -EBUSY; +	else +		master->running = false; + +	spin_unlock_irqrestore(&master->queue_lock, flags); + +	if (ret) { +		dev_warn(&master->dev, +			 "could not stop message queue\n"); +		return ret; +	} +	return ret; +} + +static int spi_destroy_queue(struct spi_master *master) +{ +	int ret; + +	ret = spi_stop_queue(master); + +	/* +	 * flush_kthread_worker will block until all work is done. +	 * If the reason that stop_queue timed out is that the work will never +	 * finish, then it does no good to call flush/stop thread, so +	 * return anyway. +	 */ +	if (ret) { +		dev_err(&master->dev, "problem destroying queue\n"); +		return ret; +	} + +	flush_kthread_worker(&master->kworker); +	kthread_stop(master->kworker_task); + +	return 0; +} + +/** + * spi_queued_transfer - transfer function for queued transfers + * @spi: spi device which is requesting transfer + * @msg: spi message which is to be queued to the driver queue + */ +static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) +{ +	struct spi_master *master = spi->master; +	unsigned long flags; + +	spin_lock_irqsave(&master->queue_lock, flags); + +	if (!master->running) { +		spin_unlock_irqrestore(&master->queue_lock, flags); +		return -ESHUTDOWN; +	} +	msg->actual_length = 0; +	msg->status = -EINPROGRESS; + +	list_add_tail(&msg->queue, &master->queue); +	if (!master->busy) +		queue_kthread_work(&master->kworker, &master->pump_messages); + +	spin_unlock_irqrestore(&master->queue_lock, flags); +	return 0; +} + +static int spi_master_initialize_queue(struct spi_master *master) +{ +	int ret; + +	master->transfer = spi_queued_transfer; +	if (!master->transfer_one_message) +		master->transfer_one_message = spi_transfer_one_message; + +	/* Initialize and start queue */ +	ret = spi_init_queue(master); +	if (ret) { +		dev_err(&master->dev, "problem initializing queue\n"); +		goto err_init_queue; +	} +	master->queued = true; +	ret = spi_start_queue(master); +	if (ret) { +		dev_err(&master->dev, "problem starting queue\n"); +		goto err_start_queue; +	} + +	return 0; + +err_start_queue: +	spi_destroy_queue(master); +err_init_queue: +	return ret; +} + +/*-------------------------------------------------------------------------*/ + +#if defined(CONFIG_OF) +/** + * of_register_spi_devices() - Register child devices onto the SPI bus + * @master:	Pointer to spi_master device + * + * Registers an spi_device for each child node of master node which has a 'reg' + * property. 
+ */ +static void of_register_spi_devices(struct spi_master *master) +{ +	struct spi_device *spi; +	struct device_node *nc; +	int rc; +	u32 value; + +	if (!master->dev.of_node) +		return; + +	for_each_available_child_of_node(master->dev.of_node, nc) { +		/* Alloc an spi_device */ +		spi = spi_alloc_device(master); +		if (!spi) { +			dev_err(&master->dev, "spi_device alloc error for %s\n", +				nc->full_name); +			spi_dev_put(spi); +			continue; +		} + +		/* Select device driver */ +		if (of_modalias_node(nc, spi->modalias, +				     sizeof(spi->modalias)) < 0) { +			dev_err(&master->dev, "cannot find modalias for %s\n", +				nc->full_name); +			spi_dev_put(spi); +			continue; +		} + +		/* Device address */ +		rc = of_property_read_u32(nc, "reg", &value); +		if (rc) { +			dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n", +				nc->full_name, rc); +			spi_dev_put(spi); +			continue; +		} +		spi->chip_select = value; + +		/* Mode (clock phase/polarity/etc.) */ +		if (of_find_property(nc, "spi-cpha", NULL)) +			spi->mode |= SPI_CPHA; +		if (of_find_property(nc, "spi-cpol", NULL)) +			spi->mode |= SPI_CPOL; +		if (of_find_property(nc, "spi-cs-high", NULL)) +			spi->mode |= SPI_CS_HIGH; +		if (of_find_property(nc, "spi-3wire", NULL)) +			spi->mode |= SPI_3WIRE; +		if (of_find_property(nc, "spi-lsb-first", NULL)) +			spi->mode |= SPI_LSB_FIRST; + +		/* Device DUAL/QUAD mode */ +		if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { +			switch (value) { +			case 1: +				break; +			case 2: +				spi->mode |= SPI_TX_DUAL; +				break; +			case 4: +				spi->mode |= SPI_TX_QUAD; +				break; +			default: +				dev_warn(&master->dev, +					 "spi-tx-bus-width %d not supported\n", +					 value); +				break; +			} +		} + +		if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { +			switch (value) { +			case 1: +				break; +			case 2: +				spi->mode |= SPI_RX_DUAL; +				break; +			case 4: +				spi->mode |= SPI_RX_QUAD; +				break; +			default: +				dev_warn(&master->dev, +					 "spi-rx-bus-width %d not supported\n", +					 value); +				break; +			} +		} + +		/* Device speed */ +		rc = of_property_read_u32(nc, "spi-max-frequency", &value); +		if (rc) { +			dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n", +				nc->full_name, rc); +			spi_dev_put(spi); +			continue; +		} +		spi->max_speed_hz = value; + +		/* IRQ */ +		spi->irq = irq_of_parse_and_map(nc, 0); + +		/* Store a pointer to the node in the device structure */ +		of_node_get(nc); +		spi->dev.of_node = nc; + +		/* Register the new device */ +		request_module("%s%s", SPI_MODULE_PREFIX, spi->modalias); +		rc = spi_add_device(spi); +		if (rc) { +			dev_err(&master->dev, "spi_device register error %s\n", +				nc->full_name); +			spi_dev_put(spi); +		} + +	} +} +#else +static void of_register_spi_devices(struct spi_master *master) { } +#endif + +#ifdef CONFIG_ACPI +static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) +{ +	struct spi_device *spi = data; + +	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { +		struct acpi_resource_spi_serialbus *sb; + +		sb = &ares->data.spi_serial_bus; +		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { +			spi->chip_select = sb->device_selection; +			spi->max_speed_hz = sb->connection_speed; + +			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) +				spi->mode |= SPI_CPHA; +			if (sb->clock_polarity == ACPI_SPI_START_HIGH) +				spi->mode |= SPI_CPOL; +			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) +				spi->mode |= SPI_CS_HIGH; +		} +	} else if 
(spi->irq < 0) { +		struct resource r; + +		if (acpi_dev_resource_interrupt(ares, 0, &r)) +			spi->irq = r.start; +	} + +	/* Always tell the ACPI core to skip this resource */ +	return 1; +} + +static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, +				       void *data, void **return_value) +{ +	struct spi_master *master = data; +	struct list_head resource_list; +	struct acpi_device *adev; +	struct spi_device *spi; +	int ret; + +	if (acpi_bus_get_device(handle, &adev)) +		return AE_OK; +	if (acpi_bus_get_status(adev) || !adev->status.present) +		return AE_OK; + +	spi = spi_alloc_device(master); +	if (!spi) { +		dev_err(&master->dev, "failed to allocate SPI device for %s\n", +			dev_name(&adev->dev)); +		return AE_NO_MEMORY; +	} + +	ACPI_COMPANION_SET(&spi->dev, adev); +	spi->irq = -1; + +	INIT_LIST_HEAD(&resource_list); +	ret = acpi_dev_get_resources(adev, &resource_list, +				     acpi_spi_add_resource, spi); +	acpi_dev_free_resource_list(&resource_list); + +	if (ret < 0 || !spi->max_speed_hz) { +		spi_dev_put(spi); +		return AE_OK; +	} + +	adev->power.flags.ignore_parent = true; +	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias)); +	if (spi_add_device(spi)) { +		adev->power.flags.ignore_parent = false; +		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n", +			dev_name(&adev->dev)); +		spi_dev_put(spi); +	} + +	return AE_OK; +} + +static void acpi_register_spi_devices(struct spi_master *master) +{ +	acpi_status status; +	acpi_handle handle; + +	handle = ACPI_HANDLE(master->dev.parent); +	if (!handle) +		return; + +	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, +				     acpi_spi_add_device, NULL, +				     master, NULL); +	if (ACPI_FAILURE(status)) +		dev_warn(&master->dev, "failed to enumerate SPI slaves\n"); +} +#else +static inline void acpi_register_spi_devices(struct spi_master *master) {} +#endif /* CONFIG_ACPI */ +  static void spi_master_release(struct device *dev)  {  	struct spi_master *master; @@ -445,6 +1438,7 @@ static struct class spi_master_class = {  }; +  /**   * spi_alloc_master - allocate SPI master controller   * @dev: the controller, possibly using the platform_bus @@ -462,7 +1456,8 @@ static struct class spi_master_class = {   *   * The caller is responsible for assigning the bus number and initializing   * the master's methods before calling spi_register_master(); and (after errors - * adding the device) calling spi_master_put() to prevent a memory leak. + * adding the device) calling spi_master_put() and kfree() to prevent a memory + * leak.   
*/  struct spi_master *spi_alloc_master(struct device *dev, unsigned size)  { @@ -471,11 +1466,13 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size)  	if (!dev)  		return NULL; -	master = kzalloc(size + sizeof *master, GFP_KERNEL); +	master = kzalloc(size + sizeof(*master), GFP_KERNEL);  	if (!master)  		return NULL;  	device_initialize(&master->dev); +	master->bus_num = -1; +	master->num_chipselect = 1;  	master->dev.class = &spi_master_class;  	master->dev.parent = get_device(dev);  	spi_master_set_devdata(master, &master[1]); @@ -484,6 +1481,47 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size)  }  EXPORT_SYMBOL_GPL(spi_alloc_master); +#ifdef CONFIG_OF +static int of_spi_register_master(struct spi_master *master) +{ +	int nb, i, *cs; +	struct device_node *np = master->dev.of_node; + +	if (!np) +		return 0; + +	nb = of_gpio_named_count(np, "cs-gpios"); +	master->num_chipselect = max_t(int, nb, master->num_chipselect); + +	/* Return error only for an incorrectly formed cs-gpios property */ +	if (nb == 0 || nb == -ENOENT) +		return 0; +	else if (nb < 0) +		return nb; + +	cs = devm_kzalloc(&master->dev, +			  sizeof(int) * master->num_chipselect, +			  GFP_KERNEL); +	master->cs_gpios = cs; + +	if (!master->cs_gpios) +		return -ENOMEM; + +	for (i = 0; i < master->num_chipselect; i++) +		cs[i] = -ENOENT; + +	for (i = 0; i < nb; i++) +		cs[i] = of_get_named_gpio(np, "cs-gpios", i); + +	return 0; +} +#else +static int of_spi_register_master(struct spi_master *master) +{ +	return 0; +} +#endif +  /**   * spi_register_master - register SPI master controller   * @master: initialized master, originally from spi_alloc_master() @@ -515,12 +1553,19 @@ int spi_register_master(struct spi_master *master)  	if (!dev)  		return -ENODEV; +	status = of_spi_register_master(master); +	if (status) +		return status; +  	/* even if it's just one always-selected device, there must  	 * be at least one chipselect  	 */  	if (master->num_chipselect == 0)  		return -EINVAL; +	if ((master->bus_num < 0) && master->dev.of_node) +		master->bus_num = of_alias_get_id(master->dev.of_node, "spi"); +  	/* convention:  dynamically assigned bus IDs count down from the max */  	if (master->bus_num < 0) {  		/* FIXME switch to an IDR based scheme, something like @@ -533,6 +1578,9 @@ int spi_register_master(struct spi_master *master)  	spin_lock_init(&master->bus_lock_spinlock);  	mutex_init(&master->bus_lock_mutex);  	master->bus_lock_flag = 0; +	init_completion(&master->xfer_completion); +	if (!master->max_dma_len) +		master->max_dma_len = INT_MAX;  	/* register the device, then userspace will see it.  	 * registration fails if the bus ID is in use. @@ -544,21 +1592,65 @@ int spi_register_master(struct spi_master *master)  	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),  			dynamic ? 
" (dynamic)" : ""); +	/* If we're using a queued driver, start the queue */ +	if (master->transfer) +		dev_info(dev, "master is unqueued, this is deprecated\n"); +	else { +		status = spi_master_initialize_queue(master); +		if (status) { +			device_del(&master->dev); +			goto done; +		} +	} +  	mutex_lock(&board_lock);  	list_add_tail(&master->list, &spi_master_list);  	list_for_each_entry(bi, &board_list, list)  		spi_match_master_to_boardinfo(master, &bi->board_info);  	mutex_unlock(&board_lock); -	status = 0; - -	/* Register devices from the device tree */ +	/* Register devices from the device tree and ACPI */  	of_register_spi_devices(master); +	acpi_register_spi_devices(master);  done:  	return status;  }  EXPORT_SYMBOL_GPL(spi_register_master); +static void devm_spi_unregister(struct device *dev, void *res) +{ +	spi_unregister_master(*(struct spi_master **)res); +} + +/** + * dev_spi_register_master - register managed SPI master controller + * @dev:    device managing SPI master + * @master: initialized master, originally from spi_alloc_master() + * Context: can sleep + * + * Register a SPI device as with spi_register_master() which will + * automatically be unregister + */ +int devm_spi_register_master(struct device *dev, struct spi_master *master) +{ +	struct spi_master **ptr; +	int ret; + +	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); +	if (!ptr) +		return -ENOMEM; + +	ret = spi_register_master(master); +	if (!ret) { +		*ptr = master; +		devres_add(dev, ptr); +	} else { +		devres_free(ptr); +	} + +	return ret; +} +EXPORT_SYMBOL_GPL(devm_spi_register_master);  static int __unregister(struct device *dev, void *null)  { @@ -580,20 +1672,55 @@ void spi_unregister_master(struct spi_master *master)  {  	int dummy; +	if (master->queued) { +		if (spi_destroy_queue(master)) +			dev_err(&master->dev, "queue remove failed\n"); +	} +  	mutex_lock(&board_lock);  	list_del(&master->list);  	mutex_unlock(&board_lock); -	dummy = device_for_each_child(master->dev.parent, &master->dev, -					__unregister); +	dummy = device_for_each_child(&master->dev, NULL, __unregister);  	device_unregister(&master->dev);  }  EXPORT_SYMBOL_GPL(spi_unregister_master); -static int __spi_master_match(struct device *dev, void *data) +int spi_master_suspend(struct spi_master *master) +{ +	int ret; + +	/* Basically no-ops for non-queued masters */ +	if (!master->queued) +		return 0; + +	ret = spi_stop_queue(master); +	if (ret) +		dev_err(&master->dev, "queue stop failed\n"); + +	return ret; +} +EXPORT_SYMBOL_GPL(spi_master_suspend); + +int spi_master_resume(struct spi_master *master) +{ +	int ret; + +	if (!master->queued) +		return 0; + +	ret = spi_start_queue(master); +	if (ret) +		dev_err(&master->dev, "queue restart failed\n"); + +	return ret; +} +EXPORT_SYMBOL_GPL(spi_master_resume); + +static int __spi_master_match(struct device *dev, const void *data)  {  	struct spi_master *m; -	u16 *bus_num = data; +	const u16 *bus_num = data;  	m = container_of(dev, struct spi_master, dev);  	return m->bus_num == *bus_num; @@ -650,13 +1777,35 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master);   */  int spi_setup(struct spi_device *spi)  { -	unsigned	bad_bits; -	int		status; +	unsigned	bad_bits, ugly_bits; +	int		status = 0; +	/* check mode to prevent that DUAL and QUAD set at the same time +	 */ +	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) || +		((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) { +		dev_err(&spi->dev, +		"setup: can not select dual and quad at the same time\n"); +		return 
-EINVAL; +	} +	/* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden +	 */ +	if ((spi->mode & SPI_3WIRE) && (spi->mode & +		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD))) +		return -EINVAL;  	/* help drivers fail *cleanly* when they need options  	 * that aren't supported with their current master  	 */  	bad_bits = spi->mode & ~spi->master->mode_bits; +	ugly_bits = bad_bits & +		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD); +	if (ugly_bits) { +		dev_warn(&spi->dev, +			 "setup: ignoring unsupported mode bits %x\n", +			 ugly_bits); +		spi->mode &= ~ugly_bits; +		bad_bits &= ~ugly_bits; +	}  	if (bad_bits) {  		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",  			bad_bits); @@ -666,10 +1815,13 @@ int spi_setup(struct spi_device *spi)  	if (!spi->bits_per_word)  		spi->bits_per_word = 8; -	status = spi->master->setup(spi); +	if (!spi->max_speed_hz) +		spi->max_speed_hz = spi->master->max_speed_hz; + +	if (spi->master->setup) +		status = spi->master->setup(spi); -	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s" -				"%u bits/w, %u Hz max --> %d\n", +	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",  			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),  			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",  			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", @@ -682,9 +1834,14 @@ int spi_setup(struct spi_device *spi)  }  EXPORT_SYMBOL_GPL(spi_setup); -static int __spi_async(struct spi_device *spi, struct spi_message *message) +static int __spi_validate(struct spi_device *spi, struct spi_message *message)  {  	struct spi_master *master = spi->master; +	struct spi_transfer *xfer; +	int w_size; + +	if (list_empty(&message->transfers)) +		return -EINVAL;  	/* Half-duplex links include original MicroWire, and ones with  	 * only one data pin like SPI_3WIRE (switches direction) or where @@ -693,7 +1850,6 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)  	 */  	if ((master->flags & SPI_MASTER_HALF_DUPLEX)  			|| (spi->mode & SPI_3WIRE)) { -		struct spi_transfer *xfer;  		unsigned flags = master->flags;  		list_for_each_entry(xfer, &message->transfers, transfer_list) { @@ -706,8 +1862,100 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)  		}  	} -	message->spi = spi; +	/** +	 * Set transfer bits_per_word and max speed as spi device default if +	 * it is not set for this transfer. +	 * Set transfer tx_nbits and rx_nbits as single transfer default +	 * (SPI_NBITS_SINGLE) if it is not set for this transfer. 
+	 */ +	list_for_each_entry(xfer, &message->transfers, transfer_list) { +		message->frame_length += xfer->len; +		if (!xfer->bits_per_word) +			xfer->bits_per_word = spi->bits_per_word; + +		if (!xfer->speed_hz) +			xfer->speed_hz = spi->max_speed_hz; + +		if (master->max_speed_hz && +		    xfer->speed_hz > master->max_speed_hz) +			xfer->speed_hz = master->max_speed_hz; + +		if (master->bits_per_word_mask) { +			/* Only 32 bits fit in the mask */ +			if (xfer->bits_per_word > 32) +				return -EINVAL; +			if (!(master->bits_per_word_mask & +					BIT(xfer->bits_per_word - 1))) +				return -EINVAL; +		} + +		/* +		 * SPI transfer length should be multiple of SPI word size +		 * where SPI word size should be power-of-two multiple +		 */ +		if (xfer->bits_per_word <= 8) +			w_size = 1; +		else if (xfer->bits_per_word <= 16) +			w_size = 2; +		else +			w_size = 4; + +		/* No partial transfers accepted */ +		if (xfer->len % w_size) +			return -EINVAL; + +		if (xfer->speed_hz && master->min_speed_hz && +		    xfer->speed_hz < master->min_speed_hz) +			return -EINVAL; + +		if (xfer->tx_buf && !xfer->tx_nbits) +			xfer->tx_nbits = SPI_NBITS_SINGLE; +		if (xfer->rx_buf && !xfer->rx_nbits) +			xfer->rx_nbits = SPI_NBITS_SINGLE; +		/* check transfer tx/rx_nbits: +		 * 1. check the value matches one of single, dual and quad +		 * 2. check tx/rx_nbits match the mode in spi_device +		 */ +		if (xfer->tx_buf) { +			if (xfer->tx_nbits != SPI_NBITS_SINGLE && +				xfer->tx_nbits != SPI_NBITS_DUAL && +				xfer->tx_nbits != SPI_NBITS_QUAD) +				return -EINVAL; +			if ((xfer->tx_nbits == SPI_NBITS_DUAL) && +				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) +				return -EINVAL; +			if ((xfer->tx_nbits == SPI_NBITS_QUAD) && +				!(spi->mode & SPI_TX_QUAD)) +				return -EINVAL; +		} +		/* check transfer rx_nbits */ +		if (xfer->rx_buf) { +			if (xfer->rx_nbits != SPI_NBITS_SINGLE && +				xfer->rx_nbits != SPI_NBITS_DUAL && +				xfer->rx_nbits != SPI_NBITS_QUAD) +				return -EINVAL; +			if ((xfer->rx_nbits == SPI_NBITS_DUAL) && +				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) +				return -EINVAL; +			if ((xfer->rx_nbits == SPI_NBITS_QUAD) && +				!(spi->mode & SPI_RX_QUAD)) +				return -EINVAL; +		} +	} +  	message->status = -EINPROGRESS; + +	return 0; +} + +static int __spi_async(struct spi_device *spi, struct spi_message *message) +{ +	struct spi_master *master = spi->master; + +	message->spi = spi; + +	trace_spi_message_submit(message); +  	return master->transfer(spi, message);  } @@ -746,6 +1994,10 @@ int spi_async(struct spi_device *spi, struct spi_message *message)  	int ret;  	unsigned long flags; +	ret = __spi_validate(spi, message); +	if (ret != 0) +		return ret; +  	spin_lock_irqsave(&master->bus_lock_spinlock, flags);  	if (master->bus_lock_flag) @@ -794,6 +2046,10 @@ int spi_async_locked(struct spi_device *spi, struct spi_message *message)  	int ret;  	unsigned long flags; +	ret = __spi_validate(spi, message); +	if (ret != 0) +		return ret; +  	spin_lock_irqsave(&master->bus_lock_spinlock, flags);  	ret = __spi_async(spi, message); @@ -882,7 +2138,7 @@ EXPORT_SYMBOL_GPL(spi_sync);   * drivers may DMA directly into and out of the message buffers.   *   * This call should be used by drivers that require exclusive access to the - * SPI bus. It has to be preceeded by a spi_bus_lock call. The SPI bus must + * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must   * be released by a spi_bus_unlock call when the exclusive access is over.   
*  * It returns zero on success, else a negative error code. @@ -948,7 +2204,7 @@ int spi_bus_unlock(struct spi_master *master)  EXPORT_SYMBOL_GPL(spi_bus_unlock);  /* portable code must never pass more than 32 bytes */ -#define	SPI_BUFSIZ	max(32,SMP_CACHE_BYTES) +#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)  static u8	*buf; @@ -972,8 +2228,8 @@ static u8	*buf;   * spi_{async,sync}() calls with dma-safe buffers.   */  int spi_write_then_read(struct spi_device *spi, -		const u8 *txbuf, unsigned n_tx, -		u8 *rxbuf, unsigned n_rx) +		const void *txbuf, unsigned n_tx, +		void *rxbuf, unsigned n_rx)  {  	static DEFINE_MUTEX(lock); @@ -982,15 +2238,22 @@ int spi_write_then_read(struct spi_device *spi,  	struct spi_transfer	x[2];  	u8			*local_buf; -	/* Use preallocated DMA-safe buffer.  We can't avoid copying here, -	 * (as a pure convenience thing), but we can keep heap costs -	 * out of the hot path ... +	/* Use preallocated DMA-safe buffer if we can.  We can't avoid +	 * copying here, (as a pure convenience thing), but we can +	 * keep heap costs out of the hot path unless someone else is +	 * using the pre-allocated buffer or the transfer is too large.  	 */ -	if ((n_tx + n_rx) > SPI_BUFSIZ) -		return -EINVAL; +	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) { +		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx), +				    GFP_KERNEL | GFP_DMA); +		if (!local_buf) +			return -ENOMEM; +	} else { +		local_buf = buf; +	}  	spi_message_init(&message); -	memset(x, 0, sizeof x); +	memset(x, 0, sizeof(x));  	if (n_tx) {  		x[0].len = n_tx;  		spi_message_add_tail(&x[0], &message); @@ -1000,14 +2263,6 @@ int spi_write_then_read(struct spi_device *spi,  		spi_message_add_tail(&x[1], &message);  	} -	/* ... unless someone else is using the pre-allocated buffer */ -	if (!mutex_trylock(&lock)) { -		local_buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); -		if (!local_buf) -			return -ENOMEM; -	} else -		local_buf = buf; -  	memcpy(local_buf, txbuf, n_tx);  	x[0].tx_buf = local_buf;  	x[1].rx_buf = local_buf + n_tx;
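
What the new queue buys a controller driver: when no legacy ->transfer method is set, spi_register_master() installs spi_queued_transfer() plus the kthread message pump, and the driver only has to implement ->transfer_one() while the core handles chip select, inter-transfer delays, and timeouts. The sketch below shows the shape of such a driver against the patched core; all foo_* names and the hardware programming are hypothetical.

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct foo_priv {
	void __iomem *base;		/* hypothetical register window */
};

/*
 * Start one transfer on the (hypothetical) hardware.  Returning a
 * positive value makes spi_transfer_one_message() sleep on
 * master->xfer_completion with its computed timeout.
 */
static int foo_transfer_one(struct spi_master *master,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* ... program the FIFO/DMA from xfer->tx_buf / xfer->rx_buf ... */
	return 1;
}

/* Transfer-done interrupt: hand the finished transfer back to the core. */
static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;

	spi_finalize_current_transfer(master);
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct spi_master *master;

	master = spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
	if (!master)
		return -ENOMEM;

	master->dev.of_node = pdev->dev.of_node;	/* DT children via of_register_spi_devices() */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->bits_per_word_mask = SPI_BPW_MASK(8);
	master->transfer_one = foo_transfer_one;
	/* ... map registers and request the IRQ with foo_irq() here ... */

	/*
	 * No ->transfer method is set, so spi_register_master() installs
	 * the queued path; the devm variant added by this patch also
	 * unregisters the master automatically when the driver is unbound.
	 */
	return devm_spi_register_master(&pdev->dev, master);
}

A positive return from ->transfer_one() tells spi_transfer_one_message() that completion is asynchronous; the core then waits on master->xfer_completion, which the interrupt handler signals through spi_finalize_current_transfer().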
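
For protocol drivers, the new __spi_validate() pass fills in per-transfer defaults at submission time: a zero bits_per_word or speed_hz inherits the spi_device values (with speed_hz clamped to the master's limits), and tx_nbits/rx_nbits default to SPI_NBITS_SINGLE and are cross-checked against the device's DUAL/QUAD mode bits. An illustrative caller, assuming the patched core; foo_read_id() is invented and its buffers are assumed to be DMA-safe.

/*
 * Illustrative only.  Transfer fields left at zero are filled in by
 * __spi_validate() from the spi_device defaults before queueing.
 */
static int foo_read_id(struct spi_device *spi, u8 *cmd, u8 *id, size_t len)
{
	struct spi_transfer xfers[2] = {
		{
			.tx_buf = cmd,
			.len = 1,		/* bits_per_word/speed_hz defaulted */
		}, {
			.rx_buf = id,
			.len = len,
			.speed_hz = 1000000,	/* per-transfer override; clamped
						 * to master->max_speed_hz */
		},
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);

	return spi_sync(spi, &msg);
}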
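
spi_write_then_read() also changes behaviour: requests larger than SPI_BUFSIZ (or arriving while the shared buffer is busy) no longer fail with -EINVAL but bounce through a freshly allocated GFP_DMA buffer, and the void * prototypes drop the need for casts at call sites. A small usage sketch; the 0x8f opcode and register layout are invented.

/*
 * Read a 16-bit register: one command byte out, two bytes back.
 * Stack buffers are acceptable here because spi_write_then_read()
 * always copies through its own DMA-safe bounce buffer.
 */
static int foo_read_reg16(struct spi_device *spi, u16 *val)
{
	u8 cmd = 0x8f;
	u8 rx[2];
	int ret;

	ret = spi_write_then_read(spi, &cmd, 1, rx, 2);
	if (ret)
		return ret;

	*val = (rx[0] << 8) | rx[1];
	return 0;
}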
