Diffstat (limited to 'drivers/spi/spi-s3c64xx.c')
-rw-r--r--  drivers/spi/spi-s3c64xx.c  645
1 file changed, 208 insertions(+), 437 deletions(-)
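
The change below drops the legacy Samsung S3C_DMA code paths in favour of plain dmaengine and moves the driver from transfer_one_message() with its own dma_map_single() calls to the SPI core's per-transfer flow: the core maps buffers into scatter/gather tables whenever the driver's can_dma() callback returns true, and transfer_one() then works on xfer->tx_sg / xfer->rx_sg. A minimal sketch of that callback wiring follows; it is not taken from the commit, and the foo_spi_* names and FIFO threshold are purely illustrative.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

#define FOO_SPI_FIFO_LEN	64	/* illustrative FIFO depth, not from the driver */

/*
 * Called by the SPI core for every transfer. Returning true makes the core
 * DMA-map xfer->tx_buf/rx_buf into xfer->tx_sg/rx_sg before transfer_one().
 */
static bool foo_spi_can_dma(struct spi_master *master,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	return xfer->len > FOO_SPI_FIFO_LEN;
}

/*
 * One transfer at a time; the core walks the message, handles chip select
 * and calls spi_finalize_current_message() itself.
 */
static int foo_spi_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	if (foo_spi_can_dma(master, spi, xfer)) {
		/* xfer->tx_sg / xfer->rx_sg are already mapped here and
		 * could be handed to dmaengine_prep_slave_sg(). */
	}
	return 0;	/* 0 = transfer finished synchronously */
}

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;

	master = spi_alloc_master(&pdev->dev, 0);
	if (!master)
		return -ENOMEM;

	master->can_dma = foo_spi_can_dma;
	master->transfer_one = foo_spi_transfer_one;

	return devm_spi_register_master(&pdev->dev, master);
}

static struct platform_driver foo_spi_driver = {
	.driver	= { .name = "foo-spi" },
	.probe	= foo_spi_probe,
};
module_platform_driver(foo_spi_driver);
MODULE_LICENSE("GPL");
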
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 512b8893893..75a56968b14 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -19,7 +19,6 @@
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/workqueue.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/clk.h>
@@ -34,10 +33,6 @@
 
 #include <linux/platform_data/spi-s3c64xx.h>
 
-#ifdef CONFIG_S3C_DMA
-#include <mach/dma.h>
-#endif
-
 #define MAX_SPI_PORTS		3
 #define S3C64XX_SPI_QUIRK_POLL		(1 << 0)
@@ -200,12 +195,8 @@ struct s3c64xx_spi_driver_data {
 	unsigned                        cur_speed;
 	struct s3c64xx_spi_dma_data	rx_dma;
 	struct s3c64xx_spi_dma_data	tx_dma;
-#ifdef CONFIG_S3C_DMA
-	struct samsung_dma_ops		*ops;
-#endif
 	struct s3c64xx_spi_port_config	*port_conf;
 	unsigned int			port_id;
-	unsigned long			gpios[4];
 	bool				cs_gpio;
 };
@@ -285,104 +276,8 @@ static void s3c64xx_spi_dmacb(void *data)
 	spin_unlock_irqrestore(&sdd->lock, flags);
 }
 
-#ifdef CONFIG_S3C_DMA
-/* FIXME: remove this section once arch/arm/mach-s3c64xx uses dmaengine */
-
-static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
-	.name = "samsung-spi-dma",
-};
-
 static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
-					unsigned len, dma_addr_t buf)
-{
-	struct s3c64xx_spi_driver_data *sdd;
-	struct samsung_dma_prep info;
-	struct samsung_dma_config config;
-
-	if (dma->direction == DMA_DEV_TO_MEM) {
-		sdd = container_of((void *)dma,
-			struct s3c64xx_spi_driver_data, rx_dma);
-		config.direction = sdd->rx_dma.direction;
-		config.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
-		config.width = sdd->cur_bpw / 8;
-		sdd->ops->config((enum dma_ch)sdd->rx_dma.ch, &config);
-	} else {
-		sdd = container_of((void *)dma,
-			struct s3c64xx_spi_driver_data, tx_dma);
-		config.direction =  sdd->tx_dma.direction;
-		config.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
-		config.width = sdd->cur_bpw / 8;
-		sdd->ops->config((enum dma_ch)sdd->tx_dma.ch, &config);
-	}
-
-	info.cap = DMA_SLAVE;
-	info.len = len;
-	info.fp = s3c64xx_spi_dmacb;
-	info.fp_param = dma;
-	info.direction = dma->direction;
-	info.buf = buf;
-
-	sdd->ops->prepare((enum dma_ch)dma->ch, &info);
-	sdd->ops->trigger((enum dma_ch)dma->ch);
-}
-
-static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
-{
-	struct samsung_dma_req req;
-	struct device *dev = &sdd->pdev->dev;
-
-	sdd->ops = samsung_dma_get_ops();
-
-	req.cap = DMA_SLAVE;
-	req.client = &s3c64xx_spi_dma_client;
-
-	sdd->rx_dma.ch = (struct dma_chan *)(unsigned long)sdd->ops->request(
-					sdd->rx_dma.dmach, &req, dev, "rx");
-	sdd->tx_dma.ch = (struct dma_chan *)(unsigned long)sdd->ops->request(
-					sdd->tx_dma.dmach, &req, dev, "tx");
-
-	return 1;
-}
-
-static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
-{
-	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
-
-	/*
-	 * If DMA resource was not available during
-	 * probe, no need to continue with dma requests
-	 * else Acquire DMA channels
-	 */
-	while (!is_polling(sdd) && !acquire_dma(sdd))
-		usleep_range(10000, 11000);
-
-	return 0;
-}
-
-static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
-{
-	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
-
-	/* Free DMA channels */
-	if (!is_polling(sdd)) {
-		sdd->ops->release((enum dma_ch)sdd->rx_dma.ch,
-					&s3c64xx_spi_dma_client);
-		sdd->ops->release((enum dma_ch)sdd->tx_dma.ch,
-					&s3c64xx_spi_dma_client);
-	}
-
-	return 0;
-}
-
-static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
-				 struct s3c64xx_spi_dma_data *dma)
-{
-	sdd->ops->stop((enum dma_ch)dma->ch);
-}
-#else
-
-static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
-					unsigned len, dma_addr_t buf)
+			struct sg_table *sgt)
 {
 	struct s3c64xx_spi_driver_data *sdd;
 	struct dma_slave_config config;
@@ -408,8 +303,8 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
 		dmaengine_slave_config(dma->ch, &config);
 	}
 
-	desc = dmaengine_prep_slave_single(dma->ch, buf, len,
-					dma->direction, DMA_PREP_INTERRUPT);
+	desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
+				       dma->direction, DMA_PREP_INTERRUPT);
 
 	desc->callback = s3c64xx_spi_dmacb;
 	desc->callback_param = dma;
@@ -438,6 +333,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
 			ret = -EBUSY;
 			goto out;
 		}
+		spi->dma_rx = sdd->rx_dma.ch;
 
 		sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
 				   (void *)sdd->tx_dma.dmach, dev, "tx");
@@ -446,6 +342,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
 			ret = -EBUSY;
 			goto out_rx;
 		}
+		spi->dma_tx = sdd->tx_dma.ch;
 	}
 
 	ret = pm_runtime_get_sync(&sdd->pdev->dev);
@@ -478,12 +375,14 @@ static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
 	return 0;
 }
 
-static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
-				 struct s3c64xx_spi_dma_data *dma)
+static bool s3c64xx_spi_can_dma(struct spi_master *master,
+				struct spi_device *spi,
+				struct spi_transfer *xfer)
 {
-	dmaengine_terminate_all(dma->ch);
+	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+
+	return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
 }
-#endif
 
 static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 				struct spi_device *spi,
@@ -516,7 +415,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
 		if (dma_mode) {
 			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
-			prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
+			prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
 		} else {
 			switch (sdd->cur_bpw) {
 			case 32:
@@ -548,7 +447,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
 					| S3C64XX_SPI_PACKET_CNT_EN,
 					regs + S3C64XX_SPI_PACKET_CNT);
-			prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
+			prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
 		}
 	}
 
@@ -556,30 +455,6 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 	writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
 }
 
-static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
-						struct spi_device *spi)
-{
-	struct s3c64xx_spi_csinfo *cs;
-
-	if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
-		if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
-			/* Deselect the last toggled device */
-			cs = sdd->tgl_spi->controller_data;
-			if (sdd->cs_gpio)
-				gpio_set_value(cs->line,
-					spi->mode & SPI_CS_HIGH ? 0 : 1);
-		}
-		sdd->tgl_spi = NULL;
-	}
-
-	cs = spi->controller_data;
-	if (sdd->cs_gpio)
-		gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0);
-
-	/* Start the signals */
-	writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
-}
-
 static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
 					int timeout_ms)
 {
@@ -601,117 +476,111 @@ static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
 	return RX_FIFO_LVL(status, sdd);
 }
 
-static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
-				struct spi_transfer *xfer, int dma_mode)
+static int wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
+			struct spi_transfer *xfer)
 {
 	void __iomem *regs = sdd->regs;
 	unsigned long val;
+	u32 status;
 	int ms;
 
 	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
 	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
 	ms += 10; /* some tolerance */
 
-	if (dma_mode) {
-		val = msecs_to_jiffies(ms) + 10;
-		val = wait_for_completion_timeout(&sdd->xfer_completion, val);
-	} else {
-		u32 status;
-		val = msecs_to_loops(ms);
-		do {
-			status = readl(regs + S3C64XX_SPI_STATUS);
-		} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
-	}
+	val = msecs_to_jiffies(ms) + 10;
+	val = wait_for_completion_timeout(&sdd->xfer_completion, val);
 
-	if (dma_mode) {
-		u32 status;
-
-		/*
-		 * If the previous xfer was completed within timeout, then
-		 * proceed further else return -EIO.
-		 * DmaTx returns after simply writing data in the FIFO,
-		 * w/o waiting for real transmission on the bus to finish.
-		 * DmaRx returns only after Dma read data from FIFO which
-		 * needs bus transmission to finish, so we don't worry if
-		 * Xfer involved Rx(with or without Tx).
-		 */
-		if (val && !xfer->rx_buf) {
-			val = msecs_to_loops(10);
+	/*
+	 * If the previous xfer was completed within timeout, then
+	 * proceed further else return -EIO.
+	 * DmaTx returns after simply writing data in the FIFO,
+	 * w/o waiting for real transmission on the bus to finish.
+	 * DmaRx returns only after Dma read data from FIFO which
+	 * needs bus transmission to finish, so we don't worry if
+	 * Xfer involved Rx(with or without Tx).
+	 */
+	if (val && !xfer->rx_buf) {
+		val = msecs_to_loops(10);
+		status = readl(regs + S3C64XX_SPI_STATUS);
+		while ((TX_FIFO_LVL(status, sdd)
+			|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
+		       && --val) {
+			cpu_relax();
 			status = readl(regs + S3C64XX_SPI_STATUS);
-			while ((TX_FIFO_LVL(status, sdd)
-				|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
-					&& --val) {
-				cpu_relax();
-				status = readl(regs + S3C64XX_SPI_STATUS);
-			}
-
-		}
-
-		/* If timed out while checking rx/tx status return error */
-		if (!val)
-			return -EIO;
-	} else {
-		int loops;
-		u32 cpy_len;
-		u8 *buf;
-
-		/* If it was only Tx */
-		if (!xfer->rx_buf) {
-			sdd->state &= ~TXBUSY;
-			return 0;
 		}
-
-		/*
-		 * If the receive length is bigger than the controller fifo
-		 * size, calculate the loops and read the fifo as many times.
-		 * loops = length / max fifo size (calculated by using the
-		 * fifo mask).
-		 * For any size less than the fifo size the below code is
-		 * executed atleast once.
-		 */
-		loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
-		buf = xfer->rx_buf;
-		do {
-			/* wait for data to be received in the fifo */
-			cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
-						(loops ? ms : 0));
-
-			switch (sdd->cur_bpw) {
-			case 32:
-				ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
-					buf, cpy_len / 4);
-				break;
-			case 16:
-				ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
-					buf, cpy_len / 2);
-				break;
-			default:
-				ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
-					buf, cpy_len);
-				break;
-			}
-
-			buf = buf + cpy_len;
-		} while (loops--);
-		sdd->state &= ~RXBUSY;
 	}
 
+	/* If timed out while checking rx/tx status return error */
+	if (!val)
+		return -EIO;
+
 	return 0;
 }
 
-static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
-						struct spi_device *spi)
+static int wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
+			struct spi_transfer *xfer)
 {
-	struct s3c64xx_spi_csinfo *cs = spi->controller_data;
+	void __iomem *regs = sdd->regs;
+	unsigned long val;
+	u32 status;
+	int loops;
+	u32 cpy_len;
+	u8 *buf;
+	int ms;
 
-	if (sdd->tgl_spi == spi)
-		sdd->tgl_spi = NULL;
+	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
+	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
+	ms += 10; /* some tolerance */
 
-	if (sdd->cs_gpio)
-		gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1);
+	val = msecs_to_loops(ms);
+	do {
+		status = readl(regs + S3C64XX_SPI_STATUS);
+	} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
 
-	/* Quiese the signals */
-	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+
+	/* If it was only Tx */
+	if (!xfer->rx_buf) {
+		sdd->state &= ~TXBUSY;
+		return 0;
+	}
+
+	/*
+	 * If the receive length is bigger than the controller fifo
+	 * size, calculate the loops and read the fifo as many times.
+	 * loops = length / max fifo size (calculated by using the
+	 * fifo mask).
+	 * For any size less than the fifo size the below code is
+	 * executed atleast once.
+	 */
+	loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
+	buf = xfer->rx_buf;
+	do {
+		/* wait for data to be received in the fifo */
+		cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
+						       (loops ? ms : 0));
+
+		switch (sdd->cur_bpw) {
+		case 32:
+			ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
+				     buf, cpy_len / 4);
+			break;
+		case 16:
+			ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
+				     buf, cpy_len / 2);
+			break;
+		default:
+			ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
+				    buf, cpy_len);
+			break;
+		}
+
+		buf = buf + cpy_len;
+	} while (loops--);
+	sdd->state &= ~RXBUSY;
+
+	return 0;
 }
 
 static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
@@ -787,91 +656,12 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
 
 #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
 
-static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
-						struct spi_message *msg)
-{
-	struct device *dev = &sdd->pdev->dev;
-	struct spi_transfer *xfer;
-
-	if (is_polling(sdd) || msg->is_dma_mapped)
-		return 0;
-
-	/* First mark all xfer unmapped */
-	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-		xfer->rx_dma = XFER_DMAADDR_INVALID;
-		xfer->tx_dma = XFER_DMAADDR_INVALID;
-	}
-
-	/* Map until end or first fail */
-	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-
-		if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
-			continue;
-
-		if (xfer->tx_buf != NULL) {
-			xfer->tx_dma = dma_map_single(dev,
-					(void *)xfer->tx_buf, xfer->len,
-					DMA_TO_DEVICE);
-			if (dma_mapping_error(dev, xfer->tx_dma)) {
-				dev_err(dev, "dma_map_single Tx failed\n");
-				xfer->tx_dma = XFER_DMAADDR_INVALID;
-				return -ENOMEM;
-			}
-		}
-
-		if (xfer->rx_buf != NULL) {
-			xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
-						xfer->len, DMA_FROM_DEVICE);
-			if (dma_mapping_error(dev, xfer->rx_dma)) {
-				dev_err(dev, "dma_map_single Rx failed\n");
-				dma_unmap_single(dev, xfer->tx_dma,
-						xfer->len, DMA_TO_DEVICE);
-				xfer->tx_dma = XFER_DMAADDR_INVALID;
-				xfer->rx_dma = XFER_DMAADDR_INVALID;
-				return -ENOMEM;
-			}
-		}
-	}
-
-	return 0;
-}
-
-static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
-						struct spi_message *msg)
-{
-	struct device *dev = &sdd->pdev->dev;
-	struct spi_transfer *xfer;
-
-	if (is_polling(sdd) || msg->is_dma_mapped)
-		return;
-
-	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-
-		if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
-			continue;
-
-		if (xfer->rx_buf != NULL
-				&& xfer->rx_dma != XFER_DMAADDR_INVALID)
-			dma_unmap_single(dev, xfer->rx_dma,
-						xfer->len, DMA_FROM_DEVICE);
-
-		if (xfer->tx_buf != NULL
-				&& xfer->tx_dma != XFER_DMAADDR_INVALID)
-			dma_unmap_single(dev, xfer->tx_dma,
-						xfer->len, DMA_TO_DEVICE);
-	}
-}
-
-static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
-					    struct spi_message *msg)
+static int s3c64xx_spi_prepare_message(struct spi_master *master,
+				       struct spi_message *msg)
 {
 	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
 	struct spi_device *spi = msg->spi;
 	struct s3c64xx_spi_csinfo *cs = spi->controller_data;
-	struct spi_transfer *xfer;
-	int status = 0, cs_toggle = 0;
-	u32 speed;
-	u8 bpw;
 
 	/* If Master's(controller) state differs from that needed by Slave */
 	if (sdd->cur_speed != spi->max_speed_hz
@@ -883,112 +673,80 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
 		s3c64xx_spi_config(sdd);
 	}
 
-	/* Map all the transfers if needed */
-	if (s3c64xx_spi_map_mssg(sdd, msg)) {
-		dev_err(&spi->dev,
-			"Xfer: Unable to map message buffers!\n");
-		status = -ENOMEM;
-		goto out;
-	}
-
 	/* Configure feedback delay */
 	writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
 
-	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-
-		unsigned long flags;
-		int use_dma;
-
-		INIT_COMPLETION(sdd->xfer_completion);
-
-		/* Only BPW and Speed may change across transfers */
-		bpw = xfer->bits_per_word;
-		speed = xfer->speed_hz ? : spi->max_speed_hz;
-
-		if (xfer->len % (bpw / 8)) {
-			dev_err(&spi->dev,
-				"Xfer length(%u) not a multiple of word size(%u)\n",
-				xfer->len, bpw / 8);
-			status = -EIO;
-			goto out;
-		}
-
-		if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
-			sdd->cur_bpw = bpw;
-			sdd->cur_speed = speed;
-			s3c64xx_spi_config(sdd);
-		}
+	return 0;
+}
 
-		/* Polling method for xfers not bigger than FIFO capacity */
-		use_dma = 0;
-		if (!is_polling(sdd) &&
-			(sdd->rx_dma.ch && sdd->tx_dma.ch &&
-			(xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1))))
-			use_dma = 1;
+static int s3c64xx_spi_transfer_one(struct spi_master *master,
+				    struct spi_device *spi,
+				    struct spi_transfer *xfer)
+{
+	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+	int status;
+	u32 speed;
+	u8 bpw;
+	unsigned long flags;
+	int use_dma;
 
-		spin_lock_irqsave(&sdd->lock, flags);
+	reinit_completion(&sdd->xfer_completion);
 
-		/* Pending only which is to be done */
-		sdd->state &= ~RXBUSY;
-		sdd->state &= ~TXBUSY;
+	/* Only BPW and Speed may change across transfers */
+	bpw = xfer->bits_per_word;
+	speed = xfer->speed_hz ? : spi->max_speed_hz;
 
-		enable_datapath(sdd, spi, xfer, use_dma);
-
-		/* Slave Select */
-		enable_cs(sdd, spi);
+	if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
+		sdd->cur_bpw = bpw;
+		sdd->cur_speed = speed;
+		s3c64xx_spi_config(sdd);
+	}
 
-		spin_unlock_irqrestore(&sdd->lock, flags);
+	/* Polling method for xfers not bigger than FIFO capacity */
+	use_dma = 0;
+	if (!is_polling(sdd) &&
+	    (sdd->rx_dma.ch && sdd->tx_dma.ch &&
+	     (xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1))))
+		use_dma = 1;
 
-		status = wait_for_xfer(sdd, xfer, use_dma);
+	spin_lock_irqsave(&sdd->lock, flags);
 
-		if (status) {
-			dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
-				xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
-				(sdd->state & RXBUSY) ? 'f' : 'p',
-				(sdd->state & TXBUSY) ? 'f' : 'p',
-				xfer->len);
+	/* Pending only which is to be done */
+	sdd->state &= ~RXBUSY;
+	sdd->state &= ~TXBUSY;
 
-			if (use_dma) {
-				if (xfer->tx_buf != NULL
-						&& (sdd->state & TXBUSY))
-					s3c64xx_spi_dma_stop(sdd, &sdd->tx_dma);
-				if (xfer->rx_buf != NULL
-						&& (sdd->state & RXBUSY))
-					s3c64xx_spi_dma_stop(sdd, &sdd->rx_dma);
-			}
+	enable_datapath(sdd, spi, xfer, use_dma);
 
-			goto out;
-		}
+	/* Start the signals */
+	writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
 
-		if (xfer->delay_usecs)
-			udelay(xfer->delay_usecs);
+	spin_unlock_irqrestore(&sdd->lock, flags);
 
-		if (xfer->cs_change) {
-			/* Hint that the next mssg is gonna be
-			   for the same device */
-			if (list_is_last(&xfer->transfer_list,
-						&msg->transfers))
-				cs_toggle = 1;
+	if (use_dma)
+		status = wait_for_dma(sdd, xfer);
+	else
+		status = wait_for_pio(sdd, xfer);
+
+	if (status) {
+		dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
+			xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
+			(sdd->state & RXBUSY) ? 'f' : 'p',
+			(sdd->state & TXBUSY) ? 'f' : 'p',
+			xfer->len);
+
+		if (use_dma) {
+			if (xfer->tx_buf != NULL
+			    && (sdd->state & TXBUSY))
+				dmaengine_terminate_all(sdd->tx_dma.ch);
+			if (xfer->rx_buf != NULL
+			    && (sdd->state & RXBUSY))
+				dmaengine_terminate_all(sdd->rx_dma.ch);
 		}
-
-		msg->actual_length += xfer->len;
-
+	} else {
 		flush_fifo(sdd);
 	}
 
-out:
-	if (!cs_toggle || status)
-		disable_cs(sdd, spi);
-	else
-		sdd->tgl_spi = spi;
-
-	s3c64xx_spi_unmap_mssg(sdd, msg);
-
-	msg->status = status;
-
-	spi_finalize_current_message(master);
-
-	return 0;
+	return status;
 }
 
 static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
@@ -1014,7 +772,6 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
 
 	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
 	if (!cs) {
-		dev_err(&spi->dev, "could not allocate memory for controller data\n");
 		of_node_put(data_np);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1071,6 +828,8 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
 					cs->line, err);
 				goto err_gpio_req;
 			}
+
+			spi->cs_gpio = cs->line;
 		}
 
 		spi_set_ctldata(spi, cs);
@@ -1117,12 +876,13 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
 	}
 
 	pm_runtime_put(&sdd->pdev->dev);
-	disable_cs(sdd, spi);
+	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
 	return 0;
 
 setup_exit:
+	pm_runtime_put(&sdd->pdev->dev);
 	/* setup() returns with device de-selected */
-	disable_cs(sdd, spi);
+	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
 
 	gpio_free(cs->line);
 	spi_set_ctldata(spi, NULL);
@@ -1140,8 +900,8 @@ static void s3c64xx_spi_cleanup(struct spi_device *spi)
 	struct s3c64xx_spi_driver_data *sdd;
 
 	sdd = spi_master_get_devdata(spi->master);
-	if (cs && sdd->cs_gpio) {
-		gpio_free(cs->line);
+	if (spi->cs_gpio) {
+		gpio_free(spi->cs_gpio);
 		if (spi->dev.of_node)
 			kfree(cs);
 	}
@@ -1225,10 +985,8 @@ static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
 	u32 temp;
 
 	sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
-	if (!sci) {
-		dev_err(dev, "memory allocation for spi_info failed\n");
+	if (!sci)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
 		dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
@@ -1359,7 +1117,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
 	master->setup = s3c64xx_spi_setup;
 	master->cleanup = s3c64xx_spi_cleanup;
 	master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
-	master->transfer_one_message = s3c64xx_spi_transfer_one_message;
+	master->prepare_message = s3c64xx_spi_prepare_message;
+	master->transfer_one = s3c64xx_spi_transfer_one;
 	master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
 	master->num_chipselect = sci->num_cs;
 	master->dma_alignment = 8;
@@ -1368,6 +1127,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
 	/* the spi->mode bits understood by this driver: */
 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 	master->auto_runtime_pm = true;
+	if (!is_polling(sdd))
+		master->can_dma = s3c64xx_spi_can_dma;
 
 	sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
 	if (IS_ERR(sdd->regs)) {
@@ -1428,9 +1189,12 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
 	       S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
 	       sdd->regs + S3C64XX_SPI_INT_EN);
 
-	if (spi_register_master(master)) {
-		dev_err(&pdev->dev, "cannot register SPI master\n");
-		ret = -EBUSY;
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	ret = devm_spi_register_master(&pdev->dev, master);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "cannot register SPI master: %d\n", ret);
 		goto err3;
 	}
@@ -1440,8 +1204,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
 					mem_res,
 					sdd->rx_dma.dmach, sdd->tx_dma.dmach);
 
-	pm_runtime_enable(&pdev->dev);
-
 	return 0;
 
 err3:
@@ -1461,16 +1223,12 @@ static int s3c64xx_spi_remove(struct platform_device *pdev)
 
 	pm_runtime_disable(&pdev->dev);
 
-	spi_unregister_master(master);
-
 	writel(0, sdd->regs + S3C64XX_SPI_INT_EN);
 
 	clk_disable_unprepare(sdd->src_clk);
 
 	clk_disable_unprepare(sdd->clk);
 
-	spi_master_put(master);
-
 	return 0;
 }
@@ -1480,11 +1238,14 @@ static int s3c64xx_spi_suspend(struct device *dev)
 	struct spi_master *master = dev_get_drvdata(dev);
 	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
 
-	spi_master_suspend(master);
+	int ret = spi_master_suspend(master);
+	if (ret)
+		return ret;
 
-	/* Disable the clock */
-	clk_disable_unprepare(sdd->src_clk);
-	clk_disable_unprepare(sdd->clk);
+	if (!pm_runtime_suspended(dev)) {
+		clk_disable_unprepare(sdd->clk);
+		clk_disable_unprepare(sdd->src_clk);
+	}
 
 	sdd->cur_speed = 0; /* Output Clock is stopped */
@@ -1500,15 +1261,14 @@ static int s3c64xx_spi_resume(struct device *dev)
 	if (sci->cfg_gpio)
 		sci->cfg_gpio();
 
-	/* Enable the clock */
-	clk_prepare_enable(sdd->src_clk);
-	clk_prepare_enable(sdd->clk);
+	if (!pm_runtime_suspended(dev)) {
+		clk_prepare_enable(sdd->src_clk);
+		clk_prepare_enable(sdd->clk);
+	}
 
 	s3c64xx_spi_hwinit(sdd, sdd->port_id);
 
-	spi_master_resume(master);
-
-	return 0;
+	return spi_master_resume(master);
 }
 #endif /* CONFIG_PM_SLEEP */
@@ -1528,9 +1288,17 @@ static int s3c64xx_spi_runtime_resume(struct device *dev)
 {
 	struct spi_master *master = dev_get_drvdata(dev);
 	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+	int ret;
 
-	clk_prepare_enable(sdd->src_clk);
-	clk_prepare_enable(sdd->clk);
+	ret = clk_prepare_enable(sdd->src_clk);
+	if (ret != 0)
+		return ret;
+
+	ret = clk_prepare_enable(sdd->clk);
+	if (ret != 0) {
+		clk_disable_unprepare(sdd->src_clk);
+		return ret;
+	}
 
 	return 0;
 }
@@ -1616,6 +1384,18 @@ static struct platform_device_id s3c64xx_spi_driver_ids[] = {
 };
 
 static const struct of_device_id s3c64xx_spi_dt_match[] = {
+	{ .compatible = "samsung,s3c2443-spi",
+			.data = (void *)&s3c2443_spi_port_config,
+	},
+	{ .compatible = "samsung,s3c6410-spi",
+			.data = (void *)&s3c6410_spi_port_config,
+	},
+	{ .compatible = "samsung,s5pc100-spi",
+			.data = (void *)&s5pc100_spi_port_config,
+	},
+	{ .compatible = "samsung,s5pv210-spi",
+			.data = (void *)&s5pv210_spi_port_config,
+	},
 	{ .compatible = "samsung,exynos4210-spi",
 			.data = (void *)&exynos4_spi_port_config,
 	},
@@ -1633,22 +1413,13 @@ static struct platform_driver s3c64xx_spi_driver = {
 		.pm = &s3c64xx_spi_pm,
 		.of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
 	},
+	.probe = s3c64xx_spi_probe,
 	.remove = s3c64xx_spi_remove,
 	.id_table = s3c64xx_spi_driver_ids,
 };
 MODULE_ALIAS("platform:s3c64xx-spi");
 
-static int __init s3c64xx_spi_init(void)
-{
-	return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
-}
-subsys_initcall(s3c64xx_spi_init);
-
-static void __exit s3c64xx_spi_exit(void)
-{
-	platform_driver_unregister(&s3c64xx_spi_driver);
-}
-module_exit(s3c64xx_spi_exit);
+module_platform_driver(s3c64xx_spi_driver);
 
 MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
 MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
