Diffstat (limited to 'drivers/dma/sh')
-rw-r--r--	drivers/dma/sh/Kconfig		8
-rw-r--r--	drivers/dma/sh/Makefile		1
-rw-r--r--	drivers/dma/sh/rcar-audmapp.c	320
-rw-r--r--	drivers/dma/sh/rcar-hpbdma.c	21
-rw-r--r--	drivers/dma/sh/shdma-base.c	110
-rw-r--r--	drivers/dma/sh/shdma-of.c	3
-rw-r--r--	drivers/dma/sh/shdmac.c		30
-rw-r--r--	drivers/dma/sh/sudmac.c		11
8 files changed, 458 insertions, 46 deletions
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index dadd9e010c0..0f719816c91 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -4,7 +4,7 @@
 
 config SH_DMAE_BASE
 	bool "Renesas SuperH DMA Engine support"
-	depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE)
+	depends on (SUPERH && SH_DMA) || ARCH_SHMOBILE || COMPILE_TEST
 	depends on !SH_DMA_API
 	default y
 	select DMA_ENGINE
@@ -29,6 +29,12 @@ config RCAR_HPB_DMAE
 	help
 	  Enable support for the Renesas R-Car series DMA controllers.
 
+config RCAR_AUDMAC_PP
+	tristate "Renesas R-Car Audio DMAC Peripheral Peripheral support"
+	depends on SH_DMAE_BASE
+	help
+	  Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers.
+
 config SHDMA_R8A73A4
 	def_bool y
 	depends on ARCH_R8A73A4 && SH_DMAE != n
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index e856af23b78..1ce88b28cfc 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -7,3 +7,4 @@ endif
 shdma-objs := $(shdma-y)
 obj-$(CONFIG_SUDMAC) += sudmac.o
 obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
+obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o
diff --git a/drivers/dma/sh/rcar-audmapp.c b/drivers/dma/sh/rcar-audmapp.c
new file mode 100644
index 00000000000..2de77289a2e
--- /dev/null
+++ b/drivers/dma/sh/rcar-audmapp.c
@@ -0,0 +1,320 @@
+/*
+ * This is for Renesas R-Car Audio-DMAC-peri-peri.
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ * Copyright (C) 2014 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * based on the drivers/dma/sh/shdma.c
+ *
+ * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_data/dma-rcar-audmapp.h>
+#include <linux/platform_device.h>
+#include <linux/shdma-base.h>
+
+/*
+ * DMA register
+ */
+#define PDMASAR		0x00
+#define PDMADAR		0x04
+#define PDMACHCR	0x0c
+
+/* PDMACHCR */
+#define PDMACHCR_DE		(1 << 0)
+
+#define AUDMAPP_MAX_CHANNELS	29
+
+/* Default MEMCPY transfer size = 2^2 = 4 bytes */
+#define LOG2_DEFAULT_XFER_SIZE	2
+#define AUDMAPP_SLAVE_NUMBER	256
+#define AUDMAPP_LEN_MAX		(16 * 1024 * 1024)
+
+struct audmapp_chan {
+	struct shdma_chan shdma_chan;
+	struct audmapp_slave_config *config;
+	void __iomem *base;
+};
+
+struct audmapp_device {
+	struct shdma_dev shdma_dev;
+	struct audmapp_pdata *pdata;
+	struct device *dev;
+	void __iomem *chan_reg;
+};
+
+#define to_chan(chan) container_of(chan, struct audmapp_chan, shdma_chan)
+#define to_dev(chan) container_of(chan->shdma_chan.dma_chan.device,	\
+				  struct audmapp_device, shdma_dev.dma_dev)
+
+static void audmapp_write(struct audmapp_chan *auchan, u32 data, u32 reg)
+{
+	struct audmapp_device *audev = to_dev(auchan);
+	struct device *dev = audev->dev;
+
+	dev_dbg(dev, "w %p : %08x\n", auchan->base + reg, data);
+
+	iowrite32(data, auchan->base + reg);
+}
+
+static u32 audmapp_read(struct audmapp_chan *auchan, u32 reg)
+{
+	return ioread32(auchan->base + reg);
+}
+
+static void audmapp_halt(struct shdma_chan *schan)
+{
+	struct audmapp_chan *auchan = to_chan(schan);
+	int i;
+
+	audmapp_write(auchan, 0, PDMACHCR);
+
+	for (i = 0; i < 1024; i++) {
+		if (0 == audmapp_read(auchan, PDMACHCR))
+			return;
+		udelay(1);
+	}
+}
+
+static void audmapp_start_xfer(struct shdma_chan *schan,
+			       struct shdma_desc *sdecs)
+{
+	struct audmapp_chan *auchan = to_chan(schan);
+	struct audmapp_device *audev = to_dev(auchan);
+	struct audmapp_slave_config *cfg = auchan->config;
+	struct device *dev = audev->dev;
+	u32 chcr = cfg->chcr | PDMACHCR_DE;
+
+	dev_dbg(dev, "src/dst/chcr = %pad/%pad/%x\n",
+		&cfg->src, &cfg->dst, cfg->chcr);
+
+	audmapp_write(auchan, cfg->src,	PDMASAR);
+	audmapp_write(auchan, cfg->dst,	PDMADAR);
+	audmapp_write(auchan, chcr,	PDMACHCR);
+}
+
+static struct audmapp_slave_config *
+audmapp_find_slave(struct audmapp_chan *auchan, int slave_id)
+{
+	struct audmapp_device *audev = to_dev(auchan);
+	struct audmapp_pdata *pdata = audev->pdata;
+	struct audmapp_slave_config *cfg;
+	int i;
+
+	if (slave_id >= AUDMAPP_SLAVE_NUMBER)
+		return NULL;
+
+	for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
+		if (cfg->slave_id == slave_id)
+			return cfg;
+
+	return NULL;
+}
+
+static int audmapp_set_slave(struct shdma_chan *schan, int slave_id,
+			     dma_addr_t slave_addr, bool try)
+{
+	struct audmapp_chan *auchan = to_chan(schan);
+	struct audmapp_slave_config *cfg =
+		audmapp_find_slave(auchan, slave_id);
+
+	if (!cfg)
+		return -ENODEV;
+	if (try)
+		return 0;
+
+	auchan->config	= cfg;
+
+	return 0;
+}
+
+static int audmapp_desc_setup(struct shdma_chan *schan,
+			      struct shdma_desc *sdecs,
+			      dma_addr_t src, dma_addr_t dst, size_t *len)
+{
+	struct audmapp_chan *auchan = to_chan(schan);
+	struct audmapp_slave_config *cfg = auchan->config;
+
+	if (!cfg)
+		return -ENODEV;
+
+	if (*len > (size_t)AUDMAPP_LEN_MAX)
+		*len = (size_t)AUDMAPP_LEN_MAX;
+
+	return 0;
+}
+
+static void audmapp_setup_xfer(struct shdma_chan *schan,
+			       int slave_id)
+{
+}
+
+static dma_addr_t audmapp_slave_addr(struct shdma_chan *schan)
+{
+	return 0; /* always fixed address */
+}
+
+static bool audmapp_channel_busy(struct shdma_chan *schan)
+{
+	struct audmapp_chan *auchan = to_chan(schan);
+	u32 chcr = audmapp_read(auchan, PDMACHCR);
+
+	return chcr & ~PDMACHCR_DE;
+}
+
+static bool audmapp_desc_completed(struct shdma_chan *schan,
+				   struct shdma_desc *sdesc)
+{
+	return true;
+}
+
+static struct shdma_desc *audmapp_embedded_desc(void *buf, int i)
+{
+	return &((struct shdma_desc *)buf)[i];
+}
+
+static const struct shdma_ops audmapp_shdma_ops = {
+	.halt_channel	= audmapp_halt,
+	.desc_setup	= audmapp_desc_setup,
+	.set_slave	= audmapp_set_slave,
+	.start_xfer	= audmapp_start_xfer,
+	.embedded_desc	= audmapp_embedded_desc,
+	.setup_xfer	= audmapp_setup_xfer,
+	.slave_addr	= audmapp_slave_addr,
+	.channel_busy	= audmapp_channel_busy,
+	.desc_completed	= audmapp_desc_completed,
+};
+
+static int audmapp_chan_probe(struct platform_device *pdev,
+			      struct audmapp_device *audev, int id)
+{
+	struct shdma_dev *sdev = &audev->shdma_dev;
+	struct audmapp_chan *auchan;
+	struct shdma_chan *schan;
+	struct device *dev = audev->dev;
+
+	auchan = devm_kzalloc(dev, sizeof(*auchan), GFP_KERNEL);
+	if (!auchan)
+		return -ENOMEM;
+
+	schan = &auchan->shdma_chan;
+	schan->max_xfer_len = AUDMAPP_LEN_MAX;
+
+	shdma_chan_probe(sdev, schan, id);
+
+	auchan->base = audev->chan_reg + 0x20 + (0x10 * id);
+	dev_dbg(dev, "%02d : %p / %p", id, auchan->base, audev->chan_reg);
+
+	return 0;
+}
+
+static void audmapp_chan_remove(struct audmapp_device *audev)
+{
+	struct dma_device *dma_dev = &audev->shdma_dev.dma_dev;
+	struct shdma_chan *schan;
+	int i;
+
+	shdma_for_each_chan(schan, &audev->shdma_dev, i) {
+		BUG_ON(!schan);
+		shdma_chan_remove(schan);
+	}
+	dma_dev->chancnt = 0;
+}
+
+static int audmapp_probe(struct platform_device *pdev)
+{
+	struct audmapp_pdata *pdata = pdev->dev.platform_data;
+	struct audmapp_device *audev;
+	struct shdma_dev *sdev;
+	struct dma_device *dma_dev;
+	struct resource *res;
+	int err, i;
+
+	if (!pdata)
+		return -ENODEV;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	audev = devm_kzalloc(&pdev->dev, sizeof(*audev), GFP_KERNEL);
+	if (!audev)
+		return -ENOMEM;
+
+	audev->dev	= &pdev->dev;
+	audev->pdata	= pdata;
+	audev->chan_reg	= devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(audev->chan_reg))
+		return PTR_ERR(audev->chan_reg);
+
+	sdev		= &audev->shdma_dev;
+	sdev->ops	= &audmapp_shdma_ops;
+	sdev->desc_size	= sizeof(struct shdma_desc);
+
+	dma_dev			= &sdev->dma_dev;
+	dma_dev->copy_align	= LOG2_DEFAULT_XFER_SIZE;
+	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+
+	err = shdma_init(&pdev->dev, sdev, AUDMAPP_MAX_CHANNELS);
+	if (err < 0)
+		return err;
+
+	platform_set_drvdata(pdev, audev);
+
+	/* Create DMA Channel */
+	for (i = 0; i < AUDMAPP_MAX_CHANNELS; i++) {
+		err = audmapp_chan_probe(pdev, audev, i);
+		if (err)
+			goto chan_probe_err;
+	}
+
+	err = dma_async_device_register(dma_dev);
+	if (err < 0)
+		goto chan_probe_err;
+
+	return err;
+
+chan_probe_err:
+	audmapp_chan_remove(audev);
+	shdma_cleanup(sdev);
+
+	return err;
+}
+
+static int audmapp_remove(struct platform_device *pdev)
+{
+	struct audmapp_device *audev = platform_get_drvdata(pdev);
+	struct dma_device *dma_dev = &audev->shdma_dev.dma_dev;
+
+	dma_async_device_unregister(dma_dev);
+
+	audmapp_chan_remove(audev);
+	shdma_cleanup(&audev->shdma_dev);
+
+	return 0;
+}
+
+static struct platform_driver audmapp_driver = {
+	.probe		= audmapp_probe,
+	.remove		= audmapp_remove,
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= "rcar-audmapp-engine",
+	},
+};
+module_platform_driver(audmapp_driver);
+
+MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
+MODULE_DESCRIPTION("Renesas R-Car Audio DMAC peri-peri driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
index 45a520281ce..b212d9471ab 100644
--- a/drivers/dma/sh/rcar-hpbdma.c
+++ b/drivers/dma/sh/rcar-hpbdma.c
@@ -18,6 +18,7 @@
 
 #include <linux/dmaengine.h>
 #include <linux/delay.h>
+#include <linux/err.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
@@ -60,6 +61,7 @@
 #define HPB_DMAE_DSTPR_DMSTP	BIT(0)
 
 /* DMA status register (DSTSR) bits */
+#define HPB_DMAE_DSTSR_DQSTS	BIT(2)
 #define HPB_DMAE_DSTSR_DMSTS	BIT(0)
 
 /* DMA common registers */
@@ -93,6 +95,7 @@ struct hpb_dmae_chan {
 	void __iomem *base;
 	const struct hpb_dmae_slave_config *cfg;
 	char dev_id[16];		/* unique name per DMAC of channel */
+	dma_addr_t slave_addr;
 };
 
 struct hpb_dmae_device {
@@ -285,6 +288,9 @@ static void hpb_dmae_halt(struct shdma_chan *schan)
 
 	ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR);
 	ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR);
+
+	chan->plane_idx = 0;
+	chan->first_desc = true;
 }
 
 static const struct hpb_dmae_slave_config *
@@ -384,7 +390,10 @@ static bool hpb_dmae_channel_busy(struct shdma_chan *schan)
 	struct hpb_dmae_chan *chan = to_chan(schan);
 	u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR);
 
-	return (dstsr & HPB_DMAE_DSTSR_DMSTS) == HPB_DMAE_DSTSR_DMSTS;
+	if (chan->xfer_mode == XFER_DOUBLE)
+		return dstsr & HPB_DMAE_DSTSR_DQSTS;
+	else
+		return dstsr & HPB_DMAE_DSTSR_DMSTS;
 }
 
 static int
@@ -432,7 +441,6 @@ hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
 		hpb_chan->xfer_mode = XFER_DOUBLE;
 	} else {
 		dev_err(hpb_chan->shdma_chan.dev, "DCR setting error");
-		shdma_free_irq(&hpb_chan->shdma_chan);
 		return -EINVAL;
 	}
 
@@ -446,7 +454,8 @@ hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
 	return 0;
 }
 
-static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try)
+static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id,
+			      dma_addr_t slave_addr, bool try)
 {
 	struct hpb_dmae_chan *chan = to_chan(schan);
 	const struct hpb_dmae_slave_config *sc =
@@ -457,6 +466,7 @@ static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try)
 	if (try)
 		return 0;
 	chan->cfg = sc;
+	chan->slave_addr = slave_addr ? : sc->addr;
 	return hpb_dmae_alloc_chan_resources(chan, sc);
 }
 
@@ -468,7 +478,7 @@ static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan)
 {
 	struct hpb_dmae_chan *chan = to_chan(schan);
 
-	return chan->cfg->addr;
+	return chan->slave_addr;
 }
 
 static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i)
@@ -508,6 +518,8 @@ static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
 	}
 
 	schan = &new_hpb_chan->shdma_chan;
+	schan->max_xfer_len = HPB_DMA_TCR_MAX;
+
 	shdma_chan_probe(sdev, schan, id);
 
 	if (pdev->id >= 0)
@@ -614,7 +626,6 @@ static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
 
 	shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) {
 		BUG_ON(!schan);
-		shdma_free_irq(schan);
 		shdma_chan_remove(schan);
 	}
 	dma_dev->chancnt = 0;
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index d94ab592cc1..b35007e21e6 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -73,8 +73,7 @@ static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
 static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct shdma_desc *chunk, *c, *desc =
-		container_of(tx, struct shdma_desc, async_tx),
-		*last = desc;
+		container_of(tx, struct shdma_desc, async_tx);
 	struct shdma_chan *schan = to_shdma_chan(tx->chan);
 	dma_async_tx_callback callback = tx->callback;
 	dma_cookie_t cookie;
@@ -98,19 +97,20 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
 				      &chunk->node == &schan->ld_free))
 			break;
 		chunk->mark = DESC_SUBMITTED;
-		/* Callback goes to the last chunk */
-		chunk->async_tx.callback = NULL;
+		if (chunk->chunks == 1) {
+			chunk->async_tx.callback = callback;
+			chunk->async_tx.callback_param = tx->callback_param;
+		} else {
+			/* Callback goes to the last chunk */
+			chunk->async_tx.callback = NULL;
+		}
 		chunk->cookie = cookie;
 		list_move_tail(&chunk->node, &schan->ld_queue);
-		last = chunk;
 
 		dev_dbg(schan->dev, "submit #%d@%p on %d\n",
-			tx->cookie, &last->async_tx, schan->id);
+			tx->cookie, &chunk->async_tx, schan->id);
 	}
 
-	last->async_tx.callback = callback;
-	last->async_tx.callback_param = tx->callback_param;
-
 	if (power_up) {
 		int ret;
 		schan->pm_state = SHDMA_PM_BUSY;
@@ -227,7 +227,7 @@ bool shdma_chan_filter(struct dma_chan *chan, void *arg)
 	struct shdma_chan *schan = to_shdma_chan(chan);
 	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
 	const struct shdma_ops *ops = sdev->ops;
-	int match = (int)arg;
+	int match = (long)arg;
 	int ret;
 
 	if (match < 0)
@@ -304,6 +304,7 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
 	dma_async_tx_callback callback = NULL;
 	void *param = NULL;
 	unsigned long flags;
+	LIST_HEAD(cyclic_list);
 
 	spin_lock_irqsave(&schan->chan_lock, flags);
 	list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
@@ -369,10 +370,16 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
 		if (((desc->mark == DESC_COMPLETED ||
 		      desc->mark == DESC_WAITING) &&
 		     async_tx_test_ack(&desc->async_tx)) || all) {
-			/* Remove from ld_queue list */
-			desc->mark = DESC_IDLE;
-			list_move(&desc->node, &schan->ld_free);
+			if (all || !desc->cyclic) {
+				/* Remove from ld_queue list */
+				desc->mark = DESC_IDLE;
+				list_move(&desc->node, &schan->ld_free);
+			} else {
+				/* reuse as cyclic */
+				desc->mark = DESC_SUBMITTED;
+				list_move_tail(&desc->node, &cyclic_list);
+			}
 
 			if (list_empty(&schan->ld_queue)) {
 				dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
@@ -389,6 +396,8 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
 		 */
 		schan->dma_chan.completed_cookie = schan->dma_chan.cookie;
 
+	list_splice_tail(&cyclic_list, &schan->ld_queue);
+
 	spin_unlock_irqrestore(&schan->chan_lock, flags);
 
 	if (callback)
@@ -491,8 +500,8 @@ static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
 	}
 
 	dev_dbg(schan->dev,
-		"chaining (%u/%u)@%x -> %x with %p, cookie %d\n",
-		copy_size, *len, *src, *dst, &new->async_tx,
+		"chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n",
+		copy_size, *len, src, dst, &new->async_tx,
 		new->async_tx.cookie);
 
 	new->mark = DESC_PREPARED;
@@ -521,7 +530,7 @@
  */
 static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
 	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
-	enum dma_transfer_direction direction, unsigned long flags)
+	enum dma_transfer_direction direction, unsigned long flags, bool cyclic)
 {
 	struct scatterlist *sg;
 	struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
@@ -555,8 +564,8 @@ static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
 			goto err_get_desc;
 
 		do {
-			dev_dbg(schan->dev, "Add SG #%d@%p[%d], dma %llx\n",
-				i, sg, len, (unsigned long long)sg_addr);
+			dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
+				i, sg, len, &sg_addr);
 
 			if (direction == DMA_DEV_TO_MEM)
 				new = shdma_add_desc(schan, flags,
@@ -569,7 +578,11 @@ static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
 			if (!new)
 				goto err_get_desc;
 
-			new->chunks = chunks--;
+			new->cyclic = cyclic;
+			if (cyclic)
+				new->chunks = 1;
+			else
+				new->chunks = chunks--;
 			list_add_tail(&new->node, &tx_list);
 		} while (len);
 	}
@@ -612,7 +625,8 @@ static struct dma_async_tx_descriptor *shdma_prep_memcpy(
 	sg_dma_address(&sg) = dma_src;
 	sg_dma_len(&sg) = len;
 
-	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, flags);
+	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
+			     flags, false);
 }
 
 static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
@@ -640,7 +654,58 @@ static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
 	slave_addr = ops->slave_addr(schan);
 
 	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
-			      direction, flags);
+			     direction, flags, false);
+}
+
+#define SHDMA_MAX_SG_LEN 32
+
+static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
+	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct shdma_chan *schan = to_shdma_chan(chan);
+	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
+	const struct shdma_ops *ops = sdev->ops;
+	unsigned int sg_len = buf_len / period_len;
+	int slave_id = schan->slave_id;
+	dma_addr_t slave_addr;
+	struct scatterlist sgl[SHDMA_MAX_SG_LEN];
+	int i;
+
+	if (!chan)
+		return NULL;
+
+	BUG_ON(!schan->desc_num);
+
+	if (sg_len > SHDMA_MAX_SG_LEN) {
+		dev_err(schan->dev, "sg length %d exceds limit %d",
+				sg_len, SHDMA_MAX_SG_LEN);
+		return NULL;
+	}
+
+	/* Someone calling slave DMA on a generic channel? */
+	if (slave_id < 0 || (buf_len < period_len)) {
+		dev_warn(schan->dev,
+			"%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
+			__func__, buf_len, period_len, slave_id);
+		return NULL;
+	}
+
+	slave_addr = ops->slave_addr(schan);
+
+	sg_init_table(sgl, sg_len);
+	for (i = 0; i < sg_len; i++) {
+		dma_addr_t src = buf_addr + (period_len * i);
+
+		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
+			    offset_in_page(src));
+		sg_dma_address(&sgl[i]) = src;
+		sg_dma_len(&sgl[i]) = period_len;
+	}
+
+	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
+			     direction, flags, true);
 }
 
 static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -724,7 +789,7 @@ static enum dma_status shdma_tx_status(struct dma_chan *chan,
 	 * If we don't find cookie on the queue, it has been aborted and we have
 	 * to report error
 	 */
-	if (status != DMA_SUCCESS) {
+	if (status != DMA_COMPLETE) {
 		struct shdma_desc *sdesc;
 		status = DMA_ERROR;
 		list_for_each_entry(sdesc, &schan->ld_queue, node)
@@ -915,6 +980,7 @@ int shdma_init(struct device *dev, struct shdma_dev *sdev,
 
 	/* Compulsory for DMA_SLAVE fields */
 	dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
+	dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
 	dma_dev->device_control = shdma_control;
 
 	dma_dev->dev = dev;
diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c
index 06473a05fe4..b4ff9d3e56d 100644
--- a/drivers/dma/sh/shdma-of.c
+++ b/drivers/dma/sh/shdma-of.c
@@ -33,7 +33,8 @@ static struct dma_chan *shdma_of_xlate(struct of_phandle_args *dma_spec,
 	/* Only slave DMA channels can be allocated via DT */
 	dma_cap_set(DMA_SLAVE, mask);
 
-	chan = dma_request_channel(mask, shdma_chan_filter, (void *)id);
+	chan = dma_request_channel(mask, shdma_chan_filter,
+				   (void *)(uintptr_t)id);
 	if (chan)
 		to_shdma_chan(chan)->hw_req = id;
 
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 1069e8869f2..146d5df926d 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -18,21 +18,22 @@
  *
  */
 
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kdebug.h>
 #include <linux/module.h>
+#include <linux/notifier.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/dmaengine.h>
-#include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/rculist.h>
 #include <linux/sh_dma.h>
-#include <linux/notifier.h>
-#include <linux/kdebug.h>
+#include <linux/slab.h>
 #include <linux/spinlock.h>
-#include <linux/rculist.h>
 
 #include "../dmaengine.h"
 #include "shdma.h"
@@ -443,6 +444,7 @@ static bool sh_dmae_reset(struct sh_dmae_device *shdev)
 	return ret;
 }
 
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
 static irqreturn_t sh_dmae_err(int irq, void *data)
 {
 	struct sh_dmae_device *shdev = data;
@@ -453,6 +455,7 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
 	sh_dmae_reset(shdev);
 	return IRQ_HANDLED;
 }
+#endif
 
 static bool sh_dmae_desc_completed(struct shdma_chan *schan,
 				   struct shdma_desc *sdesc)
@@ -637,7 +640,7 @@ static int sh_dmae_resume(struct device *dev)
 #define sh_dmae_resume NULL
 #endif
 
-const struct dev_pm_ops sh_dmae_pm = {
+static const struct dev_pm_ops sh_dmae_pm = {
 	.suspend		= sh_dmae_suspend,
 	.resume			= sh_dmae_resume,
 	.runtime_suspend	= sh_dmae_runtime_suspend,
@@ -685,9 +688,12 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
 static int sh_dmae_probe(struct platform_device *pdev)
 {
 	const struct sh_dmae_pdata *pdata;
-	unsigned long irqflags = IRQF_DISABLED,
-		chan_flag[SH_DMAE_MAX_CHANNELS] = {};
-	int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
+	unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
+	int chan_irq[SH_DMAE_MAX_CHANNELS];
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
+	unsigned long irqflags = 0;
+	int errirq;
+#endif
 	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
 	struct sh_dmae_device *shdev;
 	struct dma_device *dma_dev;
@@ -838,7 +844,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
 				    IORESOURCE_IRQ_SHAREABLE)
 					chan_flag[irq_cnt] = IRQF_SHARED;
 				else
-					chan_flag[irq_cnt] = IRQF_DISABLED;
+					chan_flag[irq_cnt] = 0;
 				dev_dbg(&pdev->dev,
 					"Found IRQ %d for channel %d\n",
 					i, irq_cnt);
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c
index c7e9cdff070..3ce10390989 100644
--- a/drivers/dma/sh/sudmac.c
+++ b/drivers/dma/sh/sudmac.c
@@ -14,12 +14,13 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/dmaengine.h>
+#include <linux/err.h>
 #include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
 #include <linux/interrupt.h>
-#include <linux/dmaengine.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/slab.h>
 #include <linux/sudmac.h>
 
 struct sudmac_chan {
@@ -178,8 +179,8 @@ static int sudmac_desc_setup(struct shdma_chan *schan,
 	struct sudmac_chan *sc = to_chan(schan);
 	struct sudmac_desc *sd = to_desc(sdesc);
 
-	dev_dbg(sc->shdma_chan.dev, "%s: src=%x, dst=%x, len=%d\n",
-		__func__, src, dst, *len);
+	dev_dbg(sc->shdma_chan.dev, "%s: src=%pad, dst=%pad, len=%zu\n",
+		__func__, &src, &dst, *len);
 
 	if (*len > schan->max_xfer_len)
 		*len = schan->max_xfer_len;
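The shdma-base.c hunks above wire up a device_prep_dma_cyclic hook, so slave channels backed by this library can now run cyclic (ring-buffer) transfers through the generic dmaengine API. The sketch below shows how a client might drive that path; it is not part of this patch, and the "rx" channel name, bus width, buffer/period sizes and callback are illustrative assumptions.

/* Sketch only: generic dmaengine cyclic usage against the
 * device_prep_dma_cyclic hook added by this series.
 * The "rx" channel name, config values and callback are assumptions. */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static void rx_period_done(void *arg)
{
	/* With this implementation each period is a single chunk
	 * (chunks == 1), so the callback fires once per period. */
}

static int start_cyclic_rx(struct device *dev, dma_addr_t buf_dma,
			   size_t buf_len, size_t period_len)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, "rx");	/* assumed DT binding */
	if (!chan)
		return -ENODEV;

	dmaengine_slave_config(chan, &cfg);

	/* buf_len must be a multiple of period_len; the patch caps the
	 * number of periods at SHDMA_MAX_SG_LEN (32). */
	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -EIO;
	}

	desc->callback = rx_period_done;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;
}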
