Diffstat (limited to 'drivers/dma/sh')
 drivers/dma/sh/Kconfig         |   40 +
 drivers/dma/sh/Makefile        |   10 +
 drivers/dma/sh/rcar-audmapp.c  |  320 +
 drivers/dma/sh/rcar-hpbdma.c   |  666 +
 drivers/dma/sh/shdma-arm.h     |   51 +
 drivers/dma/sh/shdma-base.c    | 1016 +
 drivers/dma/sh/shdma-of.c      |   80 +
 drivers/dma/sh/shdma-r8a73a4.c |   77 +
 drivers/dma/sh/shdma.h         |   72 +
 drivers/dma/sh/shdmac.c        |  960 +
 drivers/dma/sh/sudmac.c        |  425 +
 11 files changed, 3717 insertions(+), 0 deletions(-)
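Before the patch body: the shdma_chan_filter() comment in shdma-base.c below describes how a slave driver is expected to obtain one of these channels (dma_request_channel() with this filter, then dmaengine_slave_config() with .slave_id, .direction and an address). The following is a minimal client-side sketch of that flow, not part of the patch; the slave ID and FIFO address are hypothetical placeholders.

/*
 * Hypothetical client-side usage sketch (not part of this patch):
 * request an shdma slave channel via the generic dmaengine API,
 * following the pattern documented at shdma_chan_filter().
 */
#include <linux/dmaengine.h>
#include <linux/shdma-base.h>

static struct dma_chan *example_request_tx_chan(dma_addr_t fifo_addr,
						int slave_id)
{
	struct dma_slave_config cfg = {
		.slave_id  = slave_id,		/* placeholder MID/RID */
		.direction = DMA_MEM_TO_DEV,
		.dst_addr  = fifo_addr,		/* placeholder FIFO address */
	};
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* shdma_chan_filter() takes the requested slave ID as its argument */
	chan = dma_request_channel(mask, shdma_chan_filter,
				   (void *)(uintptr_t)slave_id);
	if (!chan)
		return NULL;

	if (dmaengine_slave_config(chan, &cfg)) {
		dma_release_channel(chan);
		return NULL;
	}

	return chan;
}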
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig new file mode 100644 index 00000000000..0f719816c91 --- /dev/null +++ b/drivers/dma/sh/Kconfig @@ -0,0 +1,40 @@ +# +# DMA engine configuration for sh +# + +config SH_DMAE_BASE +	bool "Renesas SuperH DMA Engine support" +	depends on (SUPERH && SH_DMA) || ARCH_SHMOBILE || COMPILE_TEST +	depends on !SH_DMA_API +	default y +	select DMA_ENGINE +	help +	  Enable support for the Renesas SuperH DMA controllers. + +config SH_DMAE +	tristate "Renesas SuperH DMAC support" +	depends on SH_DMAE_BASE +	help +	  Enable support for the Renesas SuperH DMA controllers. + +config SUDMAC +	tristate "Renesas SUDMAC support" +	depends on SH_DMAE_BASE +	help +	  Enable support for the Renesas SUDMAC controllers. + +config RCAR_HPB_DMAE +	tristate "Renesas R-Car HPB DMAC support" +	depends on SH_DMAE_BASE +	help +	  Enable support for the Renesas R-Car series DMA controllers. + +config RCAR_AUDMAC_PP +	tristate "Renesas R-Car Audio DMAC Peripheral Peripheral support" +	depends on SH_DMAE_BASE +	help +	  Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers. + +config SHDMA_R8A73A4 +	def_bool y +	depends on ARCH_R8A73A4 && SH_DMAE != n diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile new file mode 100644 index 00000000000..1ce88b28cfc --- /dev/null +++ b/drivers/dma/sh/Makefile @@ -0,0 +1,10 @@ +obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o shdma-of.o +obj-$(CONFIG_SH_DMAE) += shdma.o +shdma-y := shdmac.o +ifeq ($(CONFIG_OF),y) +shdma-$(CONFIG_SHDMA_R8A73A4) += shdma-r8a73a4.o +endif +shdma-objs := $(shdma-y) +obj-$(CONFIG_SUDMAC) += sudmac.o +obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o +obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o diff --git a/drivers/dma/sh/rcar-audmapp.c b/drivers/dma/sh/rcar-audmapp.c new file mode 100644 index 00000000000..2de77289a2e --- /dev/null +++ b/drivers/dma/sh/rcar-audmapp.c @@ -0,0 +1,320 @@ +/* + * This is for Renesas R-Car Audio-DMAC-peri-peri. + * + * Copyright (C) 2014 Renesas Electronics Corporation + * Copyright (C) 2014 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> + * + * based on the drivers/dma/sh/shdma.c + * + * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de> + * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> + * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. + * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. + * + * This is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + */ +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/dmaengine.h> +#include <linux/platform_data/dma-rcar-audmapp.h> +#include <linux/platform_device.h> +#include <linux/shdma-base.h> + +/* + * DMA register + */ +#define PDMASAR		0x00 +#define PDMADAR		0x04 +#define PDMACHCR	0x0c + +/* PDMACHCR */ +#define PDMACHCR_DE		(1 << 0) + +#define AUDMAPP_MAX_CHANNELS	29 + +/* Default MEMCPY transfer size = 2^2 = 4 bytes */ +#define LOG2_DEFAULT_XFER_SIZE	2 +#define AUDMAPP_SLAVE_NUMBER	256 +#define AUDMAPP_LEN_MAX		(16 * 1024 * 1024) + +struct audmapp_chan { +	struct shdma_chan shdma_chan; +	struct audmapp_slave_config *config; +	void __iomem *base; +}; + +struct audmapp_device { +	struct shdma_dev shdma_dev; +	struct audmapp_pdata *pdata; +	struct device *dev; +	void __iomem *chan_reg; +}; + +#define to_chan(chan) container_of(chan, struct audmapp_chan, shdma_chan) +#define to_dev(chan) container_of(chan->shdma_chan.dma_chan.device,	\ +				  struct audmapp_device, shdma_dev.dma_dev) + +static void audmapp_write(struct audmapp_chan *auchan, u32 data, u32 reg) +{ +	struct audmapp_device *audev = to_dev(auchan); +	struct device *dev = audev->dev; + +	dev_dbg(dev, "w %p : %08x\n", auchan->base + reg, data); + +	iowrite32(data, auchan->base + reg); +} + +static u32 audmapp_read(struct audmapp_chan *auchan, u32 reg) +{ +	return ioread32(auchan->base + reg); +} + +static void audmapp_halt(struct shdma_chan *schan) +{ +	struct audmapp_chan *auchan = to_chan(schan); +	int i; + +	audmapp_write(auchan, 0, PDMACHCR); + +	for (i = 0; i < 1024; i++) { +		if (0 == audmapp_read(auchan, PDMACHCR)) +			return; +		udelay(1); +	} +} + +static void audmapp_start_xfer(struct shdma_chan *schan, +			       struct shdma_desc *sdecs) +{ +	struct audmapp_chan *auchan = to_chan(schan); +	struct audmapp_device *audev = to_dev(auchan); +	struct audmapp_slave_config *cfg = auchan->config; +	struct device *dev = audev->dev; +	u32 chcr = cfg->chcr | PDMACHCR_DE; + +	dev_dbg(dev, "src/dst/chcr = %pad/%pad/%x\n", +		&cfg->src, &cfg->dst, cfg->chcr); + +	audmapp_write(auchan, cfg->src,	PDMASAR); +	audmapp_write(auchan, cfg->dst,	PDMADAR); +	audmapp_write(auchan, chcr,	PDMACHCR); +} + +static struct audmapp_slave_config * +audmapp_find_slave(struct audmapp_chan *auchan, int slave_id) +{ +	struct audmapp_device *audev = to_dev(auchan); +	struct audmapp_pdata *pdata = audev->pdata; +	struct audmapp_slave_config *cfg; +	int i; + +	if (slave_id >= AUDMAPP_SLAVE_NUMBER) +		return NULL; + +	for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) +		if (cfg->slave_id == slave_id) +			return cfg; + +	return NULL; +} + +static int audmapp_set_slave(struct shdma_chan *schan, int slave_id, +			     dma_addr_t slave_addr, bool try) +{ +	struct audmapp_chan *auchan = to_chan(schan); +	struct audmapp_slave_config *cfg = +		audmapp_find_slave(auchan, slave_id); + +	if (!cfg) +		return -ENODEV; +	if (try) +		return 0; + +	auchan->config	= cfg; + +	return 0; +} + +static int audmapp_desc_setup(struct shdma_chan *schan, +			      struct shdma_desc *sdecs, +			      dma_addr_t src, dma_addr_t dst, size_t *len) +{ +	struct audmapp_chan *auchan = to_chan(schan); +	struct audmapp_slave_config *cfg = auchan->config; + +	if (!cfg) +		return -ENODEV; + +	if (*len > (size_t)AUDMAPP_LEN_MAX) +		*len = (size_t)AUDMAPP_LEN_MAX; + +	return 0; +} + +static void audmapp_setup_xfer(struct shdma_chan *schan, +			       int slave_id) +{ +} + +static dma_addr_t 
audmapp_slave_addr(struct shdma_chan *schan) +{ +	return 0; /* always fixed address */ +} + +static bool audmapp_channel_busy(struct shdma_chan *schan) +{ +	struct audmapp_chan *auchan = to_chan(schan); +	u32 chcr = audmapp_read(auchan, PDMACHCR); + +	return chcr & ~PDMACHCR_DE; +} + +static bool audmapp_desc_completed(struct shdma_chan *schan, +				   struct shdma_desc *sdesc) +{ +	return true; +} + +static struct shdma_desc *audmapp_embedded_desc(void *buf, int i) +{ +	return &((struct shdma_desc *)buf)[i]; +} + +static const struct shdma_ops audmapp_shdma_ops = { +	.halt_channel	= audmapp_halt, +	.desc_setup	= audmapp_desc_setup, +	.set_slave	= audmapp_set_slave, +	.start_xfer	= audmapp_start_xfer, +	.embedded_desc	= audmapp_embedded_desc, +	.setup_xfer	= audmapp_setup_xfer, +	.slave_addr	= audmapp_slave_addr, +	.channel_busy	= audmapp_channel_busy, +	.desc_completed	= audmapp_desc_completed, +}; + +static int audmapp_chan_probe(struct platform_device *pdev, +			      struct audmapp_device *audev, int id) +{ +	struct shdma_dev *sdev = &audev->shdma_dev; +	struct audmapp_chan *auchan; +	struct shdma_chan *schan; +	struct device *dev = audev->dev; + +	auchan = devm_kzalloc(dev, sizeof(*auchan), GFP_KERNEL); +	if (!auchan) +		return -ENOMEM; + +	schan = &auchan->shdma_chan; +	schan->max_xfer_len = AUDMAPP_LEN_MAX; + +	shdma_chan_probe(sdev, schan, id); + +	auchan->base = audev->chan_reg + 0x20 + (0x10 * id); +	dev_dbg(dev, "%02d : %p / %p", id, auchan->base, audev->chan_reg); + +	return 0; +} + +static void audmapp_chan_remove(struct audmapp_device *audev) +{ +	struct dma_device *dma_dev = &audev->shdma_dev.dma_dev; +	struct shdma_chan *schan; +	int i; + +	shdma_for_each_chan(schan, &audev->shdma_dev, i) { +		BUG_ON(!schan); +		shdma_chan_remove(schan); +	} +	dma_dev->chancnt = 0; +} + +static int audmapp_probe(struct platform_device *pdev) +{ +	struct audmapp_pdata *pdata = pdev->dev.platform_data; +	struct audmapp_device *audev; +	struct shdma_dev *sdev; +	struct dma_device *dma_dev; +	struct resource *res; +	int err, i; + +	if (!pdata) +		return -ENODEV; + +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + +	audev = devm_kzalloc(&pdev->dev, sizeof(*audev), GFP_KERNEL); +	if (!audev) +		return -ENOMEM; + +	audev->dev	= &pdev->dev; +	audev->pdata	= pdata; +	audev->chan_reg	= devm_ioremap_resource(&pdev->dev, res); +	if (IS_ERR(audev->chan_reg)) +		return PTR_ERR(audev->chan_reg); + +	sdev		= &audev->shdma_dev; +	sdev->ops	= &audmapp_shdma_ops; +	sdev->desc_size	= sizeof(struct shdma_desc); + +	dma_dev			= &sdev->dma_dev; +	dma_dev->copy_align	= LOG2_DEFAULT_XFER_SIZE; +	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); + +	err = shdma_init(&pdev->dev, sdev, AUDMAPP_MAX_CHANNELS); +	if (err < 0) +		return err; + +	platform_set_drvdata(pdev, audev); + +	/* Create DMA Channel */ +	for (i = 0; i < AUDMAPP_MAX_CHANNELS; i++) { +		err = audmapp_chan_probe(pdev, audev, i); +		if (err) +			goto chan_probe_err; +	} + +	err = dma_async_device_register(dma_dev); +	if (err < 0) +		goto chan_probe_err; + +	return err; + +chan_probe_err: +	audmapp_chan_remove(audev); +	shdma_cleanup(sdev); + +	return err; +} + +static int audmapp_remove(struct platform_device *pdev) +{ +	struct audmapp_device *audev = platform_get_drvdata(pdev); +	struct dma_device *dma_dev = &audev->shdma_dev.dma_dev; + +	dma_async_device_unregister(dma_dev); + +	audmapp_chan_remove(audev); +	shdma_cleanup(&audev->shdma_dev); + +	return 0; +} + +static struct platform_driver audmapp_driver = { +	.probe		= audmapp_probe, +	.remove		= 
audmapp_remove, +	.driver		= { +		.owner	= THIS_MODULE, +		.name	= "rcar-audmapp-engine", +	}, +}; +module_platform_driver(audmapp_driver); + +MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>"); +MODULE_DESCRIPTION("Renesas R-Car Audio DMAC peri-peri driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c new file mode 100644 index 00000000000..b212d9471ab --- /dev/null +++ b/drivers/dma/sh/rcar-hpbdma.c @@ -0,0 +1,666 @@ +/* + * Copyright (C) 2011-2013 Renesas Electronics Corporation + * Copyright (C) 2013 Cogent Embedded, Inc. + * + * This file is based on the drivers/dma/sh/shdma.c + * + * Renesas SuperH DMA Engine support + * + * This is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * - DMA of SuperH does not have Hardware DMA chain mode. + * - max DMA size is 16MB. + * + */ + +#include <linux/dmaengine.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/platform_data/dma-rcar-hpbdma.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/shdma-base.h> +#include <linux/slab.h> + +/* DMA channel registers */ +#define HPB_DMAE_DSAR0	0x00 +#define HPB_DMAE_DDAR0	0x04 +#define HPB_DMAE_DTCR0	0x08 +#define HPB_DMAE_DSAR1	0x0C +#define HPB_DMAE_DDAR1	0x10 +#define HPB_DMAE_DTCR1	0x14 +#define HPB_DMAE_DSASR	0x18 +#define HPB_DMAE_DDASR	0x1C +#define HPB_DMAE_DTCSR	0x20 +#define HPB_DMAE_DPTR	0x24 +#define HPB_DMAE_DCR	0x28 +#define HPB_DMAE_DCMDR	0x2C +#define HPB_DMAE_DSTPR	0x30 +#define HPB_DMAE_DSTSR	0x34 +#define HPB_DMAE_DDBGR	0x38 +#define HPB_DMAE_DDBGR2	0x3C +#define HPB_DMAE_CHAN(n)	(0x40 * (n)) + +/* DMA command register (DCMDR) bits */ +#define HPB_DMAE_DCMDR_BDOUT	BIT(7) +#define HPB_DMAE_DCMDR_DQSPD	BIT(6) +#define HPB_DMAE_DCMDR_DQSPC	BIT(5) +#define HPB_DMAE_DCMDR_DMSPD	BIT(4) +#define HPB_DMAE_DCMDR_DMSPC	BIT(3) +#define HPB_DMAE_DCMDR_DQEND	BIT(2) +#define HPB_DMAE_DCMDR_DNXT	BIT(1) +#define HPB_DMAE_DCMDR_DMEN	BIT(0) + +/* DMA forced stop register (DSTPR) bits */ +#define HPB_DMAE_DSTPR_DMSTP	BIT(0) + +/* DMA status register (DSTSR) bits */ +#define HPB_DMAE_DSTSR_DQSTS	BIT(2) +#define HPB_DMAE_DSTSR_DMSTS	BIT(0) + +/* DMA common registers */ +#define HPB_DMAE_DTIMR		0x00 +#define HPB_DMAE_DINTSR0		0x0C +#define HPB_DMAE_DINTSR1		0x10 +#define HPB_DMAE_DINTCR0		0x14 +#define HPB_DMAE_DINTCR1		0x18 +#define HPB_DMAE_DINTMR0		0x1C +#define HPB_DMAE_DINTMR1		0x20 +#define HPB_DMAE_DACTSR0		0x24 +#define HPB_DMAE_DACTSR1		0x28 +#define HPB_DMAE_HSRSTR(n)	(0x40 + (n) * 4) +#define HPB_DMAE_HPB_DMASPR(n)	(0x140 + (n) * 4) +#define HPB_DMAE_HPB_DMLVLR0	0x160 +#define HPB_DMAE_HPB_DMLVLR1	0x164 +#define HPB_DMAE_HPB_DMSHPT0	0x168 +#define HPB_DMAE_HPB_DMSHPT1	0x16C + +#define HPB_DMA_SLAVE_NUMBER 256 +#define HPB_DMA_TCR_MAX 0x01000000	/* 16 MiB */ + +struct hpb_dmae_chan { +	struct shdma_chan shdma_chan; +	int xfer_mode;			/* DMA transfer mode */ +#define XFER_SINGLE	1 +#define XFER_DOUBLE	2 +	unsigned plane_idx;		/* current DMA information set */ +	bool first_desc;		/* first/next transfer */ +	int xmit_shift;			/* log_2(bytes_per_xfer) */ +	void __iomem *base; +	const struct hpb_dmae_slave_config *cfg; +	char dev_id[16];		/* unique name per DMAC of channel */ +	dma_addr_t slave_addr; +}; + 
+struct hpb_dmae_device { +	struct shdma_dev shdma_dev; +	spinlock_t reg_lock;		/* comm_reg operation lock */ +	struct hpb_dmae_pdata *pdata; +	void __iomem *chan_reg; +	void __iomem *comm_reg; +	void __iomem *reset_reg; +	void __iomem *mode_reg; +}; + +struct hpb_dmae_regs { +	u32 sar; /* SAR / source address */ +	u32 dar; /* DAR / destination address */ +	u32 tcr; /* TCR / transfer count */ +}; + +struct hpb_desc { +	struct shdma_desc shdma_desc; +	struct hpb_dmae_regs hw; +	unsigned plane_idx; +}; + +#define to_chan(schan) container_of(schan, struct hpb_dmae_chan, shdma_chan) +#define to_desc(sdesc) container_of(sdesc, struct hpb_desc, shdma_desc) +#define to_dev(sc) container_of(sc->shdma_chan.dma_chan.device, \ +				struct hpb_dmae_device, shdma_dev.dma_dev) + +static void ch_reg_write(struct hpb_dmae_chan *hpb_dc, u32 data, u32 reg) +{ +	iowrite32(data, hpb_dc->base + reg); +} + +static u32 ch_reg_read(struct hpb_dmae_chan *hpb_dc, u32 reg) +{ +	return ioread32(hpb_dc->base + reg); +} + +static void dcmdr_write(struct hpb_dmae_device *hpbdev, u32 data) +{ +	iowrite32(data, hpbdev->chan_reg + HPB_DMAE_DCMDR); +} + +static void hsrstr_write(struct hpb_dmae_device *hpbdev, u32 ch) +{ +	iowrite32(0x1, hpbdev->comm_reg + HPB_DMAE_HSRSTR(ch)); +} + +static u32 dintsr_read(struct hpb_dmae_device *hpbdev, u32 ch) +{ +	u32 v; + +	if (ch < 32) +		v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR0) >> ch; +	else +		v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR1) >> (ch - 32); +	return v & 0x1; +} + +static void dintcr_write(struct hpb_dmae_device *hpbdev, u32 ch) +{ +	if (ch < 32) +		iowrite32((0x1 << ch), hpbdev->comm_reg + HPB_DMAE_DINTCR0); +	else +		iowrite32((0x1 << (ch - 32)), +			  hpbdev->comm_reg + HPB_DMAE_DINTCR1); +} + +static void asyncmdr_write(struct hpb_dmae_device *hpbdev, u32 data) +{ +	iowrite32(data, hpbdev->mode_reg); +} + +static u32 asyncmdr_read(struct hpb_dmae_device *hpbdev) +{ +	return ioread32(hpbdev->mode_reg); +} + +static void hpb_dmae_enable_int(struct hpb_dmae_device *hpbdev, u32 ch) +{ +	u32 intreg; + +	spin_lock_irq(&hpbdev->reg_lock); +	if (ch < 32) { +		intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR0); +		iowrite32(BIT(ch) | intreg, +			  hpbdev->comm_reg + HPB_DMAE_DINTMR0); +	} else { +		intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR1); +		iowrite32(BIT(ch - 32) | intreg, +			  hpbdev->comm_reg + HPB_DMAE_DINTMR1); +	} +	spin_unlock_irq(&hpbdev->reg_lock); +} + +static void hpb_dmae_async_reset(struct hpb_dmae_device *hpbdev, u32 data) +{ +	u32 rstr; +	int timeout = 10000;	/* 100 ms */ + +	spin_lock(&hpbdev->reg_lock); +	rstr = ioread32(hpbdev->reset_reg); +	rstr |= data; +	iowrite32(rstr, hpbdev->reset_reg); +	do { +		rstr = ioread32(hpbdev->reset_reg); +		if ((rstr & data) == data) +			break; +		udelay(10); +	} while (timeout--); + +	if (timeout < 0) +		dev_err(hpbdev->shdma_dev.dma_dev.dev, +			"%s timeout\n", __func__); + +	rstr &= ~data; +	iowrite32(rstr, hpbdev->reset_reg); +	spin_unlock(&hpbdev->reg_lock); +} + +static void hpb_dmae_set_async_mode(struct hpb_dmae_device *hpbdev, +				    u32 mask, u32 data) +{ +	u32 mode; + +	spin_lock_irq(&hpbdev->reg_lock); +	mode = asyncmdr_read(hpbdev); +	mode &= ~mask; +	mode |= data; +	asyncmdr_write(hpbdev, mode); +	spin_unlock_irq(&hpbdev->reg_lock); +} + +static void hpb_dmae_ctl_stop(struct hpb_dmae_device *hpbdev) +{ +	dcmdr_write(hpbdev, HPB_DMAE_DCMDR_DQSPD); +} + +static void hpb_dmae_reset(struct hpb_dmae_device *hpbdev) +{ +	u32 ch; + +	for (ch = 0; ch < hpbdev->pdata->num_hw_channels; 
ch++) +		hsrstr_write(hpbdev, ch); +} + +static unsigned int calc_xmit_shift(struct hpb_dmae_chan *hpb_chan) +{ +	struct hpb_dmae_device *hpbdev = to_dev(hpb_chan); +	struct hpb_dmae_pdata *pdata = hpbdev->pdata; +	int width = ch_reg_read(hpb_chan, HPB_DMAE_DCR); +	int i; + +	switch (width & (HPB_DMAE_DCR_SPDS_MASK | HPB_DMAE_DCR_DPDS_MASK)) { +	case HPB_DMAE_DCR_SPDS_8BIT | HPB_DMAE_DCR_DPDS_8BIT: +	default: +		i = XMIT_SZ_8BIT; +		break; +	case HPB_DMAE_DCR_SPDS_16BIT | HPB_DMAE_DCR_DPDS_16BIT: +		i = XMIT_SZ_16BIT; +		break; +	case HPB_DMAE_DCR_SPDS_32BIT | HPB_DMAE_DCR_DPDS_32BIT: +		i = XMIT_SZ_32BIT; +		break; +	} +	return pdata->ts_shift[i]; +} + +static void hpb_dmae_set_reg(struct hpb_dmae_chan *hpb_chan, +			     struct hpb_dmae_regs *hw, unsigned plane) +{ +	ch_reg_write(hpb_chan, hw->sar, +		     plane ? HPB_DMAE_DSAR1 : HPB_DMAE_DSAR0); +	ch_reg_write(hpb_chan, hw->dar, +		     plane ? HPB_DMAE_DDAR1 : HPB_DMAE_DDAR0); +	ch_reg_write(hpb_chan, hw->tcr >> hpb_chan->xmit_shift, +		     plane ? HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0); +} + +static void hpb_dmae_start(struct hpb_dmae_chan *hpb_chan, bool next) +{ +	ch_reg_write(hpb_chan, (next ? HPB_DMAE_DCMDR_DNXT : 0) | +		     HPB_DMAE_DCMDR_DMEN, HPB_DMAE_DCMDR); +} + +static void hpb_dmae_halt(struct shdma_chan *schan) +{ +	struct hpb_dmae_chan *chan = to_chan(schan); + +	ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR); +	ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR); + +	chan->plane_idx = 0; +	chan->first_desc = true; +} + +static const struct hpb_dmae_slave_config * +hpb_dmae_find_slave(struct hpb_dmae_chan *hpb_chan, int slave_id) +{ +	struct hpb_dmae_device *hpbdev = to_dev(hpb_chan); +	struct hpb_dmae_pdata *pdata = hpbdev->pdata; +	int i; + +	if (slave_id >= HPB_DMA_SLAVE_NUMBER) +		return NULL; + +	for (i = 0; i < pdata->num_slaves; i++) +		if (pdata->slaves[i].id == slave_id) +			return pdata->slaves + i; + +	return NULL; +} + +static void hpb_dmae_start_xfer(struct shdma_chan *schan, +				struct shdma_desc *sdesc) +{ +	struct hpb_dmae_chan *chan = to_chan(schan); +	struct hpb_dmae_device *hpbdev = to_dev(chan); +	struct hpb_desc *desc = to_desc(sdesc); + +	if (chan->cfg->flags & HPB_DMAE_SET_ASYNC_RESET) +		hpb_dmae_async_reset(hpbdev, chan->cfg->rstr); + +	desc->plane_idx = chan->plane_idx; +	hpb_dmae_set_reg(chan, &desc->hw, chan->plane_idx); +	hpb_dmae_start(chan, !chan->first_desc); + +	if (chan->xfer_mode == XFER_DOUBLE) { +		chan->plane_idx ^= 1; +		chan->first_desc = false; +	} +} + +static bool hpb_dmae_desc_completed(struct shdma_chan *schan, +				    struct shdma_desc *sdesc) +{ +	/* +	 * This is correct since we always have at most single +	 * outstanding DMA transfer per channel, and by the time +	 * we get completion interrupt the transfer is completed. +	 * This will change if we ever use alternating DMA +	 * information sets and submit two descriptors at once. 
+	 */ +	return true; +} + +static bool hpb_dmae_chan_irq(struct shdma_chan *schan, int irq) +{ +	struct hpb_dmae_chan *chan = to_chan(schan); +	struct hpb_dmae_device *hpbdev = to_dev(chan); +	int ch = chan->cfg->dma_ch; + +	/* Check Complete DMA Transfer */ +	if (dintsr_read(hpbdev, ch)) { +		/* Clear Interrupt status */ +		dintcr_write(hpbdev, ch); +		return true; +	} +	return false; +} + +static int hpb_dmae_desc_setup(struct shdma_chan *schan, +			       struct shdma_desc *sdesc, +			       dma_addr_t src, dma_addr_t dst, size_t *len) +{ +	struct hpb_desc *desc = to_desc(sdesc); + +	if (*len > (size_t)HPB_DMA_TCR_MAX) +		*len = (size_t)HPB_DMA_TCR_MAX; + +	desc->hw.sar = src; +	desc->hw.dar = dst; +	desc->hw.tcr = *len; + +	return 0; +} + +static size_t hpb_dmae_get_partial(struct shdma_chan *schan, +				   struct shdma_desc *sdesc) +{ +	struct hpb_desc *desc = to_desc(sdesc); +	struct hpb_dmae_chan *chan = to_chan(schan); +	u32 tcr = ch_reg_read(chan, desc->plane_idx ? +			      HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0); + +	return (desc->hw.tcr - tcr) << chan->xmit_shift; +} + +static bool hpb_dmae_channel_busy(struct shdma_chan *schan) +{ +	struct hpb_dmae_chan *chan = to_chan(schan); +	u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR); + +	if (chan->xfer_mode == XFER_DOUBLE) +		return dstsr & HPB_DMAE_DSTSR_DQSTS; +	else +		return dstsr & HPB_DMAE_DSTSR_DMSTS; +} + +static int +hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan, +			      const struct hpb_dmae_slave_config *cfg) +{ +	struct hpb_dmae_device *hpbdev = to_dev(hpb_chan); +	struct hpb_dmae_pdata *pdata = hpbdev->pdata; +	const struct hpb_dmae_channel *channel = pdata->channels; +	int slave_id = cfg->id; +	int i, err; + +	for (i = 0; i < pdata->num_channels; i++, channel++) { +		if (channel->s_id == slave_id) { +			struct device *dev = hpb_chan->shdma_chan.dev; + +			hpb_chan->base = hpbdev->chan_reg + +				HPB_DMAE_CHAN(cfg->dma_ch); + +			dev_dbg(dev, "Detected Slave device\n"); +			dev_dbg(dev, " -- slave_id       : 0x%x\n", slave_id); +			dev_dbg(dev, " -- cfg->dma_ch    : %d\n", cfg->dma_ch); +			dev_dbg(dev, " -- channel->ch_irq: %d\n", +				channel->ch_irq); +			break; +		} +	} + +	err = shdma_request_irq(&hpb_chan->shdma_chan, channel->ch_irq, +				IRQF_SHARED, hpb_chan->dev_id); +	if (err) { +		dev_err(hpb_chan->shdma_chan.dev, +			"DMA channel request_irq %d failed with error %d\n", +			channel->ch_irq, err); +		return err; +	} + +	hpb_chan->plane_idx = 0; +	hpb_chan->first_desc = true; + +	if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) == 0) { +		hpb_chan->xfer_mode = XFER_SINGLE; +	} else if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) == +		   (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) { +		hpb_chan->xfer_mode = XFER_DOUBLE; +	} else { +		dev_err(hpb_chan->shdma_chan.dev, "DCR setting error"); +		return -EINVAL; +	} + +	if (cfg->flags & HPB_DMAE_SET_ASYNC_MODE) +		hpb_dmae_set_async_mode(hpbdev, cfg->mdm, cfg->mdr); +	ch_reg_write(hpb_chan, cfg->dcr, HPB_DMAE_DCR); +	ch_reg_write(hpb_chan, cfg->port, HPB_DMAE_DPTR); +	hpb_chan->xmit_shift = calc_xmit_shift(hpb_chan); +	hpb_dmae_enable_int(hpbdev, cfg->dma_ch); + +	return 0; +} + +static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, +			      dma_addr_t slave_addr, bool try) +{ +	struct hpb_dmae_chan *chan = to_chan(schan); +	const struct hpb_dmae_slave_config *sc = +		hpb_dmae_find_slave(chan, slave_id); + +	if (!sc) +		return -ENODEV; +	if (try) +		return 0; +	chan->cfg = sc; +	chan->slave_addr = slave_addr ? 
: sc->addr; +	return hpb_dmae_alloc_chan_resources(chan, sc); +} + +static void hpb_dmae_setup_xfer(struct shdma_chan *schan, int slave_id) +{ +} + +static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan) +{ +	struct hpb_dmae_chan *chan = to_chan(schan); + +	return chan->slave_addr; +} + +static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i) +{ +	return &((struct hpb_desc *)buf)[i].shdma_desc; +} + +static const struct shdma_ops hpb_dmae_ops = { +	.desc_completed = hpb_dmae_desc_completed, +	.halt_channel = hpb_dmae_halt, +	.channel_busy = hpb_dmae_channel_busy, +	.slave_addr = hpb_dmae_slave_addr, +	.desc_setup = hpb_dmae_desc_setup, +	.set_slave = hpb_dmae_set_slave, +	.setup_xfer = hpb_dmae_setup_xfer, +	.start_xfer = hpb_dmae_start_xfer, +	.embedded_desc = hpb_dmae_embedded_desc, +	.chan_irq = hpb_dmae_chan_irq, +	.get_partial = hpb_dmae_get_partial, +}; + +static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id) +{ +	struct shdma_dev *sdev = &hpbdev->shdma_dev; +	struct platform_device *pdev = +		to_platform_device(hpbdev->shdma_dev.dma_dev.dev); +	struct hpb_dmae_chan *new_hpb_chan; +	struct shdma_chan *schan; + +	/* Alloc channel */ +	new_hpb_chan = devm_kzalloc(&pdev->dev, +				    sizeof(struct hpb_dmae_chan), GFP_KERNEL); +	if (!new_hpb_chan) { +		dev_err(hpbdev->shdma_dev.dma_dev.dev, +			"No free memory for allocating DMA channels!\n"); +		return -ENOMEM; +	} + +	schan = &new_hpb_chan->shdma_chan; +	schan->max_xfer_len = HPB_DMA_TCR_MAX; + +	shdma_chan_probe(sdev, schan, id); + +	if (pdev->id >= 0) +		snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id), +			 "hpb-dmae%d.%d", pdev->id, id); +	else +		snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id), +			 "hpb-dma.%d", id); + +	return 0; +} + +static int hpb_dmae_probe(struct platform_device *pdev) +{ +	struct hpb_dmae_pdata *pdata = pdev->dev.platform_data; +	struct hpb_dmae_device *hpbdev; +	struct dma_device *dma_dev; +	struct resource *chan, *comm, *rest, *mode, *irq_res; +	int err, i; + +	/* Get platform data */ +	if (!pdata || !pdata->num_channels) +		return -ENODEV; + +	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	comm = platform_get_resource(pdev, IORESOURCE_MEM, 1); +	rest = platform_get_resource(pdev, IORESOURCE_MEM, 2); +	mode = platform_get_resource(pdev, IORESOURCE_MEM, 3); + +	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); +	if (!irq_res) +		return -ENODEV; + +	hpbdev = devm_kzalloc(&pdev->dev, sizeof(struct hpb_dmae_device), +			      GFP_KERNEL); +	if (!hpbdev) { +		dev_err(&pdev->dev, "Not enough memory\n"); +		return -ENOMEM; +	} + +	hpbdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan); +	if (IS_ERR(hpbdev->chan_reg)) +		return PTR_ERR(hpbdev->chan_reg); + +	hpbdev->comm_reg = devm_ioremap_resource(&pdev->dev, comm); +	if (IS_ERR(hpbdev->comm_reg)) +		return PTR_ERR(hpbdev->comm_reg); + +	hpbdev->reset_reg = devm_ioremap_resource(&pdev->dev, rest); +	if (IS_ERR(hpbdev->reset_reg)) +		return PTR_ERR(hpbdev->reset_reg); + +	hpbdev->mode_reg = devm_ioremap_resource(&pdev->dev, mode); +	if (IS_ERR(hpbdev->mode_reg)) +		return PTR_ERR(hpbdev->mode_reg); + +	dma_dev = &hpbdev->shdma_dev.dma_dev; + +	spin_lock_init(&hpbdev->reg_lock); + +	/* Platform data */ +	hpbdev->pdata = pdata; + +	pm_runtime_enable(&pdev->dev); +	err = pm_runtime_get_sync(&pdev->dev); +	if (err < 0) +		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err); + +	/* Reset DMA controller */ +	hpb_dmae_reset(hpbdev); + +	pm_runtime_put(&pdev->dev); + +	
dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); +	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); + +	hpbdev->shdma_dev.ops = &hpb_dmae_ops; +	hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc); +	err = shdma_init(&pdev->dev, &hpbdev->shdma_dev, pdata->num_channels); +	if (err < 0) +		goto error; + +	/* Create DMA channels */ +	for (i = 0; i < pdata->num_channels; i++) +		hpb_dmae_chan_probe(hpbdev, i); + +	platform_set_drvdata(pdev, hpbdev); +	err = dma_async_device_register(dma_dev); +	if (!err) +		return 0; + +	shdma_cleanup(&hpbdev->shdma_dev); +error: +	pm_runtime_disable(&pdev->dev); +	return err; +} + +static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev) +{ +	struct dma_device *dma_dev = &hpbdev->shdma_dev.dma_dev; +	struct shdma_chan *schan; +	int i; + +	shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) { +		BUG_ON(!schan); + +		shdma_chan_remove(schan); +	} +	dma_dev->chancnt = 0; +} + +static int hpb_dmae_remove(struct platform_device *pdev) +{ +	struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev); + +	dma_async_device_unregister(&hpbdev->shdma_dev.dma_dev); + +	pm_runtime_disable(&pdev->dev); + +	hpb_dmae_chan_remove(hpbdev); + +	return 0; +} + +static void hpb_dmae_shutdown(struct platform_device *pdev) +{ +	struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev); +	hpb_dmae_ctl_stop(hpbdev); +} + +static struct platform_driver hpb_dmae_driver = { +	.probe		= hpb_dmae_probe, +	.remove		= hpb_dmae_remove, +	.shutdown	= hpb_dmae_shutdown, +	.driver = { +		.owner	= THIS_MODULE, +		.name	= "hpb-dma-engine", +	}, +}; +module_platform_driver(hpb_dmae_driver); + +MODULE_AUTHOR("Max Filippov <max.filippov@cogentembedded.com>"); +MODULE_DESCRIPTION("Renesas HPB DMA Engine driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/sh/shdma-arm.h b/drivers/dma/sh/shdma-arm.h new file mode 100644 index 00000000000..a2b8258426c --- /dev/null +++ b/drivers/dma/sh/shdma-arm.h @@ -0,0 +1,51 @@ +/* + * Renesas SuperH DMA Engine support + * + * Copyright (C) 2013 Renesas Electronics, Inc. + * + * This is free software; you can redistribute it and/or modify it under the + * terms of version 2 the GNU General Public License as published by the Free + * Software Foundation. 
+ */ + +#ifndef SHDMA_ARM_H +#define SHDMA_ARM_H + +#include "shdma.h" + +/* Transmit sizes and respective CHCR register values */ +enum { +	XMIT_SZ_8BIT		= 0, +	XMIT_SZ_16BIT		= 1, +	XMIT_SZ_32BIT		= 2, +	XMIT_SZ_64BIT		= 7, +	XMIT_SZ_128BIT		= 3, +	XMIT_SZ_256BIT		= 4, +	XMIT_SZ_512BIT		= 5, +}; + +/* log2(size / 8) - used to calculate number of transfers */ +#define SH_DMAE_TS_SHIFT {		\ +	[XMIT_SZ_8BIT]		= 0,	\ +	[XMIT_SZ_16BIT]		= 1,	\ +	[XMIT_SZ_32BIT]		= 2,	\ +	[XMIT_SZ_64BIT]		= 3,	\ +	[XMIT_SZ_128BIT]	= 4,	\ +	[XMIT_SZ_256BIT]	= 5,	\ +	[XMIT_SZ_512BIT]	= 6,	\ +} + +#define TS_LOW_BIT	0x3 /* --xx */ +#define TS_HI_BIT	0xc /* xx-- */ + +#define TS_LOW_SHIFT	(3) +#define TS_HI_SHIFT	(20 - 2)	/* 2 bits for shifted low TS */ + +#define TS_INDEX2VAL(i) \ +	((((i) & TS_LOW_BIT) << TS_LOW_SHIFT) |\ +	 (((i) & TS_HI_BIT)  << TS_HI_SHIFT)) + +#define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL((xmit_sz))) +#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL((xmit_sz))) + +#endif diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c new file mode 100644 index 00000000000..b35007e21e6 --- /dev/null +++ b/drivers/dma/sh/shdma-base.c @@ -0,0 +1,1016 @@ +/* + * Dmaengine driver base library for DMA controllers, found on SH-based SoCs + * + * extracted from shdma.c + * + * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de> + * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> + * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. + * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. + * + * This is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + */ + +#include <linux/delay.h> +#include <linux/shdma-base.h> +#include <linux/dmaengine.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#include "../dmaengine.h" + +/* DMA descriptor control */ +enum shdma_desc_status { +	DESC_IDLE, +	DESC_PREPARED, +	DESC_SUBMITTED, +	DESC_COMPLETED,	/* completed, have to call callback */ +	DESC_WAITING,	/* callback called, waiting for ack / re-submit */ +}; + +#define NR_DESCS_PER_CHANNEL 32 + +#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan) +#define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev) + +/* + * For slave DMA we assume, that there is a finite number of DMA slaves in the + * system, and that each such slave can only use a finite number of channels. + * We use slave channel IDs to make sure, that no such slave channel ID is + * allocated more than once. 
+ */ +static unsigned int slave_num = 256; +module_param(slave_num, uint, 0444); + +/* A bitmask with slave_num bits */ +static unsigned long *shdma_slave_used; + +/* Called under spin_lock_irq(&schan->chan_lock") */ +static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan) +{ +	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); +	const struct shdma_ops *ops = sdev->ops; +	struct shdma_desc *sdesc; + +	/* DMA work check */ +	if (ops->channel_busy(schan)) +		return; + +	/* Find the first not transferred descriptor */ +	list_for_each_entry(sdesc, &schan->ld_queue, node) +		if (sdesc->mark == DESC_SUBMITTED) { +			ops->start_xfer(schan, sdesc); +			break; +		} +} + +static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx) +{ +	struct shdma_desc *chunk, *c, *desc = +		container_of(tx, struct shdma_desc, async_tx); +	struct shdma_chan *schan = to_shdma_chan(tx->chan); +	dma_async_tx_callback callback = tx->callback; +	dma_cookie_t cookie; +	bool power_up; + +	spin_lock_irq(&schan->chan_lock); + +	power_up = list_empty(&schan->ld_queue); + +	cookie = dma_cookie_assign(tx); + +	/* Mark all chunks of this descriptor as submitted, move to the queue */ +	list_for_each_entry_safe(chunk, c, desc->node.prev, node) { +		/* +		 * All chunks are on the global ld_free, so, we have to find +		 * the end of the chain ourselves +		 */ +		if (chunk != desc && (chunk->mark == DESC_IDLE || +				      chunk->async_tx.cookie > 0 || +				      chunk->async_tx.cookie == -EBUSY || +				      &chunk->node == &schan->ld_free)) +			break; +		chunk->mark = DESC_SUBMITTED; +		if (chunk->chunks == 1) { +			chunk->async_tx.callback = callback; +			chunk->async_tx.callback_param = tx->callback_param; +		} else { +			/* Callback goes to the last chunk */ +			chunk->async_tx.callback = NULL; +		} +		chunk->cookie = cookie; +		list_move_tail(&chunk->node, &schan->ld_queue); + +		dev_dbg(schan->dev, "submit #%d@%p on %d\n", +			tx->cookie, &chunk->async_tx, schan->id); +	} + +	if (power_up) { +		int ret; +		schan->pm_state = SHDMA_PM_BUSY; + +		ret = pm_runtime_get(schan->dev); + +		spin_unlock_irq(&schan->chan_lock); +		if (ret < 0) +			dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret); + +		pm_runtime_barrier(schan->dev); + +		spin_lock_irq(&schan->chan_lock); + +		/* Have we been reset, while waiting? */ +		if (schan->pm_state != SHDMA_PM_ESTABLISHED) { +			struct shdma_dev *sdev = +				to_shdma_dev(schan->dma_chan.device); +			const struct shdma_ops *ops = sdev->ops; +			dev_dbg(schan->dev, "Bring up channel %d\n", +				schan->id); +			/* +			 * TODO: .xfer_setup() might fail on some platforms. 
+			 * Make it int then, on error remove chunks from the +			 * queue again +			 */ +			ops->setup_xfer(schan, schan->slave_id); + +			if (schan->pm_state == SHDMA_PM_PENDING) +				shdma_chan_xfer_ld_queue(schan); +			schan->pm_state = SHDMA_PM_ESTABLISHED; +		} +	} else { +		/* +		 * Tell .device_issue_pending() not to run the queue, interrupts +		 * will do it anyway +		 */ +		schan->pm_state = SHDMA_PM_PENDING; +	} + +	spin_unlock_irq(&schan->chan_lock); + +	return cookie; +} + +/* Called with desc_lock held */ +static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan) +{ +	struct shdma_desc *sdesc; + +	list_for_each_entry(sdesc, &schan->ld_free, node) +		if (sdesc->mark != DESC_PREPARED) { +			BUG_ON(sdesc->mark != DESC_IDLE); +			list_del(&sdesc->node); +			return sdesc; +		} + +	return NULL; +} + +static int shdma_setup_slave(struct shdma_chan *schan, int slave_id, +			     dma_addr_t slave_addr) +{ +	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); +	const struct shdma_ops *ops = sdev->ops; +	int ret, match; + +	if (schan->dev->of_node) { +		match = schan->hw_req; +		ret = ops->set_slave(schan, match, slave_addr, true); +		if (ret < 0) +			return ret; + +		slave_id = schan->slave_id; +	} else { +		match = slave_id; +	} + +	if (slave_id < 0 || slave_id >= slave_num) +		return -EINVAL; + +	if (test_and_set_bit(slave_id, shdma_slave_used)) +		return -EBUSY; + +	ret = ops->set_slave(schan, match, slave_addr, false); +	if (ret < 0) { +		clear_bit(slave_id, shdma_slave_used); +		return ret; +	} + +	schan->slave_id = slave_id; + +	return 0; +} + +/* + * This is the standard shdma filter function to be used as a replacement to the + * "old" method, using the .private pointer. If for some reason you allocate a + * channel without slave data, use something like ERR_PTR(-EINVAL) as a filter + * parameter. If this filter is used, the slave driver, after calling + * dma_request_channel(), will also have to call dmaengine_slave_config() with + * .slave_id, .direction, and either .src_addr or .dst_addr set. + * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE + * capability! If this becomes a requirement, hardware glue drivers, using this + * services would have to provide their own filters, which first would check + * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do + * this, and only then, in case of a match, call this common filter. + * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate(). + * In that case the MID-RID value is used for slave channel filtering and is + * passed to this function in the "arg" parameter. 
+ */ +bool shdma_chan_filter(struct dma_chan *chan, void *arg) +{ +	struct shdma_chan *schan = to_shdma_chan(chan); +	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); +	const struct shdma_ops *ops = sdev->ops; +	int match = (long)arg; +	int ret; + +	if (match < 0) +		/* No slave requested - arbitrary channel */ +		return true; + +	if (!schan->dev->of_node && match >= slave_num) +		return false; + +	ret = ops->set_slave(schan, match, 0, true); +	if (ret < 0) +		return false; + +	return true; +} +EXPORT_SYMBOL(shdma_chan_filter); + +static int shdma_alloc_chan_resources(struct dma_chan *chan) +{ +	struct shdma_chan *schan = to_shdma_chan(chan); +	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); +	const struct shdma_ops *ops = sdev->ops; +	struct shdma_desc *desc; +	struct shdma_slave *slave = chan->private; +	int ret, i; + +	/* +	 * This relies on the guarantee from dmaengine that alloc_chan_resources +	 * never runs concurrently with itself or free_chan_resources. +	 */ +	if (slave) { +		/* Legacy mode: .private is set in filter */ +		ret = shdma_setup_slave(schan, slave->slave_id, 0); +		if (ret < 0) +			goto esetslave; +	} else { +		schan->slave_id = -EINVAL; +	} + +	schan->desc = kcalloc(NR_DESCS_PER_CHANNEL, +			      sdev->desc_size, GFP_KERNEL); +	if (!schan->desc) { +		ret = -ENOMEM; +		goto edescalloc; +	} +	schan->desc_num = NR_DESCS_PER_CHANNEL; + +	for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) { +		desc = ops->embedded_desc(schan->desc, i); +		dma_async_tx_descriptor_init(&desc->async_tx, +					     &schan->dma_chan); +		desc->async_tx.tx_submit = shdma_tx_submit; +		desc->mark = DESC_IDLE; + +		list_add(&desc->node, &schan->ld_free); +	} + +	return NR_DESCS_PER_CHANNEL; + +edescalloc: +	if (slave) +esetslave: +		clear_bit(slave->slave_id, shdma_slave_used); +	chan->private = NULL; +	return ret; +} + +static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) +{ +	struct shdma_desc *desc, *_desc; +	/* Is the "exposed" head of a chain acked? 
*/ +	bool head_acked = false; +	dma_cookie_t cookie = 0; +	dma_async_tx_callback callback = NULL; +	void *param = NULL; +	unsigned long flags; +	LIST_HEAD(cyclic_list); + +	spin_lock_irqsave(&schan->chan_lock, flags); +	list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) { +		struct dma_async_tx_descriptor *tx = &desc->async_tx; + +		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie); +		BUG_ON(desc->mark != DESC_SUBMITTED && +		       desc->mark != DESC_COMPLETED && +		       desc->mark != DESC_WAITING); + +		/* +		 * queue is ordered, and we use this loop to (1) clean up all +		 * completed descriptors, and to (2) update descriptor flags of +		 * any chunks in a (partially) completed chain +		 */ +		if (!all && desc->mark == DESC_SUBMITTED && +		    desc->cookie != cookie) +			break; + +		if (tx->cookie > 0) +			cookie = tx->cookie; + +		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { +			if (schan->dma_chan.completed_cookie != desc->cookie - 1) +				dev_dbg(schan->dev, +					"Completing cookie %d, expected %d\n", +					desc->cookie, +					schan->dma_chan.completed_cookie + 1); +			schan->dma_chan.completed_cookie = desc->cookie; +		} + +		/* Call callback on the last chunk */ +		if (desc->mark == DESC_COMPLETED && tx->callback) { +			desc->mark = DESC_WAITING; +			callback = tx->callback; +			param = tx->callback_param; +			dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n", +				tx->cookie, tx, schan->id); +			BUG_ON(desc->chunks != 1); +			break; +		} + +		if (tx->cookie > 0 || tx->cookie == -EBUSY) { +			if (desc->mark == DESC_COMPLETED) { +				BUG_ON(tx->cookie < 0); +				desc->mark = DESC_WAITING; +			} +			head_acked = async_tx_test_ack(tx); +		} else { +			switch (desc->mark) { +			case DESC_COMPLETED: +				desc->mark = DESC_WAITING; +				/* Fall through */ +			case DESC_WAITING: +				if (head_acked) +					async_tx_ack(&desc->async_tx); +			} +		} + +		dev_dbg(schan->dev, "descriptor %p #%d completed.\n", +			tx, tx->cookie); + +		if (((desc->mark == DESC_COMPLETED || +		      desc->mark == DESC_WAITING) && +		     async_tx_test_ack(&desc->async_tx)) || all) { + +			if (all || !desc->cyclic) { +				/* Remove from ld_queue list */ +				desc->mark = DESC_IDLE; +				list_move(&desc->node, &schan->ld_free); +			} else { +				/* reuse as cyclic */ +				desc->mark = DESC_SUBMITTED; +				list_move_tail(&desc->node, &cyclic_list); +			} + +			if (list_empty(&schan->ld_queue)) { +				dev_dbg(schan->dev, "Bring down channel %d\n", schan->id); +				pm_runtime_put(schan->dev); +				schan->pm_state = SHDMA_PM_ESTABLISHED; +			} +		} +	} + +	if (all && !callback) +		/* +		 * Terminating and the loop completed normally: forgive +		 * uncompleted cookies +		 */ +		schan->dma_chan.completed_cookie = schan->dma_chan.cookie; + +	list_splice_tail(&cyclic_list, &schan->ld_queue); + +	spin_unlock_irqrestore(&schan->chan_lock, flags); + +	if (callback) +		callback(param); + +	return callback; +} + +/* + * shdma_chan_ld_cleanup - Clean up link descriptors + * + * Clean up the ld_queue of DMA channel. + */ +static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all) +{ +	while (__ld_cleanup(schan, all)) +		; +} + +/* + * shdma_free_chan_resources - Free all resources of the channel. 
+ */ +static void shdma_free_chan_resources(struct dma_chan *chan) +{ +	struct shdma_chan *schan = to_shdma_chan(chan); +	struct shdma_dev *sdev = to_shdma_dev(chan->device); +	const struct shdma_ops *ops = sdev->ops; +	LIST_HEAD(list); + +	/* Protect against ISR */ +	spin_lock_irq(&schan->chan_lock); +	ops->halt_channel(schan); +	spin_unlock_irq(&schan->chan_lock); + +	/* Now no new interrupts will occur */ + +	/* Prepared and not submitted descriptors can still be on the queue */ +	if (!list_empty(&schan->ld_queue)) +		shdma_chan_ld_cleanup(schan, true); + +	if (schan->slave_id >= 0) { +		/* The caller is holding dma_list_mutex */ +		clear_bit(schan->slave_id, shdma_slave_used); +		chan->private = NULL; +	} + +	spin_lock_irq(&schan->chan_lock); + +	list_splice_init(&schan->ld_free, &list); +	schan->desc_num = 0; + +	spin_unlock_irq(&schan->chan_lock); + +	kfree(schan->desc); +} + +/** + * shdma_add_desc - get, set up and return one transfer descriptor + * @schan:	DMA channel + * @flags:	DMA transfer flags + * @dst:	destination DMA address, incremented when direction equals + *		DMA_DEV_TO_MEM or DMA_MEM_TO_MEM + * @src:	source DMA address, incremented when direction equals + *		DMA_MEM_TO_DEV or DMA_MEM_TO_MEM + * @len:	DMA transfer length + * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY + * @direction:	needed for slave DMA to decide which address to keep constant, + *		equals DMA_MEM_TO_MEM for MEMCPY + * Returns 0 or an error + * Locks: called with desc_lock held + */ +static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan, +	unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len, +	struct shdma_desc **first, enum dma_transfer_direction direction) +{ +	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); +	const struct shdma_ops *ops = sdev->ops; +	struct shdma_desc *new; +	size_t copy_size = *len; + +	if (!copy_size) +		return NULL; + +	/* Allocate the link descriptor from the free list */ +	new = shdma_get_desc(schan); +	if (!new) { +		dev_err(schan->dev, "No free link descriptor available\n"); +		return NULL; +	} + +	ops->desc_setup(schan, new, *src, *dst, ©_size); + +	if (!*first) { +		/* First desc */ +		new->async_tx.cookie = -EBUSY; +		*first = new; +	} else { +		/* Other desc - invisible to the user */ +		new->async_tx.cookie = -EINVAL; +	} + +	dev_dbg(schan->dev, +		"chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n", +		copy_size, *len, src, dst, &new->async_tx, +		new->async_tx.cookie); + +	new->mark = DESC_PREPARED; +	new->async_tx.flags = flags; +	new->direction = direction; +	new->partial = 0; + +	*len -= copy_size; +	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV) +		*src += copy_size; +	if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM) +		*dst += copy_size; + +	return new; +} + +/* + * shdma_prep_sg - prepare transfer descriptors from an SG list + * + * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also + * converted to scatter-gather to guarantee consistent locking and a correct + * list manipulation. For slave DMA direction carries the usual meaning, and, + * logically, the SG list is RAM and the addr variable contains slave address, + * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM + * and the SG list contains only one element and points at the source buffer. 
+ */ +static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan, +	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, +	enum dma_transfer_direction direction, unsigned long flags, bool cyclic) +{ +	struct scatterlist *sg; +	struct shdma_desc *first = NULL, *new = NULL /* compiler... */; +	LIST_HEAD(tx_list); +	int chunks = 0; +	unsigned long irq_flags; +	int i; + +	for_each_sg(sgl, sg, sg_len, i) +		chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len); + +	/* Have to lock the whole loop to protect against concurrent release */ +	spin_lock_irqsave(&schan->chan_lock, irq_flags); + +	/* +	 * Chaining: +	 * first descriptor is what user is dealing with in all API calls, its +	 *	cookie is at first set to -EBUSY, at tx-submit to a positive +	 *	number +	 * if more than one chunk is needed further chunks have cookie = -EINVAL +	 * the last chunk, if not equal to the first, has cookie = -ENOSPC +	 * all chunks are linked onto the tx_list head with their .node heads +	 *	only during this function, then they are immediately spliced +	 *	back onto the free list in form of a chain +	 */ +	for_each_sg(sgl, sg, sg_len, i) { +		dma_addr_t sg_addr = sg_dma_address(sg); +		size_t len = sg_dma_len(sg); + +		if (!len) +			goto err_get_desc; + +		do { +			dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n", +				i, sg, len, &sg_addr); + +			if (direction == DMA_DEV_TO_MEM) +				new = shdma_add_desc(schan, flags, +						&sg_addr, addr, &len, &first, +						direction); +			else +				new = shdma_add_desc(schan, flags, +						addr, &sg_addr, &len, &first, +						direction); +			if (!new) +				goto err_get_desc; + +			new->cyclic = cyclic; +			if (cyclic) +				new->chunks = 1; +			else +				new->chunks = chunks--; +			list_add_tail(&new->node, &tx_list); +		} while (len); +	} + +	if (new != first) +		new->async_tx.cookie = -ENOSPC; + +	/* Put them back on the free list, so, they don't get lost */ +	list_splice_tail(&tx_list, &schan->ld_free); + +	spin_unlock_irqrestore(&schan->chan_lock, irq_flags); + +	return &first->async_tx; + +err_get_desc: +	list_for_each_entry(new, &tx_list, node) +		new->mark = DESC_IDLE; +	list_splice(&tx_list, &schan->ld_free); + +	spin_unlock_irqrestore(&schan->chan_lock, irq_flags); + +	return NULL; +} + +static struct dma_async_tx_descriptor *shdma_prep_memcpy( +	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, +	size_t len, unsigned long flags) +{ +	struct shdma_chan *schan = to_shdma_chan(chan); +	struct scatterlist sg; + +	if (!chan || !len) +		return NULL; + +	BUG_ON(!schan->desc_num); + +	sg_init_table(&sg, 1); +	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len, +		    offset_in_page(dma_src)); +	sg_dma_address(&sg) = dma_src; +	sg_dma_len(&sg) = len; + +	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, +			     flags, false); +} + +static struct dma_async_tx_descriptor *shdma_prep_slave_sg( +	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, +	enum dma_transfer_direction direction, unsigned long flags, void *context) +{ +	struct shdma_chan *schan = to_shdma_chan(chan); +	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); +	const struct shdma_ops *ops = sdev->ops; +	int slave_id = schan->slave_id; +	dma_addr_t slave_addr; + +	if (!chan) +		return NULL; + +	BUG_ON(!schan->desc_num); + +	/* Someone calling slave DMA on a generic channel? 
*/ +	if (slave_id < 0 || !sg_len) { +		dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n", +			 __func__, sg_len, slave_id); +		return NULL; +	} + +	slave_addr = ops->slave_addr(schan); + +	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr, +			     direction, flags, false); +} + +#define SHDMA_MAX_SG_LEN 32 + +static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( +	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, +	size_t period_len, enum dma_transfer_direction direction, +	unsigned long flags, void *context) +{ +	struct shdma_chan *schan = to_shdma_chan(chan); +	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); +	const struct shdma_ops *ops = sdev->ops; +	unsigned int sg_len = buf_len / period_len; +	int slave_id = schan->slave_id; +	dma_addr_t slave_addr; +	struct scatterlist sgl[SHDMA_MAX_SG_LEN]; +	int i; + +	if (!chan) +		return NULL; + +	BUG_ON(!schan->desc_num); + +	if (sg_len > SHDMA_MAX_SG_LEN) { +		dev_err(schan->dev, "sg length %d exceds limit %d", +				sg_len, SHDMA_MAX_SG_LEN); +		return NULL; +	} + +	/* Someone calling slave DMA on a generic channel? */ +	if (slave_id < 0 || (buf_len < period_len)) { +		dev_warn(schan->dev, +			"%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n", +			__func__, buf_len, period_len, slave_id); +		return NULL; +	} + +	slave_addr = ops->slave_addr(schan); + +	sg_init_table(sgl, sg_len); +	for (i = 0; i < sg_len; i++) { +		dma_addr_t src = buf_addr + (period_len * i); + +		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len, +			    offset_in_page(src)); +		sg_dma_address(&sgl[i]) = src; +		sg_dma_len(&sgl[i]) = period_len; +	} + +	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr, +			     direction, flags, true); +} + +static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, +			  unsigned long arg) +{ +	struct shdma_chan *schan = to_shdma_chan(chan); +	struct shdma_dev *sdev = to_shdma_dev(chan->device); +	const struct shdma_ops *ops = sdev->ops; +	struct dma_slave_config *config; +	unsigned long flags; +	int ret; + +	switch (cmd) { +	case DMA_TERMINATE_ALL: +		spin_lock_irqsave(&schan->chan_lock, flags); +		ops->halt_channel(schan); + +		if (ops->get_partial && !list_empty(&schan->ld_queue)) { +			/* Record partial transfer */ +			struct shdma_desc *desc = list_first_entry(&schan->ld_queue, +						struct shdma_desc, node); +			desc->partial = ops->get_partial(schan, desc); +		} + +		spin_unlock_irqrestore(&schan->chan_lock, flags); + +		shdma_chan_ld_cleanup(schan, true); +		break; +	case DMA_SLAVE_CONFIG: +		/* +		 * So far only .slave_id is used, but the slave drivers are +		 * encouraged to also set a transfer direction and an address. +		 */ +		if (!arg) +			return -EINVAL; +		/* +		 * We could lock this, but you shouldn't be configuring the +		 * channel, while using it... +		 */ +		config = (struct dma_slave_config *)arg; +		ret = shdma_setup_slave(schan, config->slave_id, +					config->direction == DMA_DEV_TO_MEM ? 
+					config->src_addr : config->dst_addr); +		if (ret < 0) +			return ret; +		break; +	default: +		return -ENXIO; +	} + +	return 0; +} + +static void shdma_issue_pending(struct dma_chan *chan) +{ +	struct shdma_chan *schan = to_shdma_chan(chan); + +	spin_lock_irq(&schan->chan_lock); +	if (schan->pm_state == SHDMA_PM_ESTABLISHED) +		shdma_chan_xfer_ld_queue(schan); +	else +		schan->pm_state = SHDMA_PM_PENDING; +	spin_unlock_irq(&schan->chan_lock); +} + +static enum dma_status shdma_tx_status(struct dma_chan *chan, +					dma_cookie_t cookie, +					struct dma_tx_state *txstate) +{ +	struct shdma_chan *schan = to_shdma_chan(chan); +	enum dma_status status; +	unsigned long flags; + +	shdma_chan_ld_cleanup(schan, false); + +	spin_lock_irqsave(&schan->chan_lock, flags); + +	status = dma_cookie_status(chan, cookie, txstate); + +	/* +	 * If we don't find cookie on the queue, it has been aborted and we have +	 * to report error +	 */ +	if (status != DMA_COMPLETE) { +		struct shdma_desc *sdesc; +		status = DMA_ERROR; +		list_for_each_entry(sdesc, &schan->ld_queue, node) +			if (sdesc->cookie == cookie) { +				status = DMA_IN_PROGRESS; +				break; +			} +	} + +	spin_unlock_irqrestore(&schan->chan_lock, flags); + +	return status; +} + +/* Called from error IRQ or NMI */ +bool shdma_reset(struct shdma_dev *sdev) +{ +	const struct shdma_ops *ops = sdev->ops; +	struct shdma_chan *schan; +	unsigned int handled = 0; +	int i; + +	/* Reset all channels */ +	shdma_for_each_chan(schan, sdev, i) { +		struct shdma_desc *sdesc; +		LIST_HEAD(dl); + +		if (!schan) +			continue; + +		spin_lock(&schan->chan_lock); + +		/* Stop the channel */ +		ops->halt_channel(schan); + +		list_splice_init(&schan->ld_queue, &dl); + +		if (!list_empty(&dl)) { +			dev_dbg(schan->dev, "Bring down channel %d\n", schan->id); +			pm_runtime_put(schan->dev); +		} +		schan->pm_state = SHDMA_PM_ESTABLISHED; + +		spin_unlock(&schan->chan_lock); + +		/* Complete all  */ +		list_for_each_entry(sdesc, &dl, node) { +			struct dma_async_tx_descriptor *tx = &sdesc->async_tx; +			sdesc->mark = DESC_IDLE; +			if (tx->callback) +				tx->callback(tx->callback_param); +		} + +		spin_lock(&schan->chan_lock); +		list_splice(&dl, &schan->ld_free); +		spin_unlock(&schan->chan_lock); + +		handled++; +	} + +	return !!handled; +} +EXPORT_SYMBOL(shdma_reset); + +static irqreturn_t chan_irq(int irq, void *dev) +{ +	struct shdma_chan *schan = dev; +	const struct shdma_ops *ops = +		to_shdma_dev(schan->dma_chan.device)->ops; +	irqreturn_t ret; + +	spin_lock(&schan->chan_lock); + +	ret = ops->chan_irq(schan, irq) ? 
IRQ_WAKE_THREAD : IRQ_NONE; + +	spin_unlock(&schan->chan_lock); + +	return ret; +} + +static irqreturn_t chan_irqt(int irq, void *dev) +{ +	struct shdma_chan *schan = dev; +	const struct shdma_ops *ops = +		to_shdma_dev(schan->dma_chan.device)->ops; +	struct shdma_desc *sdesc; + +	spin_lock_irq(&schan->chan_lock); +	list_for_each_entry(sdesc, &schan->ld_queue, node) { +		if (sdesc->mark == DESC_SUBMITTED && +		    ops->desc_completed(schan, sdesc)) { +			dev_dbg(schan->dev, "done #%d@%p\n", +				sdesc->async_tx.cookie, &sdesc->async_tx); +			sdesc->mark = DESC_COMPLETED; +			break; +		} +	} +	/* Next desc */ +	shdma_chan_xfer_ld_queue(schan); +	spin_unlock_irq(&schan->chan_lock); + +	shdma_chan_ld_cleanup(schan, false); + +	return IRQ_HANDLED; +} + +int shdma_request_irq(struct shdma_chan *schan, int irq, +			   unsigned long flags, const char *name) +{ +	int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq, +					    chan_irqt, flags, name, schan); + +	schan->irq = ret < 0 ? ret : irq; + +	return ret; +} +EXPORT_SYMBOL(shdma_request_irq); + +void shdma_chan_probe(struct shdma_dev *sdev, +			   struct shdma_chan *schan, int id) +{ +	schan->pm_state = SHDMA_PM_ESTABLISHED; + +	/* reference struct dma_device */ +	schan->dma_chan.device = &sdev->dma_dev; +	dma_cookie_init(&schan->dma_chan); + +	schan->dev = sdev->dma_dev.dev; +	schan->id = id; + +	if (!schan->max_xfer_len) +		schan->max_xfer_len = PAGE_SIZE; + +	spin_lock_init(&schan->chan_lock); + +	/* Init descripter manage list */ +	INIT_LIST_HEAD(&schan->ld_queue); +	INIT_LIST_HEAD(&schan->ld_free); + +	/* Add the channel to DMA device channel list */ +	list_add_tail(&schan->dma_chan.device_node, +			&sdev->dma_dev.channels); +	sdev->schan[sdev->dma_dev.chancnt++] = schan; +} +EXPORT_SYMBOL(shdma_chan_probe); + +void shdma_chan_remove(struct shdma_chan *schan) +{ +	list_del(&schan->dma_chan.device_node); +} +EXPORT_SYMBOL(shdma_chan_remove); + +int shdma_init(struct device *dev, struct shdma_dev *sdev, +		    int chan_num) +{ +	struct dma_device *dma_dev = &sdev->dma_dev; + +	/* +	 * Require all call-backs for now, they can trivially be made optional +	 * later as required +	 */ +	if (!sdev->ops || +	    !sdev->desc_size || +	    !sdev->ops->embedded_desc || +	    !sdev->ops->start_xfer || +	    !sdev->ops->setup_xfer || +	    !sdev->ops->set_slave || +	    !sdev->ops->desc_setup || +	    !sdev->ops->slave_addr || +	    !sdev->ops->channel_busy || +	    !sdev->ops->halt_channel || +	    !sdev->ops->desc_completed) +		return -EINVAL; + +	sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL); +	if (!sdev->schan) +		return -ENOMEM; + +	INIT_LIST_HEAD(&dma_dev->channels); + +	/* Common and MEMCPY operations */ +	dma_dev->device_alloc_chan_resources +		= shdma_alloc_chan_resources; +	dma_dev->device_free_chan_resources = shdma_free_chan_resources; +	dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy; +	dma_dev->device_tx_status = shdma_tx_status; +	dma_dev->device_issue_pending = shdma_issue_pending; + +	/* Compulsory for DMA_SLAVE fields */ +	dma_dev->device_prep_slave_sg = shdma_prep_slave_sg; +	dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic; +	dma_dev->device_control = shdma_control; + +	dma_dev->dev = dev; + +	return 0; +} +EXPORT_SYMBOL(shdma_init); + +void shdma_cleanup(struct shdma_dev *sdev) +{ +	kfree(sdev->schan); +} +EXPORT_SYMBOL(shdma_cleanup); + +static int __init shdma_enter(void) +{ +	shdma_slave_used = kzalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG) * +				    sizeof(long), GFP_KERNEL); +	if 
(!shdma_slave_used) +		return -ENOMEM; +	return 0; +} +module_init(shdma_enter); + +static void __exit shdma_exit(void) +{ +	kfree(shdma_slave_used); +} +module_exit(shdma_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("SH-DMA driver base library"); +MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>"); diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c new file mode 100644 index 00000000000..b4ff9d3e56d --- /dev/null +++ b/drivers/dma/sh/shdma-of.c @@ -0,0 +1,80 @@ +/* + * SHDMA Device Tree glue + * + * Copyright (C) 2013 Renesas Electronics Inc. + * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de> + * + * This is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + */ + +#include <linux/dmaengine.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_dma.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/shdma-base.h> + +#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan) + +static struct dma_chan *shdma_of_xlate(struct of_phandle_args *dma_spec, +				       struct of_dma *ofdma) +{ +	u32 id = dma_spec->args[0]; +	dma_cap_mask_t mask; +	struct dma_chan *chan; + +	if (dma_spec->args_count != 1) +		return NULL; + +	dma_cap_zero(mask); +	/* Only slave DMA channels can be allocated via DT */ +	dma_cap_set(DMA_SLAVE, mask); + +	chan = dma_request_channel(mask, shdma_chan_filter, +				   (void *)(uintptr_t)id); +	if (chan) +		to_shdma_chan(chan)->hw_req = id; + +	return chan; +} + +static int shdma_of_probe(struct platform_device *pdev) +{ +	const struct of_dev_auxdata *lookup = dev_get_platdata(&pdev->dev); +	int ret; + +	ret = of_dma_controller_register(pdev->dev.of_node, +					 shdma_of_xlate, pdev); +	if (ret < 0) +		return ret; + +	ret = of_platform_populate(pdev->dev.of_node, NULL, lookup, &pdev->dev); +	if (ret < 0) +		of_dma_controller_free(pdev->dev.of_node); + +	return ret; +} + +static const struct of_device_id shdma_of_match[] = { +	{ .compatible = "renesas,shdma-mux", }, +	{ } +}; +MODULE_DEVICE_TABLE(of, sh_dmae_of_match); + +static struct platform_driver shdma_of = { +	.driver		= { +		.owner	= THIS_MODULE, +		.name	= "shdma-of", +		.of_match_table = shdma_of_match, +	}, +	.probe		= shdma_of_probe, +}; + +module_platform_driver(shdma_of); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("SH-DMA driver DT glue"); +MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>"); diff --git a/drivers/dma/sh/shdma-r8a73a4.c b/drivers/dma/sh/shdma-r8a73a4.c new file mode 100644 index 00000000000..4fb99970a3e --- /dev/null +++ b/drivers/dma/sh/shdma-r8a73a4.c @@ -0,0 +1,77 @@ +/* + * Renesas SuperH DMA Engine support for r8a73a4 (APE6) SoCs + * + * Copyright (C) 2013 Renesas Electronics, Inc. + * + * This is free software; you can redistribute it and/or modify it under the + * terms of version 2 the GNU General Public License as published by the Free + * Software Foundation. 
+ */ +#include <linux/sh_dma.h> + +#include "shdma-arm.h" + +const unsigned int dma_ts_shift[] = SH_DMAE_TS_SHIFT; + +static const struct sh_dmae_slave_config dma_slaves[] = { +	{ +		.chcr		= CHCR_TX(XMIT_SZ_32BIT), +		.mid_rid	= 0xd1,		/* MMC0 Tx */ +	}, { +		.chcr		= CHCR_RX(XMIT_SZ_32BIT), +		.mid_rid	= 0xd2,		/* MMC0 Rx */ +	}, { +		.chcr		= CHCR_TX(XMIT_SZ_32BIT), +		.mid_rid	= 0xe1,		/* MMC1 Tx */ +	}, { +		.chcr		= CHCR_RX(XMIT_SZ_32BIT), +		.mid_rid	= 0xe2,		/* MMC1 Rx */ +	}, +}; + +#define DMAE_CHANNEL(a, b)				\ +	{						\ +		.offset         = (a) - 0x20,		\ +		.dmars          = (a) - 0x20 + 0x40,	\ +		.chclr_bit	= (b),			\ +		.chclr_offset	= 0x80 - 0x20,		\ +	} + +static const struct sh_dmae_channel dma_channels[] = { +	DMAE_CHANNEL(0x8000, 0), +	DMAE_CHANNEL(0x8080, 1), +	DMAE_CHANNEL(0x8100, 2), +	DMAE_CHANNEL(0x8180, 3), +	DMAE_CHANNEL(0x8200, 4), +	DMAE_CHANNEL(0x8280, 5), +	DMAE_CHANNEL(0x8300, 6), +	DMAE_CHANNEL(0x8380, 7), +	DMAE_CHANNEL(0x8400, 8), +	DMAE_CHANNEL(0x8480, 9), +	DMAE_CHANNEL(0x8500, 10), +	DMAE_CHANNEL(0x8580, 11), +	DMAE_CHANNEL(0x8600, 12), +	DMAE_CHANNEL(0x8680, 13), +	DMAE_CHANNEL(0x8700, 14), +	DMAE_CHANNEL(0x8780, 15), +	DMAE_CHANNEL(0x8800, 16), +	DMAE_CHANNEL(0x8880, 17), +	DMAE_CHANNEL(0x8900, 18), +	DMAE_CHANNEL(0x8980, 19), +}; + +const struct sh_dmae_pdata r8a73a4_dma_pdata = { +	.slave		= dma_slaves, +	.slave_num	= ARRAY_SIZE(dma_slaves), +	.channel	= dma_channels, +	.channel_num	= ARRAY_SIZE(dma_channels), +	.ts_low_shift	= TS_LOW_SHIFT, +	.ts_low_mask	= TS_LOW_BIT << TS_LOW_SHIFT, +	.ts_high_shift	= TS_HI_SHIFT, +	.ts_high_mask	= TS_HI_BIT << TS_HI_SHIFT, +	.ts_shift	= dma_ts_shift, +	.ts_shift_num	= ARRAY_SIZE(dma_ts_shift), +	.dmaor_init     = DMAOR_DME, +	.chclr_present	= 1, +	.chclr_bitwise	= 1, +}; diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h new file mode 100644 index 00000000000..758a57b5187 --- /dev/null +++ b/drivers/dma/sh/shdma.h @@ -0,0 +1,72 @@ +/* + * Renesas SuperH DMA Engine support + * + * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> + * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. + * + * This is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + */ +#ifndef __DMA_SHDMA_H +#define __DMA_SHDMA_H + +#include <linux/sh_dma.h> +#include <linux/shdma-base.h> +#include <linux/dmaengine.h> +#include <linux/interrupt.h> +#include <linux/list.h> + +#define SH_DMAE_MAX_CHANNELS 20 +#define SH_DMAE_TCR_MAX 0x00FFFFFF	/* 16MB */ + +struct device; + +struct sh_dmae_chan { +	struct shdma_chan shdma_chan; +	const struct sh_dmae_slave_config *config; /* Slave DMA configuration */ +	int xmit_shift;			/* log_2(bytes_per_xfer) */ +	void __iomem *base; +	char dev_id[16];		/* unique name per DMAC of channel */ +	int pm_error; +	dma_addr_t slave_addr; +}; + +struct sh_dmae_device { +	struct shdma_dev shdma_dev; +	struct sh_dmae_chan *chan[SH_DMAE_MAX_CHANNELS]; +	const struct sh_dmae_pdata *pdata; +	struct list_head node; +	void __iomem *chan_reg; +	void __iomem *dmars; +	unsigned int chcr_offset; +	u32 chcr_ie_bit; +}; + +struct sh_dmae_regs { +	u32 sar; /* SAR / source address */ +	u32 dar; /* DAR / destination address */ +	u32 tcr; /* TCR / transfer count */ +}; + +struct sh_dmae_desc { +	struct sh_dmae_regs hw; +	struct shdma_desc shdma_desc; +}; + +#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, shdma_chan) +#define to_sh_desc(lh) container_of(lh, struct sh_desc, node) +#define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx) +#define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\ +				     struct sh_dmae_device, shdma_dev.dma_dev) + +#ifdef CONFIG_SHDMA_R8A73A4 +extern const struct sh_dmae_pdata r8a73a4_dma_pdata; +#define r8a73a4_shdma_devid (&r8a73a4_dma_pdata) +#else +#define r8a73a4_shdma_devid NULL +#endif + +#endif	/* __DMA_SHDMA_H */ diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c new file mode 100644 index 00000000000..146d5df926d --- /dev/null +++ b/drivers/dma/sh/shdmac.c @@ -0,0 +1,960 @@ +/* + * Renesas SuperH DMA Engine support + * + * base is drivers/dma/flsdma.c + * + * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de> + * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> + * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. + * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. + * + * This is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * - DMA of SuperH does not have Hardware DMA chain mode. + * - MAX DMA size is 16MB. 
+ * + */ + +#include <linux/delay.h> +#include <linux/dmaengine.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/kdebug.h> +#include <linux/module.h> +#include <linux/notifier.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/rculist.h> +#include <linux/sh_dma.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#include "../dmaengine.h" +#include "shdma.h" + +/* DMA register */ +#define SAR	0x00 +#define DAR	0x04 +#define TCR	0x08 +#define CHCR	0x0C +#define DMAOR	0x40 + +#define TEND	0x18 /* USB-DMAC */ + +#define SH_DMAE_DRV_NAME "sh-dma-engine" + +/* Default MEMCPY transfer size = 2^2 = 4 bytes */ +#define LOG2_DEFAULT_XFER_SIZE	2 +#define SH_DMA_SLAVE_NUMBER 256 +#define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1) + +/* + * Used for write-side mutual exclusion for the global device list, + * read-side synchronization by way of RCU, and per-controller data. + */ +static DEFINE_SPINLOCK(sh_dmae_lock); +static LIST_HEAD(sh_dmae_devices); + +/* + * Different DMAC implementations provide different ways to clear DMA channels: + * (1) none - no CHCLR registers are available + * (2) one CHCLR register per channel - 0 has to be written to it to clear + *     channel buffers + * (3) one CHCLR per several channels - 1 has to be written to the bit, + *     corresponding to the specific channel to reset it + */ +static void channel_clear(struct sh_dmae_chan *sh_dc) +{ +	struct sh_dmae_device *shdev = to_sh_dev(sh_dc); +	const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel + +		sh_dc->shdma_chan.id; +	u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0; + +	__raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset); +} + +static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) +{ +	__raw_writel(data, sh_dc->base + reg); +} + +static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) +{ +	return __raw_readl(sh_dc->base + reg); +} + +static u16 dmaor_read(struct sh_dmae_device *shdev) +{ +	void __iomem *addr = shdev->chan_reg + DMAOR; + +	if (shdev->pdata->dmaor_is_32bit) +		return __raw_readl(addr); +	else +		return __raw_readw(addr); +} + +static void dmaor_write(struct sh_dmae_device *shdev, u16 data) +{ +	void __iomem *addr = shdev->chan_reg + DMAOR; + +	if (shdev->pdata->dmaor_is_32bit) +		__raw_writel(data, addr); +	else +		__raw_writew(data, addr); +} + +static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data) +{ +	struct sh_dmae_device *shdev = to_sh_dev(sh_dc); + +	__raw_writel(data, sh_dc->base + shdev->chcr_offset); +} + +static u32 chcr_read(struct sh_dmae_chan *sh_dc) +{ +	struct sh_dmae_device *shdev = to_sh_dev(sh_dc); + +	return __raw_readl(sh_dc->base + shdev->chcr_offset); +} + +/* + * Reset DMA controller + * + * SH7780 has two DMAOR register + */ +static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev) +{ +	unsigned short dmaor; +	unsigned long flags; + +	spin_lock_irqsave(&sh_dmae_lock, flags); + +	dmaor = dmaor_read(shdev); +	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME)); + +	spin_unlock_irqrestore(&sh_dmae_lock, flags); +} + +static int sh_dmae_rst(struct sh_dmae_device *shdev) +{ +	unsigned short dmaor; +	unsigned long flags; + +	spin_lock_irqsave(&sh_dmae_lock, flags); + +	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME); + +	if (shdev->pdata->chclr_present) { +		int i; +		for (i = 0; i < shdev->pdata->channel_num; i++) { +			struct 
sh_dmae_chan *sh_chan = shdev->chan[i]; +			if (sh_chan) +				channel_clear(sh_chan); +		} +	} + +	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init); + +	dmaor = dmaor_read(shdev); + +	spin_unlock_irqrestore(&sh_dmae_lock, flags); + +	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) { +		dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n"); +		return -EIO; +	} +	if (shdev->pdata->dmaor_init & ~dmaor) +		dev_warn(shdev->shdma_dev.dma_dev.dev, +			 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n", +			 dmaor, shdev->pdata->dmaor_init); +	return 0; +} + +static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) +{ +	u32 chcr = chcr_read(sh_chan); + +	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE) +		return true; /* working */ + +	return false; /* waiting */ +} + +static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) +{ +	struct sh_dmae_device *shdev = to_sh_dev(sh_chan); +	const struct sh_dmae_pdata *pdata = shdev->pdata; +	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) | +		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift); + +	if (cnt >= pdata->ts_shift_num) +		cnt = 0; + +	return pdata->ts_shift[cnt]; +} + +static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size) +{ +	struct sh_dmae_device *shdev = to_sh_dev(sh_chan); +	const struct sh_dmae_pdata *pdata = shdev->pdata; +	int i; + +	for (i = 0; i < pdata->ts_shift_num; i++) +		if (pdata->ts_shift[i] == l2size) +			break; + +	if (i == pdata->ts_shift_num) +		i = 0; + +	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) | +		((i << pdata->ts_high_shift) & pdata->ts_high_mask); +} + +static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) +{ +	sh_dmae_writel(sh_chan, hw->sar, SAR); +	sh_dmae_writel(sh_chan, hw->dar, DAR); +	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR); +} + +static void dmae_start(struct sh_dmae_chan *sh_chan) +{ +	struct sh_dmae_device *shdev = to_sh_dev(sh_chan); +	u32 chcr = chcr_read(sh_chan); + +	if (shdev->pdata->needs_tend_set) +		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND); + +	chcr |= CHCR_DE | shdev->chcr_ie_bit; +	chcr_write(sh_chan, chcr & ~CHCR_TE); +} + +static void dmae_init(struct sh_dmae_chan *sh_chan) +{ +	/* +	 * Default configuration for dual address memory-memory transfer. +	 * 0x400 represents auto-request. +	 */ +	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan, +						   LOG2_DEFAULT_XFER_SIZE); +	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); +	chcr_write(sh_chan, chcr); +} + +static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) +{ +	/* If DMA is active, cannot set CHCR. 
TODO: remove this superfluous check */ +	if (dmae_is_busy(sh_chan)) +		return -EBUSY; + +	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val); +	chcr_write(sh_chan, val); + +	return 0; +} + +static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) +{ +	struct sh_dmae_device *shdev = to_sh_dev(sh_chan); +	const struct sh_dmae_pdata *pdata = shdev->pdata; +	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id]; +	void __iomem *addr = shdev->dmars; +	unsigned int shift = chan_pdata->dmars_bit; + +	if (dmae_is_busy(sh_chan)) +		return -EBUSY; + +	if (pdata->no_dmars) +		return 0; + +	/* in the case of a missing DMARS resource use first memory window */ +	if (!addr) +		addr = shdev->chan_reg; +	addr += chan_pdata->dmars; + +	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift), +		     addr); + +	return 0; +} + +static void sh_dmae_start_xfer(struct shdma_chan *schan, +			       struct shdma_desc *sdesc) +{ +	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, +						    shdma_chan); +	struct sh_dmae_desc *sh_desc = container_of(sdesc, +					struct sh_dmae_desc, shdma_desc); +	dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n", +		sdesc->async_tx.cookie, sh_chan->shdma_chan.id, +		sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar); +	/* Get the ld start address from ld_queue */ +	dmae_set_reg(sh_chan, &sh_desc->hw); +	dmae_start(sh_chan); +} + +static bool sh_dmae_channel_busy(struct shdma_chan *schan) +{ +	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, +						    shdma_chan); +	return dmae_is_busy(sh_chan); +} + +static void sh_dmae_setup_xfer(struct shdma_chan *schan, +			       int slave_id) +{ +	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, +						    shdma_chan); + +	if (slave_id >= 0) { +		const struct sh_dmae_slave_config *cfg = +			sh_chan->config; + +		dmae_set_dmars(sh_chan, cfg->mid_rid); +		dmae_set_chcr(sh_chan, cfg->chcr); +	} else { +		dmae_init(sh_chan); +	} +} + +/* + * Find a slave channel configuration from the contoller list by either a slave + * ID in the non-DT case, or by a MID/RID value in the DT case + */ +static const struct sh_dmae_slave_config *dmae_find_slave( +	struct sh_dmae_chan *sh_chan, int match) +{ +	struct sh_dmae_device *shdev = to_sh_dev(sh_chan); +	const struct sh_dmae_pdata *pdata = shdev->pdata; +	const struct sh_dmae_slave_config *cfg; +	int i; + +	if (!sh_chan->shdma_chan.dev->of_node) { +		if (match >= SH_DMA_SLAVE_NUMBER) +			return NULL; + +		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) +			if (cfg->slave_id == match) +				return cfg; +	} else { +		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) +			if (cfg->mid_rid == match) { +				sh_chan->shdma_chan.slave_id = i; +				return cfg; +			} +	} + +	return NULL; +} + +static int sh_dmae_set_slave(struct shdma_chan *schan, +			     int slave_id, dma_addr_t slave_addr, bool try) +{ +	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, +						    shdma_chan); +	const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id); +	if (!cfg) +		return -ENXIO; + +	if (!try) { +		sh_chan->config = cfg; +		sh_chan->slave_addr = slave_addr ? 
: cfg->addr; +	} + +	return 0; +} + +static void dmae_halt(struct sh_dmae_chan *sh_chan) +{ +	struct sh_dmae_device *shdev = to_sh_dev(sh_chan); +	u32 chcr = chcr_read(sh_chan); + +	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit); +	chcr_write(sh_chan, chcr); +} + +static int sh_dmae_desc_setup(struct shdma_chan *schan, +			      struct shdma_desc *sdesc, +			      dma_addr_t src, dma_addr_t dst, size_t *len) +{ +	struct sh_dmae_desc *sh_desc = container_of(sdesc, +					struct sh_dmae_desc, shdma_desc); + +	if (*len > schan->max_xfer_len) +		*len = schan->max_xfer_len; + +	sh_desc->hw.sar = src; +	sh_desc->hw.dar = dst; +	sh_desc->hw.tcr = *len; + +	return 0; +} + +static void sh_dmae_halt(struct shdma_chan *schan) +{ +	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, +						    shdma_chan); +	dmae_halt(sh_chan); +} + +static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq) +{ +	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, +						    shdma_chan); + +	if (!(chcr_read(sh_chan) & CHCR_TE)) +		return false; + +	/* DMA stop */ +	dmae_halt(sh_chan); + +	return true; +} + +static size_t sh_dmae_get_partial(struct shdma_chan *schan, +				  struct shdma_desc *sdesc) +{ +	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, +						    shdma_chan); +	struct sh_dmae_desc *sh_desc = container_of(sdesc, +					struct sh_dmae_desc, shdma_desc); +	return sh_desc->hw.tcr - +		(sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift); +} + +/* Called from error IRQ or NMI */ +static bool sh_dmae_reset(struct sh_dmae_device *shdev) +{ +	bool ret; + +	/* halt the dma controller */ +	sh_dmae_ctl_stop(shdev); + +	/* We cannot detect, which channel caused the error, have to reset all */ +	ret = shdma_reset(&shdev->shdma_dev); + +	sh_dmae_rst(shdev); + +	return ret; +} + +#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM) +static irqreturn_t sh_dmae_err(int irq, void *data) +{ +	struct sh_dmae_device *shdev = data; + +	if (!(dmaor_read(shdev) & DMAOR_AE)) +		return IRQ_NONE; + +	sh_dmae_reset(shdev); +	return IRQ_HANDLED; +} +#endif + +static bool sh_dmae_desc_completed(struct shdma_chan *schan, +				   struct shdma_desc *sdesc) +{ +	struct sh_dmae_chan *sh_chan = container_of(schan, +					struct sh_dmae_chan, shdma_chan); +	struct sh_dmae_desc *sh_desc = container_of(sdesc, +					struct sh_dmae_desc, shdma_desc); +	u32 sar_buf = sh_dmae_readl(sh_chan, SAR); +	u32 dar_buf = sh_dmae_readl(sh_chan, DAR); + +	return	(sdesc->direction == DMA_DEV_TO_MEM && +		 (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) || +		(sdesc->direction != DMA_DEV_TO_MEM && +		 (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf); +} + +static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev) +{ +	/* Fast path out if NMIF is not asserted for this controller */ +	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0) +		return false; + +	return sh_dmae_reset(shdev); +} + +static int sh_dmae_nmi_handler(struct notifier_block *self, +			       unsigned long cmd, void *data) +{ +	struct sh_dmae_device *shdev; +	int ret = NOTIFY_DONE; +	bool triggered; + +	/* +	 * Only concern ourselves with NMI events. +	 * +	 * Normally we would check the die chain value, but as this needs +	 * to be architecture independent, check for NMI context instead. 
+	 */ +	if (!in_nmi()) +		return NOTIFY_DONE; + +	rcu_read_lock(); +	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) { +		/* +		 * Only stop if one of the controllers has NMIF asserted, +		 * we do not want to interfere with regular address error +		 * handling or NMI events that don't concern the DMACs. +		 */ +		triggered = sh_dmae_nmi_notify(shdev); +		if (triggered == true) +			ret = NOTIFY_OK; +	} +	rcu_read_unlock(); + +	return ret; +} + +static struct notifier_block sh_dmae_nmi_notifier __read_mostly = { +	.notifier_call	= sh_dmae_nmi_handler, + +	/* Run before NMI debug handler and KGDB */ +	.priority	= 1, +}; + +static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, +					int irq, unsigned long flags) +{ +	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id]; +	struct shdma_dev *sdev = &shdev->shdma_dev; +	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev); +	struct sh_dmae_chan *sh_chan; +	struct shdma_chan *schan; +	int err; + +	sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan), +			       GFP_KERNEL); +	if (!sh_chan) { +		dev_err(sdev->dma_dev.dev, +			"No free memory for allocating dma channels!\n"); +		return -ENOMEM; +	} + +	schan = &sh_chan->shdma_chan; +	schan->max_xfer_len = SH_DMA_TCR_MAX + 1; + +	shdma_chan_probe(sdev, schan, id); + +	sh_chan->base = shdev->chan_reg + chan_pdata->offset; + +	/* set up channel irq */ +	if (pdev->id >= 0) +		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id), +			 "sh-dmae%d.%d", pdev->id, id); +	else +		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id), +			 "sh-dma%d", id); + +	err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id); +	if (err) { +		dev_err(sdev->dma_dev.dev, +			"DMA channel %d request_irq error %d\n", +			id, err); +		goto err_no_irq; +	} + +	shdev->chan[id] = sh_chan; +	return 0; + +err_no_irq: +	/* remove from dmaengine device node */ +	shdma_chan_remove(schan); +	return err; +} + +static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) +{ +	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev; +	struct shdma_chan *schan; +	int i; + +	shdma_for_each_chan(schan, &shdev->shdma_dev, i) { +		BUG_ON(!schan); + +		shdma_chan_remove(schan); +	} +	dma_dev->chancnt = 0; +} + +static void sh_dmae_shutdown(struct platform_device *pdev) +{ +	struct sh_dmae_device *shdev = platform_get_drvdata(pdev); +	sh_dmae_ctl_stop(shdev); +} + +static int sh_dmae_runtime_suspend(struct device *dev) +{ +	return 0; +} + +static int sh_dmae_runtime_resume(struct device *dev) +{ +	struct sh_dmae_device *shdev = dev_get_drvdata(dev); + +	return sh_dmae_rst(shdev); +} + +#ifdef CONFIG_PM +static int sh_dmae_suspend(struct device *dev) +{ +	return 0; +} + +static int sh_dmae_resume(struct device *dev) +{ +	struct sh_dmae_device *shdev = dev_get_drvdata(dev); +	int i, ret; + +	ret = sh_dmae_rst(shdev); +	if (ret < 0) +		dev_err(dev, "Failed to reset!\n"); + +	for (i = 0; i < shdev->pdata->channel_num; i++) { +		struct sh_dmae_chan *sh_chan = shdev->chan[i]; + +		if (!sh_chan->shdma_chan.desc_num) +			continue; + +		if (sh_chan->shdma_chan.slave_id >= 0) { +			const struct sh_dmae_slave_config *cfg = sh_chan->config; +			dmae_set_dmars(sh_chan, cfg->mid_rid); +			dmae_set_chcr(sh_chan, cfg->chcr); +		} else { +			dmae_init(sh_chan); +		} +	} + +	return 0; +} +#else +#define sh_dmae_suspend NULL +#define sh_dmae_resume NULL +#endif + +static const struct dev_pm_ops sh_dmae_pm = { +	.suspend		= sh_dmae_suspend, +	.resume			= sh_dmae_resume, +	.runtime_suspend	= 
sh_dmae_runtime_suspend, +	.runtime_resume		= sh_dmae_runtime_resume, +}; + +static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan) +{ +	struct sh_dmae_chan *sh_chan = container_of(schan, +					struct sh_dmae_chan, shdma_chan); + +	/* +	 * Implicit BUG_ON(!sh_chan->config) +	 * This is an exclusive slave DMA operation, may only be called after a +	 * successful slave configuration. +	 */ +	return sh_chan->slave_addr; +} + +static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i) +{ +	return &((struct sh_dmae_desc *)buf)[i].shdma_desc; +} + +static const struct shdma_ops sh_dmae_shdma_ops = { +	.desc_completed = sh_dmae_desc_completed, +	.halt_channel = sh_dmae_halt, +	.channel_busy = sh_dmae_channel_busy, +	.slave_addr = sh_dmae_slave_addr, +	.desc_setup = sh_dmae_desc_setup, +	.set_slave = sh_dmae_set_slave, +	.setup_xfer = sh_dmae_setup_xfer, +	.start_xfer = sh_dmae_start_xfer, +	.embedded_desc = sh_dmae_embedded_desc, +	.chan_irq = sh_dmae_chan_irq, +	.get_partial = sh_dmae_get_partial, +}; + +static const struct of_device_id sh_dmae_of_match[] = { +	{.compatible = "renesas,shdma-r8a73a4", .data = r8a73a4_shdma_devid,}, +	{} +}; +MODULE_DEVICE_TABLE(of, sh_dmae_of_match); + +static int sh_dmae_probe(struct platform_device *pdev) +{ +	const struct sh_dmae_pdata *pdata; +	unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {}; +	int chan_irq[SH_DMAE_MAX_CHANNELS]; +#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM) +	unsigned long irqflags = 0; +	int errirq; +#endif +	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0; +	struct sh_dmae_device *shdev; +	struct dma_device *dma_dev; +	struct resource *chan, *dmars, *errirq_res, *chanirq_res; + +	if (pdev->dev.of_node) +		pdata = of_match_device(sh_dmae_of_match, &pdev->dev)->data; +	else +		pdata = dev_get_platdata(&pdev->dev); + +	/* get platform data */ +	if (!pdata || !pdata->channel_num) +		return -ENODEV; + +	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	/* DMARS area is optional */ +	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1); +	/* +	 * IRQ resources: +	 * 1. there always must be at least one IRQ IO-resource. On SH4 it is +	 *    the error IRQ, in which case it is the only IRQ in this resource: +	 *    start == end. If it is the only IRQ resource, all channels also +	 *    use the same IRQ. +	 * 2. DMA channel IRQ resources can be specified one per resource or in +	 *    ranges (start != end) +	 * 3. iff all events (channels and, optionally, error) on this +	 *    controller use the same IRQ, only one IRQ resource can be +	 *    specified, otherwise there must be one IRQ per channel, even if +	 *    some of them are equal +	 * 4. 
if all IRQs on this controller are equal or if some specific IRQs +	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be +	 *    requested with the IRQF_SHARED flag +	 */ +	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); +	if (!chan || !errirq_res) +		return -ENODEV; + +	shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device), +			     GFP_KERNEL); +	if (!shdev) { +		dev_err(&pdev->dev, "Not enough memory\n"); +		return -ENOMEM; +	} + +	dma_dev = &shdev->shdma_dev.dma_dev; + +	shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan); +	if (IS_ERR(shdev->chan_reg)) +		return PTR_ERR(shdev->chan_reg); +	if (dmars) { +		shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars); +		if (IS_ERR(shdev->dmars)) +			return PTR_ERR(shdev->dmars); +	} + +	if (!pdata->slave_only) +		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); +	if (pdata->slave && pdata->slave_num) +		dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); + +	/* Default transfer size of 32 bytes requires 32-byte alignment */ +	dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE; + +	shdev->shdma_dev.ops = &sh_dmae_shdma_ops; +	shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc); +	err = shdma_init(&pdev->dev, &shdev->shdma_dev, +			      pdata->channel_num); +	if (err < 0) +		goto eshdma; + +	/* platform data */ +	shdev->pdata = pdata; + +	if (pdata->chcr_offset) +		shdev->chcr_offset = pdata->chcr_offset; +	else +		shdev->chcr_offset = CHCR; + +	if (pdata->chcr_ie_bit) +		shdev->chcr_ie_bit = pdata->chcr_ie_bit; +	else +		shdev->chcr_ie_bit = CHCR_IE; + +	platform_set_drvdata(pdev, shdev); + +	pm_runtime_enable(&pdev->dev); +	err = pm_runtime_get_sync(&pdev->dev); +	if (err < 0) +		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err); + +	spin_lock_irq(&sh_dmae_lock); +	list_add_tail_rcu(&shdev->node, &sh_dmae_devices); +	spin_unlock_irq(&sh_dmae_lock); + +	/* reset dma controller - only needed as a test */ +	err = sh_dmae_rst(shdev); +	if (err) +		goto rst_err; + +#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) +	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); + +	if (!chanirq_res) +		chanirq_res = errirq_res; +	else +		irqres++; + +	if (chanirq_res == errirq_res || +	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE) +		irqflags = IRQF_SHARED; + +	errirq = errirq_res->start; + +	err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags, +			       "DMAC Address Error", shdev); +	if (err) { +		dev_err(&pdev->dev, +			"DMA failed requesting irq #%d, error %d\n", +			errirq, err); +		goto eirq_err; +	} + +#else +	chanirq_res = errirq_res; +#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */ + +	if (chanirq_res->start == chanirq_res->end && +	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { +		/* Special case - all multiplexed */ +		for (; irq_cnt < pdata->channel_num; irq_cnt++) { +			if (irq_cnt < SH_DMAE_MAX_CHANNELS) { +				chan_irq[irq_cnt] = chanirq_res->start; +				chan_flag[irq_cnt] = IRQF_SHARED; +			} else { +				irq_cap = 1; +				break; +			} +		} +	} else { +		do { +			for (i = chanirq_res->start; i <= chanirq_res->end; i++) { +				if (irq_cnt >= SH_DMAE_MAX_CHANNELS) { +					irq_cap = 1; +					break; +				} + +				if ((errirq_res->flags & IORESOURCE_BITS) == +				    IORESOURCE_IRQ_SHAREABLE) +					chan_flag[irq_cnt] = IRQF_SHARED; +				else +					chan_flag[irq_cnt] = 0; +				dev_dbg(&pdev->dev, +					"Found IRQ %d for channel %d\n", +					i, irq_cnt); +				chan_irq[irq_cnt++] = i; +			} + +			if (irq_cnt >= SH_DMAE_MAX_CHANNELS) +				
break; + +			chanirq_res = platform_get_resource(pdev, +						IORESOURCE_IRQ, ++irqres); +		} while (irq_cnt < pdata->channel_num && chanirq_res); +	} + +	/* Create DMA Channel */ +	for (i = 0; i < irq_cnt; i++) { +		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]); +		if (err) +			goto chan_probe_err; +	} + +	if (irq_cap) +		dev_notice(&pdev->dev, "Attempting to register %d DMA " +			   "channels when a maximum of %d are supported.\n", +			   pdata->channel_num, SH_DMAE_MAX_CHANNELS); + +	pm_runtime_put(&pdev->dev); + +	err = dma_async_device_register(&shdev->shdma_dev.dma_dev); +	if (err < 0) +		goto edmadevreg; + +	return err; + +edmadevreg: +	pm_runtime_get(&pdev->dev); + +chan_probe_err: +	sh_dmae_chan_remove(shdev); + +#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) +eirq_err: +#endif +rst_err: +	spin_lock_irq(&sh_dmae_lock); +	list_del_rcu(&shdev->node); +	spin_unlock_irq(&sh_dmae_lock); + +	pm_runtime_put(&pdev->dev); +	pm_runtime_disable(&pdev->dev); + +	shdma_cleanup(&shdev->shdma_dev); +eshdma: +	synchronize_rcu(); + +	return err; +} + +static int sh_dmae_remove(struct platform_device *pdev) +{ +	struct sh_dmae_device *shdev = platform_get_drvdata(pdev); +	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev; + +	dma_async_device_unregister(dma_dev); + +	spin_lock_irq(&sh_dmae_lock); +	list_del_rcu(&shdev->node); +	spin_unlock_irq(&sh_dmae_lock); + +	pm_runtime_disable(&pdev->dev); + +	sh_dmae_chan_remove(shdev); +	shdma_cleanup(&shdev->shdma_dev); + +	synchronize_rcu(); + +	return 0; +} + +static struct platform_driver sh_dmae_driver = { +	.driver 	= { +		.owner	= THIS_MODULE, +		.pm	= &sh_dmae_pm, +		.name	= SH_DMAE_DRV_NAME, +		.of_match_table = sh_dmae_of_match, +	}, +	.remove		= sh_dmae_remove, +	.shutdown	= sh_dmae_shutdown, +}; + +static int __init sh_dmae_init(void) +{ +	/* Wire up NMI handling */ +	int err = register_die_notifier(&sh_dmae_nmi_notifier); +	if (err) +		return err; + +	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe); +} +module_init(sh_dmae_init); + +static void __exit sh_dmae_exit(void) +{ +	platform_driver_unregister(&sh_dmae_driver); + +	unregister_die_notifier(&sh_dmae_nmi_notifier); +} +module_exit(sh_dmae_exit); + +MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>"); +MODULE_DESCRIPTION("Renesas SH DMA Engine driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME); diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c new file mode 100644 index 00000000000..3ce10390989 --- /dev/null +++ b/drivers/dma/sh/sudmac.c @@ -0,0 +1,425 @@ +/* + * Renesas SUDMAC support + * + * Copyright (C) 2013 Renesas Solutions Corp. + * + * based on drivers/dma/sh/shdma.c: + * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de> + * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> + * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. + * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. + * + * This is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ */ + +#include <linux/dmaengine.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/sudmac.h> + +struct sudmac_chan { +	struct shdma_chan shdma_chan; +	void __iomem *base; +	char dev_id[16];	/* unique name per DMAC of channel */ + +	u32 offset;		/* for CFG, BA, BBC, CA, CBC, DEN */ +	u32 cfg; +	u32 dint_end_bit; +}; + +struct sudmac_device { +	struct shdma_dev shdma_dev; +	struct sudmac_pdata *pdata; +	void __iomem *chan_reg; +}; + +struct sudmac_regs { +	u32 base_addr; +	u32 base_byte_count; +}; + +struct sudmac_desc { +	struct sudmac_regs hw; +	struct shdma_desc shdma_desc; +}; + +#define to_chan(schan) container_of(schan, struct sudmac_chan, shdma_chan) +#define to_desc(sdesc) container_of(sdesc, struct sudmac_desc, shdma_desc) +#define to_sdev(sc) container_of(sc->shdma_chan.dma_chan.device, \ +				 struct sudmac_device, shdma_dev.dma_dev) + +/* SUDMAC register */ +#define SUDMAC_CH0CFG		0x00 +#define SUDMAC_CH0BA		0x10 +#define SUDMAC_CH0BBC		0x18 +#define SUDMAC_CH0CA		0x20 +#define SUDMAC_CH0CBC		0x28 +#define SUDMAC_CH0DEN		0x30 +#define SUDMAC_DSTSCLR		0x38 +#define SUDMAC_DBUFCTRL		0x3C +#define SUDMAC_DINTCTRL		0x40 +#define SUDMAC_DINTSTS		0x44 +#define SUDMAC_DINTSTSCLR	0x48 +#define SUDMAC_CH0SHCTRL	0x50 + +/* Definitions for the sudmac_channel.config */ +#define SUDMAC_SENDBUFM	0x1000 /* b12: Transmit Buffer Mode */ +#define SUDMAC_RCVENDM	0x0100 /* b8: Receive Data Transfer End Mode */ +#define SUDMAC_LBA_WAIT	0x0030 /* b5-4: Local Bus Access Wait */ + +/* Definitions for the sudmac_channel.dint_end_bit */ +#define SUDMAC_CH1ENDE	0x0002 /* b1: Ch1 DMA Transfer End Int Enable */ +#define SUDMAC_CH0ENDE	0x0001 /* b0: Ch0 DMA Transfer End Int Enable */ + +#define SUDMAC_DRV_NAME "sudmac" + +static void sudmac_writel(struct sudmac_chan *sc, u32 data, u32 reg) +{ +	iowrite32(data, sc->base + reg); +} + +static u32 sudmac_readl(struct sudmac_chan *sc, u32 reg) +{ +	return ioread32(sc->base + reg); +} + +static bool sudmac_is_busy(struct sudmac_chan *sc) +{ +	u32 den = sudmac_readl(sc, SUDMAC_CH0DEN + sc->offset); + +	if (den) +		return true; /* working */ + +	return false; /* waiting */ +} + +static void sudmac_set_reg(struct sudmac_chan *sc, struct sudmac_regs *hw, +			   struct shdma_desc *sdesc) +{ +	sudmac_writel(sc, sc->cfg, SUDMAC_CH0CFG + sc->offset); +	sudmac_writel(sc, hw->base_addr, SUDMAC_CH0BA + sc->offset); +	sudmac_writel(sc, hw->base_byte_count, SUDMAC_CH0BBC + sc->offset); +} + +static void sudmac_start(struct sudmac_chan *sc) +{ +	u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL); + +	sudmac_writel(sc, dintctrl | sc->dint_end_bit, SUDMAC_DINTCTRL); +	sudmac_writel(sc, 1, SUDMAC_CH0DEN + sc->offset); +} + +static void sudmac_start_xfer(struct shdma_chan *schan, +			      struct shdma_desc *sdesc) +{ +	struct sudmac_chan *sc = to_chan(schan); +	struct sudmac_desc *sd = to_desc(sdesc); + +	sudmac_set_reg(sc, &sd->hw, sdesc); +	sudmac_start(sc); +} + +static bool sudmac_channel_busy(struct shdma_chan *schan) +{ +	struct sudmac_chan *sc = to_chan(schan); + +	return sudmac_is_busy(sc); +} + +static void sudmac_setup_xfer(struct shdma_chan *schan, int slave_id) +{ +} + +static const struct sudmac_slave_config *sudmac_find_slave( +	struct sudmac_chan *sc, int slave_id) +{ +	struct sudmac_device *sdev = to_sdev(sc); +	struct sudmac_pdata *pdata = sdev->pdata; +	const struct sudmac_slave_config *cfg; +	int i; + +	for (i = 0, cfg = 
pdata->slave; i < pdata->slave_num; i++, cfg++) +		if (cfg->slave_id == slave_id) +			return cfg; + +	return NULL; +} + +static int sudmac_set_slave(struct shdma_chan *schan, int slave_id, +			    dma_addr_t slave_addr, bool try) +{ +	struct sudmac_chan *sc = to_chan(schan); +	const struct sudmac_slave_config *cfg = sudmac_find_slave(sc, slave_id); + +	if (!cfg) +		return -ENODEV; + +	return 0; +} + +static inline void sudmac_dma_halt(struct sudmac_chan *sc) +{ +	u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL); + +	sudmac_writel(sc, 0, SUDMAC_CH0DEN + sc->offset); +	sudmac_writel(sc, dintctrl & ~sc->dint_end_bit, SUDMAC_DINTCTRL); +	sudmac_writel(sc, sc->dint_end_bit, SUDMAC_DINTSTSCLR); +} + +static int sudmac_desc_setup(struct shdma_chan *schan, +			     struct shdma_desc *sdesc, +			     dma_addr_t src, dma_addr_t dst, size_t *len) +{ +	struct sudmac_chan *sc = to_chan(schan); +	struct sudmac_desc *sd = to_desc(sdesc); + +	dev_dbg(sc->shdma_chan.dev, "%s: src=%pad, dst=%pad, len=%zu\n", +		__func__, &src, &dst, *len); + +	if (*len > schan->max_xfer_len) +		*len = schan->max_xfer_len; + +	if (dst) +		sd->hw.base_addr = dst; +	else if (src) +		sd->hw.base_addr = src; +	sd->hw.base_byte_count = *len; + +	return 0; +} + +static void sudmac_halt(struct shdma_chan *schan) +{ +	struct sudmac_chan *sc = to_chan(schan); + +	sudmac_dma_halt(sc); +} + +static bool sudmac_chan_irq(struct shdma_chan *schan, int irq) +{ +	struct sudmac_chan *sc = to_chan(schan); +	u32 dintsts = sudmac_readl(sc, SUDMAC_DINTSTS); + +	if (!(dintsts & sc->dint_end_bit)) +		return false; + +	/* DMA stop */ +	sudmac_dma_halt(sc); + +	return true; +} + +static size_t sudmac_get_partial(struct shdma_chan *schan, +				 struct shdma_desc *sdesc) +{ +	struct sudmac_chan *sc = to_chan(schan); +	struct sudmac_desc *sd = to_desc(sdesc); +	u32 current_byte_count = sudmac_readl(sc, SUDMAC_CH0CBC + sc->offset); + +	return sd->hw.base_byte_count - current_byte_count; +} + +static bool sudmac_desc_completed(struct shdma_chan *schan, +				  struct shdma_desc *sdesc) +{ +	struct sudmac_chan *sc = to_chan(schan); +	struct sudmac_desc *sd = to_desc(sdesc); +	u32 current_addr = sudmac_readl(sc, SUDMAC_CH0CA + sc->offset); + +	return sd->hw.base_addr + sd->hw.base_byte_count == current_addr; +} + +static int sudmac_chan_probe(struct sudmac_device *su_dev, int id, int irq, +			     unsigned long flags) +{ +	struct shdma_dev *sdev = &su_dev->shdma_dev; +	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev); +	struct sudmac_chan *sc; +	struct shdma_chan *schan; +	int err; + +	sc = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_chan), GFP_KERNEL); +	if (!sc) { +		dev_err(sdev->dma_dev.dev, +			"No free memory for allocating dma channels!\n"); +		return -ENOMEM; +	} + +	schan = &sc->shdma_chan; +	schan->max_xfer_len = 64 * 1024 * 1024 - 1; + +	shdma_chan_probe(sdev, schan, id); + +	sc->base = su_dev->chan_reg; + +	/* get platform_data */ +	sc->offset = su_dev->pdata->channel->offset; +	if (su_dev->pdata->channel->config & SUDMAC_TX_BUFFER_MODE) +		sc->cfg |= SUDMAC_SENDBUFM; +	if (su_dev->pdata->channel->config & SUDMAC_RX_END_MODE) +		sc->cfg |= SUDMAC_RCVENDM; +	sc->cfg |= (su_dev->pdata->channel->wait << 4) & SUDMAC_LBA_WAIT; + +	if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH0) +		sc->dint_end_bit |= SUDMAC_CH0ENDE; +	if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH1) +		sc->dint_end_bit |= SUDMAC_CH1ENDE; + +	/* set up channel irq */ +	if (pdev->id >= 0) +		snprintf(sc->dev_id, sizeof(sc->dev_id), 
"sudmac%d.%d", +			 pdev->id, id); +	else +		snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d", id); + +	err = shdma_request_irq(schan, irq, flags, sc->dev_id); +	if (err) { +		dev_err(sdev->dma_dev.dev, +			"DMA channel %d request_irq failed %d\n", id, err); +		goto err_no_irq; +	} + +	return 0; + +err_no_irq: +	/* remove from dmaengine device node */ +	shdma_chan_remove(schan); +	return err; +} + +static void sudmac_chan_remove(struct sudmac_device *su_dev) +{ +	struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev; +	struct shdma_chan *schan; +	int i; + +	shdma_for_each_chan(schan, &su_dev->shdma_dev, i) { +		BUG_ON(!schan); + +		shdma_chan_remove(schan); +	} +	dma_dev->chancnt = 0; +} + +static dma_addr_t sudmac_slave_addr(struct shdma_chan *schan) +{ +	/* SUDMAC doesn't need the address */ +	return 0; +} + +static struct shdma_desc *sudmac_embedded_desc(void *buf, int i) +{ +	return &((struct sudmac_desc *)buf)[i].shdma_desc; +} + +static const struct shdma_ops sudmac_shdma_ops = { +	.desc_completed = sudmac_desc_completed, +	.halt_channel = sudmac_halt, +	.channel_busy = sudmac_channel_busy, +	.slave_addr = sudmac_slave_addr, +	.desc_setup = sudmac_desc_setup, +	.set_slave = sudmac_set_slave, +	.setup_xfer = sudmac_setup_xfer, +	.start_xfer = sudmac_start_xfer, +	.embedded_desc = sudmac_embedded_desc, +	.chan_irq = sudmac_chan_irq, +	.get_partial = sudmac_get_partial, +}; + +static int sudmac_probe(struct platform_device *pdev) +{ +	struct sudmac_pdata *pdata = dev_get_platdata(&pdev->dev); +	int err, i; +	struct sudmac_device *su_dev; +	struct dma_device *dma_dev; +	struct resource *chan, *irq_res; + +	/* get platform data */ +	if (!pdata) +		return -ENODEV; + +	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); +	if (!irq_res) +		return -ENODEV; + +	err = -ENOMEM; +	su_dev = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_device), +			      GFP_KERNEL); +	if (!su_dev) { +		dev_err(&pdev->dev, "Not enough memory\n"); +		return err; +	} + +	dma_dev = &su_dev->shdma_dev.dma_dev; + +	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	su_dev->chan_reg = devm_ioremap_resource(&pdev->dev, chan); +	if (IS_ERR(su_dev->chan_reg)) +		return PTR_ERR(su_dev->chan_reg); + +	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); + +	su_dev->shdma_dev.ops = &sudmac_shdma_ops; +	su_dev->shdma_dev.desc_size = sizeof(struct sudmac_desc); +	err = shdma_init(&pdev->dev, &su_dev->shdma_dev, pdata->channel_num); +	if (err < 0) +		return err; + +	/* platform data */ +	su_dev->pdata = dev_get_platdata(&pdev->dev); + +	platform_set_drvdata(pdev, su_dev); + +	/* Create DMA Channel */ +	for (i = 0; i < pdata->channel_num; i++) { +		err = sudmac_chan_probe(su_dev, i, irq_res->start, IRQF_SHARED); +		if (err) +			goto chan_probe_err; +	} + +	err = dma_async_device_register(&su_dev->shdma_dev.dma_dev); +	if (err < 0) +		goto chan_probe_err; + +	return err; + +chan_probe_err: +	sudmac_chan_remove(su_dev); + +	shdma_cleanup(&su_dev->shdma_dev); + +	return err; +} + +static int sudmac_remove(struct platform_device *pdev) +{ +	struct sudmac_device *su_dev = platform_get_drvdata(pdev); +	struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev; + +	dma_async_device_unregister(dma_dev); +	sudmac_chan_remove(su_dev); +	shdma_cleanup(&su_dev->shdma_dev); + +	return 0; +} + +static struct platform_driver sudmac_driver = { +	.driver		= { +		.owner	= THIS_MODULE, +		.name	= SUDMAC_DRV_NAME, +	}, +	.probe		= sudmac_probe, +	.remove		= sudmac_remove, +}; +module_platform_driver(sudmac_driver); + +MODULE_AUTHOR("Yoshihiro 
Shimoda"); +MODULE_DESCRIPTION("Renesas SUDMAC driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:" SUDMAC_DRV_NAME);  | 
