From ff0e0f4f568e8d7593e0035c0c58067bcaf4ab07 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Mon, 22 Apr 2013 10:33:32 +0200 Subject: dma: of: Remove restriction that #dma-cells can't be 0 There is no sensible reason why #dma-cells shouldn't be allowed to be 0. It is completely up to the DMA controller how many additional parameters, besides the phandle, it needs to identify a channel. E.g. for a DMA controller with only one channel, or for DMA controllers which don't have a restriction on which channel can be used for which peripheral, it is completely legitimate to not require any additional parameters. Also fixes the following warning: drivers/dma/of-dma.c: In function 'of_dma_controller_register': drivers/dma/of-dma.c:67:7: warning: 'nbcells' may be used uninitialized in this function Signed-off-by: Lars-Peter Clausen Acked-by: Arnd Bergmann Signed-off-by: Vinod Koul --- drivers/dma/of-dma.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index 7aa0864cd48..268cc8ab34e 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -64,7 +64,6 @@ int of_dma_controller_register(struct device_node *np, void *data) { struct of_dma *ofdma; - int nbcells; const __be32 *prop; if (!np || !of_dma_xlate) { @@ -77,18 +76,16 @@ int of_dma_controller_register(struct device_node *np, return -ENOMEM; prop = of_get_property(np, "#dma-cells", NULL); - if (prop) - nbcells = be32_to_cpup(prop); - - if (!prop || !nbcells) { - pr_err("%s: #dma-cells property is missing or invalid\n", + if (!prop) { + pr_err("%s: #dma-cells property is missing\n", __func__); kfree(ofdma); return -EINVAL; } + ofdma->of_node = np; - ofdma->of_dma_nbcells = nbcells; + ofdma->of_dma_nbcells = be32_to_cpup(prop); ofdma->of_dma_xlate = of_dma_xlate; ofdma->of_dma_data = data; -- cgit v1.2.3-18-g5258 From 8552bb4f16800d5ebc176a2cf5f2aa55b22731ea Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Mon, 22 Apr 2013 10:33:33 +0200 Subject: dma: of: Remove check on always true condition Both the of_dma_nbcells field of the of_dma_controller and the args_count field of the dma_spec are initialized by parsing the #dma-cells attribute of their device tree node. So if the device tree nodes of a DMA controller and the dma_spec match, of_dma_nbcells and args_count will also match. The second test in the of_dma_find_controller loop is therefore redundant: if the first test yields true, the second will also yield true. So we can safely remove the test of whether of_dma_nbcells matches args_count. Since this was the last user of the of_dma_nbcells field we can remove it altogether.
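For readers following the diff, this is what the controller lookup reduces to once the cell-count comparison is gone. The body below is condensed from the hunk that follows (the pr_debug() on a miss is left out) and is a reading aid, not an exact listing:

static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec)
{
        struct of_dma *ofdma;

        /* A controller is now identified by its device node alone;
         * the of_dma_nbcells field no longer exists. */
        list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
                if (ofdma->of_node == dma_spec->np)
                        return ofdma;

        return NULL;
}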
Signed-off-by: Lars-Peter Clausen Acked-by: Arnd Bergmann Signed-off-by: Vinod Koul --- drivers/dma/of-dma.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index 268cc8ab34e..75334bdd2c5 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -35,8 +35,7 @@ static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec) struct of_dma *ofdma; list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers) - if ((ofdma->of_node == dma_spec->np) && - (ofdma->of_dma_nbcells == dma_spec->args_count)) + if (ofdma->of_node == dma_spec->np) return ofdma; pr_debug("%s: can't find DMA controller %s\n", __func__, @@ -64,7 +63,6 @@ int of_dma_controller_register(struct device_node *np, void *data) { struct of_dma *ofdma; - const __be32 *prop; if (!np || !of_dma_xlate) { pr_err("%s: not enough information provided\n", __func__); @@ -75,17 +73,7 @@ int of_dma_controller_register(struct device_node *np, if (!ofdma) return -ENOMEM; - prop = of_get_property(np, "#dma-cells", NULL); - if (!prop) { - pr_err("%s: #dma-cells property is missing\n", - __func__); - kfree(ofdma); - return -EINVAL; - } - - ofdma->of_node = np; - ofdma->of_dma_nbcells = be32_to_cpup(prop); ofdma->of_dma_xlate = of_dma_xlate; ofdma->of_dma_data = data; -- cgit v1.2.3-18-g5258 From 290ad0f9d954b445788bf26652b239c59cec2060 Mon Sep 17 00:00:00 2001 From: Markus Pargmann Date: Sun, 26 May 2013 11:53:20 +0200 Subject: dma: imx-dma: Add oftree support Adding devicetree support for imx-dma driver. Use driver name for function 'imx_dma_is_general_purpose' because the devicename for devicetree initialized devices is different. Signed-off-by: Markus Pargmann Reviewed-by: Arnd Bergmann Reviewed-by: Shawn Guo Acked-by: Sascha Hauer Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 75 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) (limited to 'drivers/dma') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index f28583370d0..34c54cf0823 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -27,6 +27,8 @@ #include #include #include +#include +#include #include #include @@ -186,6 +188,11 @@ struct imxdma_engine { enum imx_dma_type devtype; }; +struct imxdma_filter_data { + struct imxdma_engine *imxdma; + int request; +}; + static struct platform_device_id imx_dma_devtype[] = { { .name = "imx1-dma", @@ -202,6 +209,22 @@ static struct platform_device_id imx_dma_devtype[] = { }; MODULE_DEVICE_TABLE(platform, imx_dma_devtype); +static const struct of_device_id imx_dma_of_dev_id[] = { + { + .compatible = "fsl,imx1-dma", + .data = &imx_dma_devtype[IMX1_DMA], + }, { + .compatible = "fsl,imx21-dma", + .data = &imx_dma_devtype[IMX21_DMA], + }, { + .compatible = "fsl,imx27-dma", + .data = &imx_dma_devtype[IMX27_DMA], + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(of, imx_dma_of_dev_id); + static inline int is_imx1_dma(struct imxdma_engine *imxdma) { return imxdma->devtype == IMX1_DMA; @@ -996,13 +1019,50 @@ static void imxdma_issue_pending(struct dma_chan *chan) spin_unlock_irqrestore(&imxdma->lock, flags); } +static bool imxdma_filter_fn(struct dma_chan *chan, void *param) +{ + struct imxdma_filter_data *fdata = param; + struct imxdma_channel *imxdma_chan = to_imxdma_chan(chan); + + if (chan->device->dev != fdata->imxdma->dev) + return false; + + imxdma_chan->dma_request = fdata->request; + chan->private = NULL; + + return true; +} + +static struct dma_chan 
*imxdma_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + int count = dma_spec->args_count; + struct imxdma_engine *imxdma = ofdma->of_dma_data; + struct imxdma_filter_data fdata = { + .imxdma = imxdma, + }; + + if (count != 1) + return NULL; + + fdata.request = dma_spec->args[0]; + + return dma_request_channel(imxdma->dma_device.cap_mask, + imxdma_filter_fn, &fdata); +} + static int __init imxdma_probe(struct platform_device *pdev) { struct imxdma_engine *imxdma; struct resource *res; + const struct of_device_id *of_id; int ret, i; int irq, irq_err; + of_id = of_match_device(imx_dma_of_dev_id, &pdev->dev); + if (of_id) + pdev->id_entry = of_id->data; + imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL); if (!imxdma) return -ENOMEM; @@ -1136,8 +1196,19 @@ static int __init imxdma_probe(struct platform_device *pdev) goto err; } + if (pdev->dev.of_node) { + ret = of_dma_controller_register(pdev->dev.of_node, + imxdma_xlate, imxdma); + if (ret) { + dev_err(&pdev->dev, "unable to register of_dma_controller\n"); + goto err_of_dma_controller; + } + } + return 0; +err_of_dma_controller: + dma_async_device_unregister(&imxdma->dma_device); err: clk_disable_unprepare(imxdma->dma_ipg); clk_disable_unprepare(imxdma->dma_ahb); @@ -1150,6 +1221,9 @@ static int imxdma_remove(struct platform_device *pdev) dma_async_device_unregister(&imxdma->dma_device); + if (pdev->dev.of_node) + of_dma_controller_free(pdev->dev.of_node); + clk_disable_unprepare(imxdma->dma_ipg); clk_disable_unprepare(imxdma->dma_ahb); @@ -1159,6 +1233,7 @@ static int imxdma_remove(struct platform_device *pdev) static struct platform_driver imxdma_driver = { .driver = { .name = "imx-dma", + .of_match_table = imx_dma_of_dev_id, }, .id_table = imx_dma_devtype, .remove = imxdma_remove, -- cgit v1.2.3-18-g5258 From 5c6b3e7725384f02418d80e7dc32b1a690497004 Mon Sep 17 00:00:00 2001 From: Markus Pargmann Date: Sun, 26 May 2013 11:53:21 +0200 Subject: DMA: imx-dma: imxdma->dev used uninitialized imxdma->dev is used for dev_warn before it was set. 
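The fix is purely an ordering change in imxdma_probe(): the dev pointer must be valid before the first dev_warn()/dev_err() that dereferences it. A minimal sketch of the intended order (the rest of probe is unchanged and elided):

        imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
        if (!imxdma)
                return -ENOMEM;

        imxdma->dev = &pdev->dev;       /* set early, before any dev_warn(imxdma->dev, ...) */
        imxdma->devtype = pdev->id_entry->driver_data;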
Signed-off-by: Markus Pargmann Reviewed-by: Shawn Guo Acked-by: Sascha Hauer Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 34c54cf0823..ff2aab973b4 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -1067,6 +1067,7 @@ static int __init imxdma_probe(struct platform_device *pdev) if (!imxdma) return -ENOMEM; + imxdma->dev = &pdev->dev; imxdma->devtype = pdev->id_entry->driver_data; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1171,7 +1172,6 @@ static int __init imxdma_probe(struct platform_device *pdev) &imxdma->dma_device.channels); } - imxdma->dev = &pdev->dev; imxdma->dma_device.dev = &pdev->dev; imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources; -- cgit v1.2.3-18-g5258 From ea7e79063e604c89b16b819d2e88b20c421d9514 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Fri, 10 May 2013 15:19:13 +0200 Subject: dmaengine: at_hdmac/trivial: correct typo in comment Signed-off-by: Nicolas Ferre Acked-by: Jean-Christophe PLAGNIOL-VILLARD Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index e923cda930f..cd494209352 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -1120,7 +1120,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan) */ BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev); - /* if cfg configuration specified take it instad of default */ + /* if cfg configuration specified take it instead of default */ if (atslave->cfg) cfg = atslave->cfg; } -- cgit v1.2.3-18-g5258 From 72ae6e4b31e40397eaa81007b39a1074638a6798 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Fri, 10 May 2013 15:19:14 +0200 Subject: dmaengine: at_hdmac: extend hardware handshaking interface identification Peripheral handshaking identification numbers can be bigger than 15, so new fields have been created in the CFG register. Add macros to take this modification into account and use them in at_dma_xlate() function. Signed-off-by: Nicolas Ferre Acked-by: Jean-Christophe PLAGNIOL-VILLARD Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/dma') diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index cd494209352..78c3fb4b4e4 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -1230,6 +1230,8 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, per_id = dma_spec->args[1]; atslave->cfg = ATC_FIFOCFG_HALFFIFO | ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW | ATC_DST_PER(per_id) + | ATC_DST_PER_MSB(per_id) + | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id); atslave->dma_dev = &dmac_pdev->dev; -- cgit v1.2.3-18-g5258 From 6c22770f644bf23aecc11fedd7b305488a861bfc Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Fri, 10 May 2013 15:19:15 +0200 Subject: dmaengine: at_hdmac/trivial: rearrange CFG register bits assignment No modification in CFG register configuration, just rearrange bits directives to group logically and make it more readable. 
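Taken together with the previous patch, at_dma_xlate() now programs both the classic peripheral-ID field and the new MSB field for each direction. The resulting assignment, as it appears in the hunk below (how the ATC_*_PER*() macros split per_id across CFG register bits is defined in at_hdmac_regs.h and not shown here):

        atslave->cfg = ATC_FIFOCFG_HALFFIFO
                     | ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW
                     | ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
                     | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);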
Signed-off-by: Nicolas Ferre Acked-by: Jean-Christophe PLAGNIOL-VILLARD Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 78c3fb4b4e4..9e1ad73a78c 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -1228,11 +1228,10 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, * ignored depending on DMA transfer direction. */ per_id = dma_spec->args[1]; - atslave->cfg = ATC_FIFOCFG_HALFFIFO | ATC_DST_H2SEL_HW - | ATC_SRC_H2SEL_HW | ATC_DST_PER(per_id) - | ATC_DST_PER_MSB(per_id) - | ATC_SRC_PER_MSB(per_id) - | ATC_SRC_PER(per_id); + atslave->cfg = ATC_FIFOCFG_HALFFIFO + | ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW + | ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id) + | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id); atslave->dma_dev = &dmac_pdev->dev; chan = dma_request_channel(mask, at_dma_filter, atslave); -- cgit v1.2.3-18-g5258 From d088c33b646e9f3564eea7a057a2cb697c18bcd0 Mon Sep 17 00:00:00 2001 From: Elen Song Date: Fri, 10 May 2013 11:00:50 +0800 Subject: DMA: AT91: Get transfer width In one dma transfer, the data transfer width can be configured and it is limited by source or destination peripheral width, tx_width will save the transfer width, but for memcpy, either source or destination transfer width is taken as tx_width. Signed-off-by: Elen Song Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 3 +++ drivers/dma/at_hdmac_regs.h | 2 ++ 2 files changed, 5 insertions(+) (limited to 'drivers/dma') diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 9e1ad73a78c..4c101a9dd3c 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -615,6 +615,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, /* First descriptor of the chain embedds additional information */ first->txd.cookie = -EBUSY; first->len = len; + first->tx_width = src_width; /* set end-of-link to the last link descriptor of list*/ set_desc_eol(desc); @@ -761,6 +762,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, /* First descriptor of the chain embedds additional information */ first->txd.cookie = -EBUSY; first->len = total_len; + first->tx_width = reg_width; /* first link descriptor of list is responsible of flags */ first->txd.flags = flags; /* client is in control of this ack */ @@ -919,6 +921,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, /* First descriptor of the chain embedds additional information */ first->txd.cookie = -EBUSY; first->len = buf_len; + first->tx_width = reg_width; return &first->txd; diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index c604d26fd4d..3679933fb64 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -182,6 +182,7 @@ struct at_lli { * @txd: support for the async_tx api * @desc_node: node on the channed descriptors list * @len: total transaction bytecount + * @tx_width: transfer width */ struct at_desc { /* FIRST values the hardware uses */ @@ -192,6 +193,7 @@ struct at_desc { struct dma_async_tx_descriptor txd; struct list_head desc_node; size_t len; + u32 tx_width; }; static inline struct at_desc * -- cgit v1.2.3-18-g5258 From d48de6f1a81b3d10de0f5765aff1b3bd788617b0 Mon Sep 17 00:00:00 2001 From: Elen Song Date: Fri, 10 May 2013 11:01:46 +0800 Subject: DMA: AT91: Get residual bytes in dma buffer Add support for returning the residue for current 
transfer cookie by reading the transfered buffer size(BTSIZE) in CTRLA register. For a single buffer cookie, the descriptor length minus BTSIZE can get the residue. For a lli cookie, remain_desc will record remain descriptor length when last descriptor finish, the remain_desc minus BTSIZE can get the current residue. If the cookie has completed successfully, the residue will be zero. If the cookie is in progress, it will be the number of bytes yet to be transferred. If get residue error, the cookie will be turn into error status. Check dma fifo to see if data remain, let issue pending finish remain work if there is. Signed-off-by: Elen Song Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 130 ++++++++++++++++++++++++++++++++++++++------ drivers/dma/at_hdmac_regs.h | 3 + 2 files changed, 115 insertions(+), 18 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 4c101a9dd3c..5ce89368a8d 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -54,6 +54,7 @@ MODULE_PARM_DESC(init_nr_desc_per_channel, /* prototypes */ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx); +static void atc_issue_pending(struct dma_chan *chan); /*----------------------------------------------------------------------*/ @@ -230,6 +231,94 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) vdbg_dump_regs(atchan); } +/* + * atc_get_current_descriptors - + * locate the descriptor which equal to physical address in DSCR + * @atchan: the channel we want to start + * @dscr_addr: physical descriptor address in DSCR + */ +static struct at_desc *atc_get_current_descriptors(struct at_dma_chan *atchan, + u32 dscr_addr) +{ + struct at_desc *desc, *_desc, *child, *desc_cur = NULL; + + list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { + if (desc->lli.dscr == dscr_addr) { + desc_cur = desc; + break; + } + + list_for_each_entry(child, &desc->tx_list, desc_node) { + if (child->lli.dscr == dscr_addr) { + desc_cur = child; + break; + } + } + } + + return desc_cur; +} + +/* + * atc_get_bytes_left - + * Get the number of bytes residue in dma buffer, + * @chan: the channel we want to start + */ +static int atc_get_bytes_left(struct dma_chan *chan) +{ + struct at_dma_chan *atchan = to_at_dma_chan(chan); + struct at_dma *atdma = to_at_dma(chan->device); + int chan_id = atchan->chan_common.chan_id; + struct at_desc *desc_first = atc_first_active(atchan); + struct at_desc *desc_cur; + int ret = 0, count = 0; + + /* + * Initialize necessary values in the first time. + * remain_desc record remain desc length. + */ + if (atchan->remain_desc == 0) + /* First descriptor embedds the transaction length */ + atchan->remain_desc = desc_first->len; + + /* + * This happens when current descriptor transfer complete. + * The residual buffer size should reduce current descriptor length. + */ + if (unlikely(test_bit(ATC_IS_BTC, &atchan->status))) { + clear_bit(ATC_IS_BTC, &atchan->status); + desc_cur = atc_get_current_descriptors(atchan, + channel_readl(atchan, DSCR)); + if (!desc_cur) { + ret = -EINVAL; + goto out; + } + atchan->remain_desc -= (desc_cur->lli.ctrla & ATC_BTSIZE_MAX) + << (desc_first->tx_width); + if (atchan->remain_desc < 0) { + ret = -EINVAL; + goto out; + } else + ret = atchan->remain_desc; + } else { + /* + * Get residual bytes when current + * descriptor transfer in progress. 
+ */ + count = (channel_readl(atchan, CTRLA) & ATC_BTSIZE_MAX) + << (desc_first->tx_width); + ret = atchan->remain_desc - count; + } + /* + * Check fifo empty. + */ + if (!(dma_readl(atdma, CHSR) & AT_DMA_EMPT(chan_id))) + atc_issue_pending(chan); + +out: + return ret; +} + /** * atc_chain_complete - finish work for one transaction chain * @atchan: channel we work on @@ -496,6 +585,8 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id) /* Give information to tasklet */ set_bit(ATC_IS_ERROR, &atchan->status); } + if (pending & AT_DMA_BTC(i)) + set_bit(ATC_IS_BTC, &atchan->status); tasklet_schedule(&atchan->tasklet); ret = IRQ_HANDLED; } @@ -1035,34 +1126,35 @@ atc_tx_status(struct dma_chan *chan, struct dma_tx_state *txstate) { struct at_dma_chan *atchan = to_at_dma_chan(chan); - dma_cookie_t last_used; - dma_cookie_t last_complete; unsigned long flags; enum dma_status ret; - - spin_lock_irqsave(&atchan->lock, flags); + int bytes = 0; ret = dma_cookie_status(chan, cookie, txstate); - if (ret != DMA_SUCCESS) { - atc_cleanup_descriptors(atchan); + if (ret == DMA_SUCCESS) + return ret; + /* + * There's no point calculating the residue if there's + * no txstate to store the value. + */ + if (!txstate) + return DMA_ERROR; - ret = dma_cookie_status(chan, cookie, txstate); - } + spin_lock_irqsave(&atchan->lock, flags); - last_complete = chan->completed_cookie; - last_used = chan->cookie; + /* Get number of bytes left in the active transactions */ + bytes = atc_get_bytes_left(chan); spin_unlock_irqrestore(&atchan->lock, flags); - if (ret != DMA_SUCCESS) - dma_set_residue(txstate, atc_first_active(atchan)->len); - - if (atc_chan_is_paused(atchan)) - ret = DMA_PAUSED; + if (unlikely(bytes < 0)) { + dev_vdbg(chan2dev(chan), "get residual bytes error\n"); + return DMA_ERROR; + } else + dma_set_residue(txstate, bytes); - dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n", - ret, cookie, last_complete ? last_complete : 0, - last_used ? 
last_used : 0); + dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n", + ret, cookie, bytes); return ret; } @@ -1146,6 +1238,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan) spin_lock_irqsave(&atchan->lock, flags); atchan->descs_allocated = i; + atchan->remain_desc = 0; list_splice(&tmp_list, &atchan->free_list); dma_cookie_init(chan); spin_unlock_irqrestore(&atchan->lock, flags); @@ -1188,6 +1281,7 @@ static void atc_free_chan_resources(struct dma_chan *chan) list_splice_init(&atchan->free_list, &list); atchan->descs_allocated = 0; atchan->status = 0; + atchan->remain_desc = 0; dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); } diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index 3679933fb64..f31d647acdf 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -213,6 +213,7 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd) enum atc_status { ATC_IS_ERROR = 0, ATC_IS_PAUSED = 1, + ATC_IS_BTC = 2, ATC_IS_CYCLIC = 24, }; @@ -230,6 +231,7 @@ enum atc_status { * @save_cfg: configuration register that is saved on suspend/resume cycle * @save_dscr: for cyclic operations, preserve next descriptor address in * the cyclic list on suspend/resume cycle + * @remain_desc: to save remain desc length * @dma_sconfig: configuration for slave transfers, passed via DMA_SLAVE_CONFIG * @lock: serializes enqueue/dequeue operations to descriptors lists * @active_list: list of descriptors dmaengine is being running on @@ -248,6 +250,7 @@ struct at_dma_chan { struct tasklet_struct tasklet; u32 save_cfg; u32 save_dscr; + u32 remain_desc; struct dma_slave_config dma_sconfig; spinlock_t lock; -- cgit v1.2.3-18-g5258 From dd3daca162f7411448dd80a8872a002c43cfd8e5 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 24 May 2013 10:10:13 +0900 Subject: dma: use platform_{get,set}_drvdata() Use the wrapper functions for getting and setting the driver data using platform_device instead of using dev_{get,set}_drvdata() with &pdev->dev, so we can directly pass a struct platform_device. Also, unnecessary dev_set_drvdata() is removed, because the driver core clears the driver data to NULL after device_release or on probe failure. 
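The conversion is mechanical. For a driver's private data pointer priv it boils down to the following pattern (a generic sketch, not tied to either of the two drivers touched here):

        /* probe */
        platform_set_drvdata(pdev, priv);       /* was: dev_set_drvdata(&pdev->dev, priv); */

        /* remove */
        priv = platform_get_drvdata(pdev);      /* was: dev_get_drvdata(&pdev->dev); */
        /* No dev_set_drvdata(&pdev->dev, NULL) on the way out: the driver core
         * clears the pointer after device_release or on probe failure. */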
Signed-off-by: Jingoo Han Signed-off-by: Vinod Koul --- drivers/dma/fsldma.c | 5 ++--- drivers/dma/ppc4xx/adma.c | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 4fc2980556a..49e8fbdb898 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -1368,7 +1368,7 @@ static int fsldma_of_probe(struct platform_device *op) dma_set_mask(&(op->dev), DMA_BIT_MASK(36)); - dev_set_drvdata(&op->dev, fdev); + platform_set_drvdata(op, fdev); /* * We cannot use of_platform_bus_probe() because there is no @@ -1417,7 +1417,7 @@ static int fsldma_of_remove(struct platform_device *op) struct fsldma_device *fdev; unsigned int i; - fdev = dev_get_drvdata(&op->dev); + fdev = platform_get_drvdata(op); dma_async_device_unregister(&fdev->common); fsldma_free_irqs(fdev); @@ -1428,7 +1428,6 @@ static int fsldma_of_remove(struct platform_device *op) } iounmap(fdev->regs); - dev_set_drvdata(&op->dev, NULL); kfree(fdev); return 0; diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 5d3d95569a1..e68c51d13cb 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -4481,7 +4481,7 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev) adev->dev = &ofdev->dev; adev->common.dev = &ofdev->dev; INIT_LIST_HEAD(&adev->common.channels); - dev_set_drvdata(&ofdev->dev, adev); + platform_set_drvdata(ofdev, adev); /* create a channel */ chan = kzalloc(sizeof(*chan), GFP_KERNEL); @@ -4594,14 +4594,13 @@ out: */ static int ppc440spe_adma_remove(struct platform_device *ofdev) { - struct ppc440spe_adma_device *adev = dev_get_drvdata(&ofdev->dev); + struct ppc440spe_adma_device *adev = platform_get_drvdata(ofdev); struct device_node *np = ofdev->dev.of_node; struct resource res; struct dma_chan *chan, *_chan; struct ppc_dma_chan_ref *ref, *_ref; struct ppc440spe_adma_chan *ppc440spe_chan; - dev_set_drvdata(&ofdev->dev, NULL); if (adev->id < PPC440SPE_ADMA_ENGINES_NUM) ppc440spe_adma_devices[adev->id] = -1; -- cgit v1.2.3-18-g5258 From 3208b3701b98dcd14f0d5f0a36dd33d21b0b458f Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Fri, 24 May 2013 16:37:27 -0300 Subject: dma: mxs-dma: Staticize mxs_dma_xlate Fix the following sparse warning: drivers/dma/mxs-dma.c:696:17: warning: symbol 'mxs_dma_xlate' was not declared. Should it be static? Signed-off-by: Fabio Estevam Acked-by: Shawn Guo Signed-off-by: Vinod Koul --- drivers/dma/mxs-dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index b48a79c2884..719593002ab 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -693,7 +693,7 @@ static bool mxs_dma_filter_fn(struct dma_chan *chan, void *fn_param) return true; } -struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec, +static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { struct mxs_dma_engine *mxs_dma = ofdma->of_dma_data; -- cgit v1.2.3-18-g5258 From 36c6df5062568f0b923930b63e2e477cb3a391bd Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Mon, 6 May 2013 12:53:33 +0900 Subject: dma: at_hdmac: remove unnecessary platform_set_drvdata() The driver core clears the driver data to NULL after device_release or on probe failure, since commit 0998d0631001288a5974afc0b2a5f568bcdecb4d (device-core: Ensure drvdata = NULL when no driver is bound). Thus, it is not needed to manually clear the device driver data to NULL. 
Signed-off-by: Jingoo Han Acked-by: Nicolas Ferre Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 5ce89368a8d..6db5228f413 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -1570,7 +1570,6 @@ err_of_dma_controller_register: dma_async_device_unregister(&atdma->dma_common); dma_pool_destroy(atdma->dma_desc_pool); err_pool_create: - platform_set_drvdata(pdev, NULL); free_irq(platform_get_irq(pdev, 0), atdma); err_irq: clk_disable(atdma->clk); @@ -1595,7 +1594,6 @@ static int at_dma_remove(struct platform_device *pdev) dma_async_device_unregister(&atdma->dma_common); dma_pool_destroy(atdma->dma_desc_pool); - platform_set_drvdata(pdev, NULL); free_irq(platform_get_irq(pdev, 0), atdma); list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, -- cgit v1.2.3-18-g5258 From c1a9d391adc7feb219edd354deacb587b26cad06 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Mon, 6 May 2013 12:54:48 +0900 Subject: dma: timb_dma: remove unnecessary platform_set_drvdata() The driver core clears the driver data to NULL after device_release or on probe failure, since commit 0998d0631001288a5974afc0b2a5f568bcdecb4d (device-core: Ensure drvdata = NULL when no driver is bound). Thus, it is not needed to manually clear the device driver data to NULL. Signed-off-by: Jingoo Han Signed-off-by: Vinod Koul --- drivers/dma/timb_dma.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 26107ba6edb..0ef43c136aa 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -811,8 +811,6 @@ static int td_remove(struct platform_device *pdev) kfree(td); release_mem_region(iomem->start, resource_size(iomem)); - platform_set_drvdata(pdev, NULL); - dev_dbg(&pdev->dev, "Removed...\n"); return 0; } -- cgit v1.2.3-18-g5258 From 8004cbb481494c166596b0d469a6c777415e18f6 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Thu, 9 May 2013 13:19:40 +0400 Subject: dw_dmac: remove inline marking of EXPORT_SYMBOL functions EXPORT_SYMBOL and inline directives are contradictory to each other. The patch fixes this inconsistency. Found by Linux Driver Verification project (linuxtesting.org). Signed-off-by: Denis Efremov Acked-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 2e5deaa82b6..724083d02b3 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -556,14 +556,14 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) /* --------------------- Cyclic DMA API extensions -------------------- */ -inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) +dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); return channel_readl(dwc, SAR); } EXPORT_SYMBOL(dw_dma_get_src_addr); -inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) +dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); return channel_readl(dwc, DAR); -- cgit v1.2.3-18-g5258 From ac7ae754d592571478959833796b7bdf1a3c08da Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Sat, 11 May 2013 20:30:52 +0400 Subject: dma: tegra20-apbdma: err message correction Fixed err msg params order on irq request fail. 
Signed-off-by: Dmitry Osipenko Acked-by: Stephen Warren Acked-by: Laxman Dewangan Signed-off-by: Vinod Koul --- drivers/dma/tegra20-apb-dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 33f59ecd256..5953547a5f7 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -1334,7 +1334,7 @@ static int tegra_dma_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "request_irq failed with err %d channel %d\n", - i, ret); + ret, i); goto err_irq; } -- cgit v1.2.3-18-g5258 From 7bdc1e272a471062e8f310137c896e2355b46d13 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Sat, 11 May 2013 20:30:53 +0400 Subject: dma: tegra: avoid channel lock up after free Lock scenario: Channel 1 was allocated and prepared as slave_sg, used and freed. Now preparation of cyclic dma on channel 1 will fail with err "DMA configuration conflict" because tdc->isr_handler still setted to handle_once_dma_done. This happens because tegra_dma_abort_all() won't be called on channel freeing if pending list is empty and channel not busy. We need to clear isr_handler on channel freeing to avoid locking. Signed-off-by: Dmitry Osipenko Acked-by: Stephen Warren Acked-by: Laxman Dewangan Signed-off-by: Vinod Koul --- drivers/dma/tegra20-apb-dma.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/dma') diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 5953547a5f7..f137914d7b1 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -1191,6 +1191,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc) list_splice_init(&tdc->free_dma_desc, &dma_desc_list); INIT_LIST_HEAD(&tdc->cb_desc); tdc->config_init = false; + tdc->isr_handler = NULL; spin_unlock_irqrestore(&tdc->lock, flags); while (!list_empty(&dma_desc_list)) { -- cgit v1.2.3-18-g5258 From add93b578edda2a952b9b481ce8da2a9dc412cee Mon Sep 17 00:00:00 2001 From: Rongjun Ying Date: Tue, 14 May 2013 23:03:20 +0800 Subject: dmaengine: sirf: set dma residue based on the current dma transfer position read SIRFSOC_DMA_CH_ADDR register to get current dma transfer position, then update dma residue so that things like ALSA drivers work as ALSA drivers need the right residue value. 
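The computation added in the hunk below boils down to: bytes requested for the descriptor, minus how far the hardware has advanced past the start of the buffer. A condensed excerpt (all names are the driver's; the << 2 reflects that the CH_ADDR register evidently counts 32-bit words):

        dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) *
                            (sdesc->width * SIRFSOC_DMA_WORD_LEN);
        dma_pos = readl_relaxed(sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR) << 2;
        residue = dma_request_bytes - (dma_pos - sdesc->addr);
        dma_set_residue(txstate, residue);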
Signed-off-by: Rongjun Ying Signed-off-by: Barry Song Signed-off-by: Vinod Koul --- drivers/dma/sirf-dma.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'drivers/dma') diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index 1765a0a2736..716b23e4f32 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c @@ -466,12 +466,29 @@ static enum dma_status sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { + struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan); struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); unsigned long flags; enum dma_status ret; + struct sirfsoc_dma_desc *sdesc; + int cid = schan->chan.chan_id; + unsigned long dma_pos; + unsigned long dma_request_bytes; + unsigned long residue; spin_lock_irqsave(&schan->lock, flags); + + sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, + node); + dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) * + (sdesc->width * SIRFSOC_DMA_WORD_LEN); + ret = dma_cookie_status(chan, cookie, txstate); + dma_pos = readl_relaxed(sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR) + << 2; + residue = dma_request_bytes - (dma_pos - sdesc->addr); + dma_set_residue(txstate, residue); + spin_unlock_irqrestore(&schan->lock, flags); return ret; -- cgit v1.2.3-18-g5258 From 9479e17c9bb455c01b369d294e01de8fa9b0a8d3 Mon Sep 17 00:00:00 2001 From: Shawn Guo Date: Thu, 30 May 2013 22:23:32 +0800 Subject: dma: imx-sdma: move to generic device tree bindings Update imx-sdma driver to adopt generic DMA device tree bindings. It calls of_dma_controller_register() with imx-sdma specific of_dma_xlate to get the generic DMA device tree helper support. The #dma-cells for imx-sdma must be 3, which includes request ID, peripheral type and priority. The existing way of requesting channel, clients directly call dma_request_channel(), still work there, and will be removed after all imx-sdma clients get converted to generic DMA device tree helper. 
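On the consumer side this means a client node can describe its channels with three cells per request and the driver asks for them by name. A hedged sketch follows; the node, request number 26, peripheral type 4 and priority 0 are made-up example values, and only the three-cell layout (request ID, peripheral type, priority) comes from the text above:

        /* Device tree fragment (illustrative only):
         *
         *      uart1: serial@... {
         *              dmas = <&sdma 26 4 0>, <&sdma 27 4 0>;
         *              dma-names = "rx", "tx";
         *      };
         */
        struct dma_chan *rx_chan = dma_request_slave_channel(&pdev->dev, "rx");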
Signed-off-by: Shawn Guo Signed-off-by: Vinod Koul Acked-by: Arnd Bergmann --- drivers/dma/imx-sdma.c | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) (limited to 'drivers/dma') diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 092867bf795..1e44b8cf95d 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -1296,6 +1297,35 @@ err_dma_alloc: return ret; } +static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param) +{ + struct imx_dma_data *data = fn_param; + + if (!imx_dma_is_general_purpose(chan)) + return false; + + chan->private = data; + + return true; +} + +static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct sdma_engine *sdma = ofdma->of_dma_data; + dma_cap_mask_t mask = sdma->dma_device.cap_mask; + struct imx_dma_data data; + + if (dma_spec->args_count != 3) + return NULL; + + data.dma_request = dma_spec->args[0]; + data.peripheral_type = dma_spec->args[1]; + data.priority = dma_spec->args[2]; + + return dma_request_channel(mask, sdma_filter_fn, &data); +} + static int __init sdma_probe(struct platform_device *pdev) { const struct of_device_id *of_id = @@ -1443,10 +1473,20 @@ static int __init sdma_probe(struct platform_device *pdev) goto err_init; } + if (np) { + ret = of_dma_controller_register(np, sdma_xlate, sdma); + if (ret) { + dev_err(&pdev->dev, "failed to register controller\n"); + goto err_register; + } + } + dev_info(sdma->dev, "initialized\n"); return 0; +err_register: + dma_async_device_unregister(&sdma->dma_device); err_init: kfree(sdma->script_addrs); err_alloc: -- cgit v1.2.3-18-g5258 From 09677176610e7c3ed8ddb302fd24bbb59bdbf205 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 10 Jun 2013 19:34:37 +0100 Subject: dma: pl330: rip out broken, redundant ID probing The PL330 driver probes the peripheral and primecell IDs of the device to make sure that it is indeed an AMBA PL330. However, it does this by making byte accesses to a device mapping of the word-aligned ID registers, which is either UNPREDICTABLE or generates an alignment fault (depending on the presence of the virtualisation extensions). Rather than fix this code, we can actually rip most of it out and let the AMBA bus driver correctly do the probing for us. 
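Concretely, the removed helper assembled each ID with byte loads from the word-aligned ID registers, while the replacement is a single assignment from the value the AMBA bus core has already probed. Both fragments are taken from the diff below:

        /* removed: byte accesses to a device mapping of word-aligned registers */
        id |= (readb(regs + off + 0x0) << 0);
        id |= (readb(regs + off + 0x4) << 8);
        id |= (readb(regs + off + 0x8) << 16);
        id |= (readb(regs + off + 0xc) << 24);

        /* added in pl330_probe(): reuse the ID the bus driver read for us */
        pi->pcfg.periph_id = adev->periphid;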
Cc: Jassi Brar Cc: Vinod Koul Signed-off-by: Will Deacon Acked-by: Jassi Brar Acked-by: Grant Likely Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 27 +++------------------------ 1 file changed, 3 insertions(+), 24 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index a17553f7c02..ac04335ef44 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -157,7 +157,6 @@ enum pl330_reqtype { #define PERIPH_REV_R0P0 0 #define PERIPH_REV_R1P0 1 #define PERIPH_REV_R1P1 2 -#define PCELL_ID 0xff0 #define CR0_PERIPH_REQ_SET (1 << 0) #define CR0_BOOT_EN_SET (1 << 1) @@ -193,8 +192,6 @@ enum pl330_reqtype { #define INTEG_CFG 0x0 #define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12)) -#define PCELL_ID_VAL 0xb105f00d - #define PL330_STATE_STOPPED (1 << 0) #define PL330_STATE_EXECUTING (1 << 1) #define PL330_STATE_WFE (1 << 2) @@ -292,7 +289,6 @@ static unsigned cmd_line; /* Populated by the PL330 core driver for DMA API driver's info */ struct pl330_config { u32 periph_id; - u32 pcell_id; #define DMAC_MODE_NS (1 << 0) unsigned int mode; unsigned int data_bus_width:10; /* In number of bits */ @@ -650,19 +646,6 @@ static inline bool _manager_ns(struct pl330_thread *thrd) return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false; } -static inline u32 get_id(struct pl330_info *pi, u32 off) -{ - void __iomem *regs = pi->base; - u32 id = 0; - - id |= (readb(regs + off + 0x0) << 0); - id |= (readb(regs + off + 0x4) << 8); - id |= (readb(regs + off + 0x8) << 16); - id |= (readb(regs + off + 0xc) << 24); - - return id; -} - static inline u32 get_revision(u32 periph_id) { return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK; @@ -1986,9 +1969,6 @@ static void read_dmac_config(struct pl330_info *pi) pi->pcfg.num_events = val; pi->pcfg.irq_ns = readl(regs + CR3); - - pi->pcfg.periph_id = get_id(pi, PERIPH_ID); - pi->pcfg.pcell_id = get_id(pi, PCELL_ID); } static inline void _reset_thread(struct pl330_thread *thrd) @@ -2098,10 +2078,8 @@ static int pl330_add(struct pl330_info *pi) regs = pi->base; /* Check if we can handle this DMAC */ - if ((get_id(pi, PERIPH_ID) & 0xfffff) != PERIPH_ID_VAL - || get_id(pi, PCELL_ID) != PCELL_ID_VAL) { - dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n", - get_id(pi, PERIPH_ID), get_id(pi, PCELL_ID)); + if ((pi->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) { + dev_err(pi->dev, "PERIPH_ID 0x%x !\n", pi->pcfg.periph_id); return -EINVAL; } @@ -2916,6 +2894,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) if (ret) return ret; + pi->pcfg.periph_id = adev->periphid; ret = pl330_add(pi); if (ret) goto probe_err1; -- cgit v1.2.3-18-g5258 From fed8c45727abd273fd74b3e78b35be4929121334 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 10 Jun 2013 19:34:38 +0100 Subject: dma: pl330: use dma_addr_t for describing bus addresses The microcode bus address (pl330_dmac.mcode_bus) is currently a u32, which fails to compile when building on a system with 64-bit bus addresses. This patch uses dma_addr_t to represent the address instead. 
Cc: Jassi Brar Cc: Vinod Koul Signed-off-by: Will Deacon Acked-by: Jassi Brar Acked-by: Grant Likely Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index ac04335ef44..bd69cc47150 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -501,7 +501,7 @@ struct pl330_dmac { /* Maximum possible events/irqs */ int events[32]; /* BUS address of MicroCode buffer */ - u32 mcode_bus; + dma_addr_t mcode_bus; /* CPU address of MicroCode buffer */ void *mcode_cpu; /* List of all Channel threads */ -- cgit v1.2.3-18-g5258 From 0b95961e03ecee31d6151db79cc0826e702d1e0a Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 5 Jun 2013 15:26:43 +0300 Subject: dw_dmac: don't check resource with devm_ioremap_resource devm_ioremap_resource does sanity checks on the given resource. No need to duplicate this in the driver. Signed-off-by: Andy Shevchenko Acked-by: Viresh Kumar Acked-by: Arnd Bergmann Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 724083d02b3..2b65ba614e6 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -1667,14 +1667,11 @@ static int dw_probe(struct platform_device *pdev) int err; int i; - io = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!io) - return -EINVAL; - irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; + io = platform_get_resource(pdev, IORESOURCE_MEM, 0); regs = devm_ioremap_resource(&pdev->dev, io); if (IS_ERR(regs)) return PTR_ERR(regs); -- cgit v1.2.3-18-g5258 From 61a7649620d54a037c612f9a713abe5178cddc65 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 5 Jun 2013 15:26:44 +0300 Subject: dma: move dw_dmac driver to an own directory The dw_dmac driver is going to be split into multiple files. To make this more convenient move it to an own directory. Signed-off-by: Andy Shevchenko Acked-by: Viresh Kumar Acked-by: Arnd Bergmann Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 20 +- drivers/dma/Makefile | 2 +- drivers/dma/dw/Kconfig | 23 + drivers/dma/dw/Makefile | 1 + drivers/dma/dw/dw_dmac.c | 1969 +++++++++++++++++++++++++++++++++++++++++ drivers/dma/dw/dw_dmac_regs.h | 311 +++++++ drivers/dma/dw_dmac.c | 1969 ----------------------------------------- drivers/dma/dw_dmac_regs.h | 311 ------- 8 files changed, 2306 insertions(+), 2300 deletions(-) create mode 100644 drivers/dma/dw/Kconfig create mode 100644 drivers/dma/dw/Makefile create mode 100644 drivers/dma/dw/dw_dmac.c create mode 100644 drivers/dma/dw/dw_dmac_regs.h delete mode 100644 drivers/dma/dw_dmac.c delete mode 100644 drivers/dma/dw_dmac_regs.h (limited to 'drivers/dma') diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index e9924898043..146a1d864a7 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -79,25 +79,7 @@ config INTEL_IOP_ADMA help Enable support for the Intel(R) IOP Series RAID engines. -config DW_DMAC - tristate "Synopsys DesignWare AHB DMA support" - depends on GENERIC_HARDIRQS - select DMA_ENGINE - default y if CPU_AT32AP7000 - help - Support the Synopsys DesignWare AHB DMA controller. This - can be integrated in chips such as the Atmel AT32ap7000. 
- -config DW_DMAC_BIG_ENDIAN_IO - bool "Use big endian I/O register access" - default y if AVR32 - depends on DW_DMAC - help - Say yes here to use big endian I/O access when reading and writing - to the DMA controller registers. This is needed on some platforms, - like the Atmel AVR32 architecture. - - If unsure, use the default setting. +source "drivers/dma/dw/Kconfig" config AT_HDMAC tristate "Atmel AHB DMA support" diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index a2b0df591f9..ac44ca0d468 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -15,7 +15,7 @@ obj-$(CONFIG_FSL_DMA) += fsldma.o obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ obj-$(CONFIG_MV_XOR) += mv_xor.o -obj-$(CONFIG_DW_DMAC) += dw_dmac.o +obj-$(CONFIG_DW_DMAC) += dw/ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o obj-$(CONFIG_MX3_IPU) += ipu/ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig new file mode 100644 index 00000000000..38a215af5cc --- /dev/null +++ b/drivers/dma/dw/Kconfig @@ -0,0 +1,23 @@ +# +# DMA engine configuration for dw +# + +config DW_DMAC + tristate "Synopsys DesignWare AHB DMA support" + depends on GENERIC_HARDIRQS + select DMA_ENGINE + default y if CPU_AT32AP7000 + help + Support the Synopsys DesignWare AHB DMA controller. This + can be integrated in chips such as the Atmel AT32ap7000. + +config DW_DMAC_BIG_ENDIAN_IO + bool "Use big endian I/O register access" + default y if AVR32 + depends on DW_DMAC + help + Say yes here to use big endian I/O access when reading and writing + to the DMA controller registers. This is needed on some platforms, + like the Atmel AVR32 architecture. + + If unsure, use the default setting. diff --git a/drivers/dma/dw/Makefile b/drivers/dma/dw/Makefile new file mode 100644 index 00000000000..dd8d9936bee --- /dev/null +++ b/drivers/dma/dw/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_DW_DMAC) += dw_dmac.o diff --git a/drivers/dma/dw/dw_dmac.c b/drivers/dma/dw/dw_dmac.c new file mode 100644 index 00000000000..15f3f4f79c1 --- /dev/null +++ b/drivers/dma/dw/dw_dmac.c @@ -0,0 +1,1969 @@ +/* + * Core driver for the Synopsys DesignWare DMA Controller + * + * Copyright (C) 2007-2008 Atmel Corporation + * Copyright (C) 2010-2011 ST Microelectronics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../dmaengine.h" +#include "dw_dmac_regs.h" + +/* + * This supports the Synopsys "DesignWare AHB Central DMA Controller", + * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all + * of which use ARM any more). See the "Databook" from Synopsys for + * information beyond what licensees probably provide. + * + * The driver has currently been tested only with the Atmel AT32AP7000, + * which does not support descriptor writeback. + */ + +static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave) +{ + return slave ? slave->dst_master : 0; +} + +static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave) +{ + return slave ? 
slave->src_master : 1; +} + +static inline void dwc_set_masters(struct dw_dma_chan *dwc) +{ + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + struct dw_dma_slave *dws = dwc->chan.private; + unsigned char mmax = dw->nr_masters - 1; + + if (dwc->request_line == ~0) { + dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws)); + dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws)); + } +} + +#define DWC_DEFAULT_CTLLO(_chan) ({ \ + struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ + struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ + bool _is_slave = is_slave_direction(_dwc->direction); \ + u8 _smsize = _is_slave ? _sconfig->src_maxburst : \ + DW_DMA_MSIZE_16; \ + u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \ + DW_DMA_MSIZE_16; \ + \ + (DWC_CTLL_DST_MSIZE(_dmsize) \ + | DWC_CTLL_SRC_MSIZE(_smsize) \ + | DWC_CTLL_LLP_D_EN \ + | DWC_CTLL_LLP_S_EN \ + | DWC_CTLL_DMS(_dwc->dst_master) \ + | DWC_CTLL_SMS(_dwc->src_master)); \ + }) + +/* + * Number of descriptors to allocate for each channel. This should be + * made configurable somehow; preferably, the clients (at least the + * ones using slave transfers) should be able to give us a hint. + */ +#define NR_DESCS_PER_CHANNEL 64 + +/*----------------------------------------------------------------------*/ + +static struct device *chan2dev(struct dma_chan *chan) +{ + return &chan->dev->device; +} +static struct device *chan2parent(struct dma_chan *chan) +{ + return chan->dev->device.parent; +} + +static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) +{ + return to_dw_desc(dwc->active_list.next); +} + +static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) +{ + struct dw_desc *desc, *_desc; + struct dw_desc *ret = NULL; + unsigned int i = 0; + unsigned long flags; + + spin_lock_irqsave(&dwc->lock, flags); + list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { + i++; + if (async_tx_test_ack(&desc->txd)) { + list_del(&desc->desc_node); + ret = desc; + break; + } + dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); + } + spin_unlock_irqrestore(&dwc->lock, flags); + + dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i); + + return ret; +} + +/* + * Move a descriptor, including any children, to the free list. + * `desc' must not be on any lists. + */ +static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) +{ + unsigned long flags; + + if (desc) { + struct dw_desc *child; + + spin_lock_irqsave(&dwc->lock, flags); + list_for_each_entry(child, &desc->tx_list, desc_node) + dev_vdbg(chan2dev(&dwc->chan), + "moving child desc %p to freelist\n", + child); + list_splice_init(&desc->tx_list, &dwc->free_list); + dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); + list_add(&desc->desc_node, &dwc->free_list); + spin_unlock_irqrestore(&dwc->lock, flags); + } +} + +static void dwc_initialize(struct dw_dma_chan *dwc) +{ + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + struct dw_dma_slave *dws = dwc->chan.private; + u32 cfghi = DWC_CFGH_FIFO_MODE; + u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); + + if (dwc->initialized == true) + return; + + if (dws) { + /* + * We need controller-specific data to set up slave + * transfers. 
+ */ + BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); + + cfghi = dws->cfg_hi; + cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; + } else { + if (dwc->direction == DMA_MEM_TO_DEV) + cfghi = DWC_CFGH_DST_PER(dwc->request_line); + else if (dwc->direction == DMA_DEV_TO_MEM) + cfghi = DWC_CFGH_SRC_PER(dwc->request_line); + } + + channel_writel(dwc, CFG_LO, cfglo); + channel_writel(dwc, CFG_HI, cfghi); + + /* Enable interrupts */ + channel_set_bit(dw, MASK.XFER, dwc->mask); + channel_set_bit(dw, MASK.ERROR, dwc->mask); + + dwc->initialized = true; +} + +/*----------------------------------------------------------------------*/ + +static inline unsigned int dwc_fast_fls(unsigned long long v) +{ + /* + * We can be a lot more clever here, but this should take care + * of the most common optimization. + */ + if (!(v & 7)) + return 3; + else if (!(v & 3)) + return 2; + else if (!(v & 1)) + return 1; + return 0; +} + +static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc) +{ + dev_err(chan2dev(&dwc->chan), + " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", + channel_readl(dwc, SAR), + channel_readl(dwc, DAR), + channel_readl(dwc, LLP), + channel_readl(dwc, CTL_HI), + channel_readl(dwc, CTL_LO)); +} + +static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) +{ + channel_clear_bit(dw, CH_EN, dwc->mask); + while (dma_readl(dw, CH_EN) & dwc->mask) + cpu_relax(); +} + +/*----------------------------------------------------------------------*/ + +/* Perform single block transfer */ +static inline void dwc_do_single_block(struct dw_dma_chan *dwc, + struct dw_desc *desc) +{ + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + u32 ctllo; + + /* Software emulation of LLP mode relies on interrupts to continue + * multi block transfer. */ + ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN; + + channel_writel(dwc, SAR, desc->lli.sar); + channel_writel(dwc, DAR, desc->lli.dar); + channel_writel(dwc, CTL_LO, ctllo); + channel_writel(dwc, CTL_HI, desc->lli.ctlhi); + channel_set_bit(dw, CH_EN, dwc->mask); + + /* Move pointer to next descriptor */ + dwc->tx_node_active = dwc->tx_node_active->next; +} + +/* Called with dwc->lock held and bh disabled */ +static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) +{ + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + unsigned long was_soft_llp; + + /* ASSERT: channel is idle */ + if (dma_readl(dw, CH_EN) & dwc->mask) { + dev_err(chan2dev(&dwc->chan), + "BUG: Attempted to start non-idle channel\n"); + dwc_dump_chan_regs(dwc); + + /* The tasklet will hopefully advance the queue... 
*/ + return; + } + + if (dwc->nollp) { + was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP, + &dwc->flags); + if (was_soft_llp) { + dev_err(chan2dev(&dwc->chan), + "BUG: Attempted to start new LLP transfer " + "inside ongoing one\n"); + return; + } + + dwc_initialize(dwc); + + dwc->residue = first->total_len; + dwc->tx_node_active = &first->tx_list; + + /* Submit first block */ + dwc_do_single_block(dwc, first); + + return; + } + + dwc_initialize(dwc); + + channel_writel(dwc, LLP, first->txd.phys); + channel_writel(dwc, CTL_LO, + DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); + channel_writel(dwc, CTL_HI, 0); + channel_set_bit(dw, CH_EN, dwc->mask); +} + +/*----------------------------------------------------------------------*/ + +static void +dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, + bool callback_required) +{ + dma_async_tx_callback callback = NULL; + void *param = NULL; + struct dma_async_tx_descriptor *txd = &desc->txd; + struct dw_desc *child; + unsigned long flags; + + dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); + + spin_lock_irqsave(&dwc->lock, flags); + dma_cookie_complete(txd); + if (callback_required) { + callback = txd->callback; + param = txd->callback_param; + } + + /* async_tx_ack */ + list_for_each_entry(child, &desc->tx_list, desc_node) + async_tx_ack(&child->txd); + async_tx_ack(&desc->txd); + + list_splice_init(&desc->tx_list, &dwc->free_list); + list_move(&desc->desc_node, &dwc->free_list); + + if (!is_slave_direction(dwc->direction)) { + struct device *parent = chan2parent(&dwc->chan); + if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) + dma_unmap_single(parent, desc->lli.dar, + desc->total_len, DMA_FROM_DEVICE); + else + dma_unmap_page(parent, desc->lli.dar, + desc->total_len, DMA_FROM_DEVICE); + } + if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) + dma_unmap_single(parent, desc->lli.sar, + desc->total_len, DMA_TO_DEVICE); + else + dma_unmap_page(parent, desc->lli.sar, + desc->total_len, DMA_TO_DEVICE); + } + } + + spin_unlock_irqrestore(&dwc->lock, flags); + + if (callback) + callback(param); +} + +static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) +{ + struct dw_desc *desc, *_desc; + LIST_HEAD(list); + unsigned long flags; + + spin_lock_irqsave(&dwc->lock, flags); + if (dma_readl(dw, CH_EN) & dwc->mask) { + dev_err(chan2dev(&dwc->chan), + "BUG: XFER bit set, but channel not idle!\n"); + + /* Try to continue after resetting the channel... */ + dwc_chan_disable(dw, dwc); + } + + /* + * Submit queued descriptors ASAP, i.e. before we go through + * the completed ones. 
+ */ + list_splice_init(&dwc->active_list, &list); + if (!list_empty(&dwc->queue)) { + list_move(dwc->queue.next, &dwc->active_list); + dwc_dostart(dwc, dwc_first_active(dwc)); + } + + spin_unlock_irqrestore(&dwc->lock, flags); + + list_for_each_entry_safe(desc, _desc, &list, desc_node) + dwc_descriptor_complete(dwc, desc, true); +} + +/* Returns how many bytes were already received from source */ +static inline u32 dwc_get_sent(struct dw_dma_chan *dwc) +{ + u32 ctlhi = channel_readl(dwc, CTL_HI); + u32 ctllo = channel_readl(dwc, CTL_LO); + + return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7)); +} + +static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) +{ + dma_addr_t llp; + struct dw_desc *desc, *_desc; + struct dw_desc *child; + u32 status_xfer; + unsigned long flags; + + spin_lock_irqsave(&dwc->lock, flags); + llp = channel_readl(dwc, LLP); + status_xfer = dma_readl(dw, RAW.XFER); + + if (status_xfer & dwc->mask) { + /* Everything we've submitted is done */ + dma_writel(dw, CLEAR.XFER, dwc->mask); + + if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { + struct list_head *head, *active = dwc->tx_node_active; + + /* + * We are inside first active descriptor. + * Otherwise something is really wrong. + */ + desc = dwc_first_active(dwc); + + head = &desc->tx_list; + if (active != head) { + /* Update desc to reflect last sent one */ + if (active != head->next) + desc = to_dw_desc(active->prev); + + dwc->residue -= desc->len; + + child = to_dw_desc(active); + + /* Submit next block */ + dwc_do_single_block(dwc, child); + + spin_unlock_irqrestore(&dwc->lock, flags); + return; + } + + /* We are done here */ + clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); + } + + dwc->residue = 0; + + spin_unlock_irqrestore(&dwc->lock, flags); + + dwc_complete_all(dw, dwc); + return; + } + + if (list_empty(&dwc->active_list)) { + dwc->residue = 0; + spin_unlock_irqrestore(&dwc->lock, flags); + return; + } + + if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { + dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__); + spin_unlock_irqrestore(&dwc->lock, flags); + return; + } + + dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__, + (unsigned long long)llp); + + list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { + /* Initial residue value */ + dwc->residue = desc->total_len; + + /* Check first descriptors addr */ + if (desc->txd.phys == llp) { + spin_unlock_irqrestore(&dwc->lock, flags); + return; + } + + /* Check first descriptors llp */ + if (desc->lli.llp == llp) { + /* This one is currently in progress */ + dwc->residue -= dwc_get_sent(dwc); + spin_unlock_irqrestore(&dwc->lock, flags); + return; + } + + dwc->residue -= desc->len; + list_for_each_entry(child, &desc->tx_list, desc_node) { + if (child->lli.llp == llp) { + /* Currently in progress */ + dwc->residue -= dwc_get_sent(dwc); + spin_unlock_irqrestore(&dwc->lock, flags); + return; + } + dwc->residue -= child->len; + } + + /* + * No descriptors so far seem to be in progress, i.e. + * this one must be done. + */ + spin_unlock_irqrestore(&dwc->lock, flags); + dwc_descriptor_complete(dwc, desc, true); + spin_lock_irqsave(&dwc->lock, flags); + } + + dev_err(chan2dev(&dwc->chan), + "BUG: All descriptors done, but channel not idle!\n"); + + /* Try to continue after resetting the channel... 
*/ + dwc_chan_disable(dw, dwc); + + if (!list_empty(&dwc->queue)) { + list_move(dwc->queue.next, &dwc->active_list); + dwc_dostart(dwc, dwc_first_active(dwc)); + } + spin_unlock_irqrestore(&dwc->lock, flags); +} + +static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) +{ + dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", + lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo); +} + +static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) +{ + struct dw_desc *bad_desc; + struct dw_desc *child; + unsigned long flags; + + dwc_scan_descriptors(dw, dwc); + + spin_lock_irqsave(&dwc->lock, flags); + + /* + * The descriptor currently at the head of the active list is + * borked. Since we don't have any way to report errors, we'll + * just have to scream loudly and try to carry on. + */ + bad_desc = dwc_first_active(dwc); + list_del_init(&bad_desc->desc_node); + list_move(dwc->queue.next, dwc->active_list.prev); + + /* Clear the error flag and try to restart the controller */ + dma_writel(dw, CLEAR.ERROR, dwc->mask); + if (!list_empty(&dwc->active_list)) + dwc_dostart(dwc, dwc_first_active(dwc)); + + /* + * WARN may seem harsh, but since this only happens + * when someone submits a bad physical address in a + * descriptor, we should consider ourselves lucky that the + * controller flagged an error instead of scribbling over + * random memory locations. + */ + dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n" + " cookie: %d\n", bad_desc->txd.cookie); + dwc_dump_lli(dwc, &bad_desc->lli); + list_for_each_entry(child, &bad_desc->tx_list, desc_node) + dwc_dump_lli(dwc, &child->lli); + + spin_unlock_irqrestore(&dwc->lock, flags); + + /* Pretend the descriptor completed successfully */ + dwc_descriptor_complete(dwc, bad_desc, true); +} + +/* --------------------- Cyclic DMA API extensions -------------------- */ + +dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + return channel_readl(dwc, SAR); +} +EXPORT_SYMBOL(dw_dma_get_src_addr); + +dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + return channel_readl(dwc, DAR); +} +EXPORT_SYMBOL(dw_dma_get_dst_addr); + +/* Called with dwc->lock held and all DMAC interrupts disabled */ +static void dwc_handle_cyclic(struct dw_dma *dw, str