author    | Linus Torvalds <torvalds@linux-foundation.org> | 2011-03-22 17:53:13 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-03-22 17:53:13 -0700
commit    | 6447f55da90b77faec1697d499ed7986bb4f6de6 (patch)
tree      | 2d360d48121bdaa354d1ef19fed48467d08dfb1f
parent    | c50e3f512a5a15a73acd94e6ec8ed63cd512e04f (diff)
parent    | 3ea205c449d2b5996d0256aa8b2894f7aea228a2 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (66 commits)
avr32: at32ap700x: fix typo in DMA master configuration
dmaengine/dmatest: Pass timeout via module params
dma: let IMX_DMA depend on IMX_HAVE_DMA_V1 instead of an explicit list of SoCs
fsldma: make halt behave nicely on all supported controllers
fsldma: reduce locking during descriptor cleanup
fsldma: support async_tx dependencies and automatic unmapping
fsldma: fix controller lockups
fsldma: minor codingstyle and consistency fixes
fsldma: improve link descriptor debugging
fsldma: use channel name in printk output
fsldma: move related helper functions near each other
dmatest: fix automatic buffer unmap type
drivers, pch_dma: Fix warning when CONFIG_PM=n.
dmaengine/dw_dmac fix: use readl & writel instead of __raw_readl & __raw_writel
avr32: at32ap700x: Specify DMA Flow Controller, Src and Dst msize
dw_dmac: Setting Default Burst length for transfers as 16.
dw_dmac: Allow src/dst msize & flow controller to be configured at runtime
dw_dmac: Changing type of src_master and dest_master to u8.
dw_dmac: Pass Channel Priority from platform_data
dw_dmac: Pass Channel Allocation Order from platform_data
...
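
A recurring theme in the dw_dmac commits above is moving controller parameters that used to be hard-coded (AHB master selection, burst size, flow controller, channel priority and allocation order) into `struct dw_dma_slave` and `struct dw_dma_platform_data`. As a condensed sketch of what a board file can now set: the field names and constants come from this merge's diff below, but the peripheral and master numbers are invented for illustration.

	/*
	 * Sketch only, not a real board file. The handshake interface and
	 * AHB master numbers are made up; compare the real values in the
	 * at32ap700x.c hunks further down.
	 */
	#include <linux/dw_dmac.h>

	static struct dw_dma_slave example_tx_dws = {
		.reg_width  = DW_DMA_SLAVE_WIDTH_16BIT,
		.cfg_hi     = DWC_CFGH_DST_PER(4),  /* handshake interface */
		.src_master = 0,                    /* memory-side AHB master */
		.dst_master = 1,                    /* peripheral-side AHB master */
		.src_msize  = DW_DMA_MSIZE_1,       /* burst size, now per-slave */
		.dst_msize  = DW_DMA_MSIZE_1,
		.fc         = DW_DMA_FC_D_M2P,      /* DMAC flow-controls mem->periph */
	};

	static struct dw_dma_platform_data example_pdata = {
		.nr_channels           = 8,
		.is_private            = true,  /* sets DMA_PRIVATE in dw_probe() */
		.chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
		.chan_priority         = CHAN_PRIORITY_ASCENDING,
	};

With `CHAN_PRIORITY_ASCENDING`, the dw_probe() hunk below assigns `dwc->priority = 7 - i`, so channel 0 ends up with the highest hardware priority (7), matching the "7 is highest priority & 0 is lowest" comment in that hunk.
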
-rw-r--r-- | arch/arm/mach-mxs/include/mach/dma.h           |   26
-rw-r--r-- | arch/arm/plat-nomadik/include/plat/ste_dma40.h |   22
-rw-r--r-- | arch/avr32/mach-at32ap/at32ap700x.c            |   15
-rw-r--r-- | drivers/dma/Kconfig                            |   12
-rw-r--r-- | drivers/dma/Makefile                           |    1
-rw-r--r-- | drivers/dma/dmatest.c                          |   14
-rw-r--r-- | drivers/dma/dw_dmac.c                          |  103
-rw-r--r-- | drivers/dma/dw_dmac_regs.h                     |   12
-rw-r--r-- | drivers/dma/fsldma.c                           |  551
-rw-r--r-- | drivers/dma/fsldma.h                           |    6
-rw-r--r-- | drivers/dma/mxs-dma.c                          |  724
-rw-r--r-- | drivers/dma/pch_dma.c                          |   35
-rw-r--r-- | drivers/dma/ste_dma40.c                        | 1402
-rw-r--r-- | drivers/dma/ste_dma40_ll.c                     |  218
-rw-r--r-- | drivers/dma/ste_dma40_ll.h                     |   66
-rw-r--r-- | include/linux/dw_dmac.h                        |   44
16 files changed, 1989 insertions(+), 1262 deletions(-)
diff --git a/arch/arm/mach-mxs/include/mach/dma.h b/arch/arm/mach-mxs/include/mach/dma.h
new file mode 100644
index 00000000000..7f4aeeaba8d
--- /dev/null
+++ b/arch/arm/mach-mxs/include/mach/dma.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MACH_MXS_DMA_H__
+#define __MACH_MXS_DMA_H__
+
+struct mxs_dma_data {
+    int chan_irq;
+};
+
+static inline int mxs_dma_is_apbh(struct dma_chan *chan)
+{
+    return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbh");
+}
+
+static inline int mxs_dma_is_apbx(struct dma_chan *chan)
+{
+    return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbx");
+}
+
+#endif /* __MACH_MXS_DMA_H__ */
diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
index 4d6dd4c39b7..c44886062f8 100644
--- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h
+++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
@@ -104,6 +104,8 @@ struct stedma40_half_channel_info {
  *
  * @dir: MEM 2 MEM, PERIPH 2 MEM , MEM 2 PERIPH, PERIPH 2 PERIPH
  * @high_priority: true if high-priority
+ * @realtime: true if realtime mode is to be enabled. Only available on DMA40
+ *            version 3+, i.e DB8500v2+
  * @mode: channel mode: physical, logical, or operation
  * @mode_opt: options for the chosen channel mode
  * @src_dev_type: Src device type
@@ -119,6 +121,7 @@ struct stedma40_half_channel_info {
 struct stedma40_chan_cfg {
     enum stedma40_xfer_dir   dir;
     bool                     high_priority;
+    bool                     realtime;
     enum stedma40_mode       mode;
     enum stedma40_mode_opt   mode_opt;
     int                      src_dev_type;
@@ -169,25 +172,6 @@ struct stedma40_platform_data {
 bool stedma40_filter(struct dma_chan *chan, void *data);
 
 /**
- * stedma40_memcpy_sg() - extension of the dma framework, memcpy to/from
- * scattergatter lists.
- *
- * @chan: dmaengine handle
- * @sgl_dst: Destination scatter list
- * @sgl_src: Source scatter list
- * @sgl_len: The length of each scatterlist. Both lists must be of equal length
- * and each element must match the corresponding element in the other scatter
- * list.
- * @flags: is actually enum dma_ctrl_flags. See dmaengine.h
- */
-
-struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
-                                                   struct scatterlist *sgl_dst,
-                                                   struct scatterlist *sgl_src,
-                                                   unsigned int sgl_len,
-                                                   unsigned long flags);
-
-/**
  * stedma40_slave_mem() - Transfers a raw data buffer to or from a slave
  * (=device)
  *
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index e67c9994542..bfc9d071db9 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -2048,6 +2048,11 @@ at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data,
         rx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT;
         rx_dws->cfg_hi = DWC_CFGH_SRC_PER(3);
         rx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
+        rx_dws->src_master = 0;
+        rx_dws->dst_master = 1;
+        rx_dws->src_msize = DW_DMA_MSIZE_1;
+        rx_dws->dst_msize = DW_DMA_MSIZE_1;
+        rx_dws->fc = DW_DMA_FC_D_P2M;
     }
 
     /* Check if DMA slave interface for playback should be configured. */
@@ -2056,6 +2061,11 @@ at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data,
         tx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT;
         tx_dws->cfg_hi = DWC_CFGH_DST_PER(4);
         tx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
+        tx_dws->src_master = 0;
+        tx_dws->dst_master = 1;
+        tx_dws->src_msize = DW_DMA_MSIZE_1;
+        tx_dws->dst_msize = DW_DMA_MSIZE_1;
+        tx_dws->fc = DW_DMA_FC_D_M2P;
     }
 
     if (platform_device_add_data(pdev, data,
@@ -2128,6 +2138,11 @@ at32_add_device_abdac(unsigned int id, struct atmel_abdac_pdata *data)
         dws->reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
         dws->cfg_hi = DWC_CFGH_DST_PER(2);
         dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
+        dws->src_master = 0;
+        dws->dst_master = 1;
+        dws->src_msize = DW_DMA_MSIZE_1;
+        dws->dst_msize = DW_DMA_MSIZE_1;
+        dws->fc = DW_DMA_FC_D_M2P;
 
     if (platform_device_add_data(pdev, data,
                      sizeof(struct atmel_abdac_pdata)))
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 1c28816152f..a572600e44e 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -82,7 +82,7 @@ config INTEL_IOP_ADMA
 
 config DW_DMAC
     tristate "Synopsys DesignWare AHB DMA support"
-    depends on AVR32
+    depends on HAVE_CLK
     select DMA_ENGINE
     default y if CPU_AT32AP7000
     help
@@ -221,12 +221,20 @@ config IMX_SDMA
 
 config IMX_DMA
     tristate "i.MX DMA support"
-    depends on ARCH_MX1 || ARCH_MX21 || MACH_MX27
+    depends on IMX_HAVE_DMA_V1
     select DMA_ENGINE
     help
       Support the i.MX DMA engine. This engine is integrated into
      Freescale i.MX1/21/27 chips.
 
+config MXS_DMA
+    bool "MXS DMA support"
+    depends on SOC_IMX23 || SOC_IMX28
+    select DMA_ENGINE
+    help
+      Support the MXS DMA engine. This engine including APBH-DMA
+      and APBX-DMA is integrated into Freescale i.MX23/28 chips.
+
 config DMA_ENGINE
     bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 1be065a62f8..836095ab3c5 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
 obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
 obj-$(CONFIG_IMX_DMA) += imx-dma.o
+obj-$(CONFIG_MXS_DMA) += mxs-dma.o
 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 5589358b684..e0888cb538d 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -54,6 +54,11 @@ module_param(pq_sources, uint, S_IRUGO);
 MODULE_PARM_DESC(pq_sources,
         "Number of p+q source buffers (default: 3)");
 
+static int timeout = 3000;
+module_param(timeout, uint, S_IRUGO);
+MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), \
+        Pass -1 for infinite timeout");
+
 /*
  * Initialization patterns. All bytes in the source buffer has bit 7
  * set, all bytes in the destination buffer has bit 7 cleared.
@@ -285,7 +290,12 @@ static int dmatest_func(void *data)
 
     set_user_nice(current, 10);
 
-    flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT;
+    /*
+     * src buffers are freed by the DMAEngine code with dma_unmap_single()
+     * dst buffers are freed by ourselves below
+     */
+    flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
+        | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;
 
     while (!kthread_should_stop()
            && !(iterations && total_tests >= iterations)) {
@@ -294,7 +304,7 @@ static int dmatest_func(void *data)
         dma_addr_t dma_srcs[src_cnt];
         dma_addr_t dma_dsts[dst_cnt];
         struct completion cmp;
-        unsigned long tmo = msecs_to_jiffies(3000);
+        unsigned long tmo = msecs_to_jiffies(timeout);
         u8 align = 0;
 
         total_tests++;
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index a3991ab0d67..9c25c7d099e 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -32,26 +32,30 @@
  * which does not support descriptor writeback.
  */
 
-/* NOTE: DMS+SMS is system-specific. We should get this information
- * from the platform code somehow.
- */
-#define DWC_DEFAULT_CTLLO   (DWC_CTLL_DST_MSIZE(0)  \
-                | DWC_CTLL_SRC_MSIZE(0) \
-                | DWC_CTLL_DMS(0)       \
-                | DWC_CTLL_SMS(1)       \
-                | DWC_CTLL_LLP_D_EN     \
-                | DWC_CTLL_LLP_S_EN)
+#define DWC_DEFAULT_CTLLO(private) ({                           \
+        struct dw_dma_slave *__slave = (private);               \
+        int dms = __slave ? __slave->dst_master : 0;            \
+        int sms = __slave ? __slave->src_master : 1;            \
+        u8 smsize = __slave ? __slave->src_msize : DW_DMA_MSIZE_16; \
+        u8 dmsize = __slave ? __slave->dst_msize : DW_DMA_MSIZE_16; \
+                                                                \
+        (DWC_CTLL_DST_MSIZE(dmsize)                             \
+         | DWC_CTLL_SRC_MSIZE(smsize)                           \
+         | DWC_CTLL_LLP_D_EN                                    \
+         | DWC_CTLL_LLP_S_EN                                    \
+         | DWC_CTLL_DMS(dms)                                    \
+         | DWC_CTLL_SMS(sms));                                  \
+    })
 
 /*
  * This is configuration-dependent and usually a funny size like 4095.
- * Let's round it down to the nearest power of two.
  *
  * Note that this is a transfer count, i.e. if we transfer 32-bit
- * words, we can do 8192 bytes per descriptor.
+ * words, we can do 16380 bytes per descriptor.
  *
  * This parameter is also system-specific.
  */
-#define DWC_MAX_COUNT   2048U
+#define DWC_MAX_COUNT   4095U
 
 /*
  * Number of descriptors to allocate for each channel. This should be
@@ -84,11 +88,6 @@ static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
     return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
 }
 
-static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc)
-{
-    return list_entry(dwc->queue.next, struct dw_desc, desc_node);
-}
-
 static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
 {
     struct dw_desc *desc, *_desc;
@@ -201,6 +200,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
     dma_async_tx_callback       callback;
     void                        *param;
     struct dma_async_tx_descriptor  *txd = &desc->txd;
+    struct dw_desc              *child;
 
     dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
 
@@ -209,6 +209,12 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
     param = txd->callback_param;
 
     dwc_sync_desc_for_cpu(dwc, desc);
+
+    /* async_tx_ack */
+    list_for_each_entry(child, &desc->tx_list, desc_node)
+        async_tx_ack(&child->txd);
+    async_tx_ack(&desc->txd);
+
     list_splice_init(&desc->tx_list, &dwc->free_list);
     list_move(&desc->desc_node, &dwc->free_list);
 
@@ -259,10 +265,11 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
      * Submit queued descriptors ASAP, i.e. before we go through
      * the completed ones.
      */
-    if (!list_empty(&dwc->queue))
-        dwc_dostart(dwc, dwc_first_queued(dwc));
     list_splice_init(&dwc->active_list, &list);
-    list_splice_init(&dwc->queue, &dwc->active_list);
+    if (!list_empty(&dwc->queue)) {
+        list_move(dwc->queue.next, &dwc->active_list);
+        dwc_dostart(dwc, dwc_first_active(dwc));
+    }
 
     list_for_each_entry_safe(desc, _desc, &list, desc_node)
         dwc_descriptor_complete(dwc, desc);
@@ -291,6 +298,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
         return;
     }
 
+    if (list_empty(&dwc->active_list))
+        return;
+
     dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
 
     list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
@@ -319,8 +329,8 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
         cpu_relax();
 
     if (!list_empty(&dwc->queue)) {
-        dwc_dostart(dwc, dwc_first_queued(dwc));
-        list_splice_init(&dwc->queue, &dwc->active_list);
+        list_move(dwc->queue.next, &dwc->active_list);
+        dwc_dostart(dwc, dwc_first_active(dwc));
     }
 }
 
@@ -346,7 +356,7 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
      */
     bad_desc = dwc_first_active(dwc);
     list_del_init(&bad_desc->desc_node);
-    list_splice_init(&dwc->queue, dwc->active_list.prev);
+    list_move(dwc->queue.next, dwc->active_list.prev);
 
     /* Clear the error flag and try to restart the controller */
     dma_writel(dw, CLEAR.ERROR, dwc->mask);
@@ -541,8 +551,8 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
     if (list_empty(&dwc->active_list)) {
         dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
                 desc->txd.cookie);
-        dwc_dostart(dwc, desc);
         list_add_tail(&desc->desc_node, &dwc->active_list);
+        dwc_dostart(dwc, dwc_first_active(dwc));
     } else {
         dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
                 desc->txd.cookie);
@@ -581,14 +591,16 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
      * We can be a lot more clever here, but this should take care
      * of the most common optimization.
      */
-    if (!((src | dest | len) & 3))
+    if (!((src | dest | len) & 7))
+        src_width = dst_width = 3;
+    else if (!((src | dest | len) & 3))
         src_width = dst_width = 2;
     else if (!((src | dest | len) & 1))
         src_width = dst_width = 1;
     else
         src_width = dst_width = 0;
 
-    ctllo = DWC_DEFAULT_CTLLO
+    ctllo = DWC_DEFAULT_CTLLO(chan->private)
             | DWC_CTLL_DST_WIDTH(dst_width)
             | DWC_CTLL_SRC_WIDTH(src_width)
             | DWC_CTLL_DST_INC
@@ -669,11 +681,11 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
     switch (direction) {
     case DMA_TO_DEVICE:
-        ctllo = (DWC_DEFAULT_CTLLO
+        ctllo = (DWC_DEFAULT_CTLLO(chan->private)
                 | DWC_CTLL_DST_WIDTH(reg_width)
                 | DWC_CTLL_DST_FIX
                 | DWC_CTLL_SRC_INC
-                | DWC_CTLL_FC_M2P);
+                | DWC_CTLL_FC(dws->fc));
         reg = dws->tx_reg;
         for_each_sg(sgl, sg, sg_len, i) {
             struct dw_desc  *desc;
@@ -714,11 +726,11 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
         }
         break;
     case DMA_FROM_DEVICE:
-        ctllo = (DWC_DEFAULT_CTLLO
+        ctllo = (DWC_DEFAULT_CTLLO(chan->private)
                 | DWC_CTLL_SRC_WIDTH(reg_width)
                 | DWC_CTLL_DST_INC
                 | DWC_CTLL_SRC_FIX
-                | DWC_CTLL_FC_P2M);
+                | DWC_CTLL_FC(dws->fc));
 
         reg = dws->rx_reg;
         for_each_sg(sgl, sg, sg_len, i) {
@@ -834,7 +846,9 @@ dwc_tx_status(struct dma_chan *chan,
 
     ret = dma_async_is_complete(cookie, last_complete, last_used);
     if (ret != DMA_SUCCESS) {
+        spin_lock_bh(&dwc->lock);
         dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+        spin_unlock_bh(&dwc->lock);
 
         last_complete = dwc->completed;
         last_used = chan->cookie;
@@ -889,8 +903,11 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
         BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
 
         cfghi = dws->cfg_hi;
-        cfglo = dws->cfg_lo;
+        cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
     }
+
+    cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority);
+
     channel_writel(dwc, CFG_LO, cfglo);
     channel_writel(dwc, CFG_HI, cfghi);
 
@@ -1126,23 +1143,23 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
         case DMA_TO_DEVICE:
             desc->lli.dar = dws->tx_reg;
             desc->lli.sar = buf_addr + (period_len * i);
-            desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+            desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
                     | DWC_CTLL_DST_WIDTH(reg_width)
                     | DWC_CTLL_SRC_WIDTH(reg_width)
                     | DWC_CTLL_DST_FIX
                     | DWC_CTLL_SRC_INC
-                    | DWC_CTLL_FC_M2P
+                    | DWC_CTLL_FC(dws->fc)
                    | DWC_CTLL_INT_EN);
             break;
         case DMA_FROM_DEVICE:
             desc->lli.dar = buf_addr + (period_len * i);
             desc->lli.sar = dws->rx_reg;
-            desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+            desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
                     | DWC_CTLL_SRC_WIDTH(reg_width)
                     | DWC_CTLL_DST_WIDTH(reg_width)
                     | DWC_CTLL_DST_INC
                     | DWC_CTLL_SRC_FIX
-                    | DWC_CTLL_FC_P2M
+                    | DWC_CTLL_FC(dws->fc)
                     | DWC_CTLL_INT_EN);
             break;
         default:
@@ -1307,7 +1324,17 @@ static int __init dw_probe(struct platform_device *pdev)
         dwc->chan.device = &dw->dma;
         dwc->chan.cookie = dwc->completed = 1;
         dwc->chan.chan_id = i;
-        list_add_tail(&dwc->chan.device_node, &dw->dma.channels);
+        if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
+            list_add_tail(&dwc->chan.device_node,
+                    &dw->dma.channels);
+        else
+            list_add(&dwc->chan.device_node, &dw->dma.channels);
+
+        /* 7 is highest priority & 0 is lowest. */
+        if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
+            dwc->priority = 7 - i;
+        else
+            dwc->priority = i;
 
         dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
         spin_lock_init(&dwc->lock);
@@ -1335,6 +1362,8 @@ static int __init dw_probe(struct platform_device *pdev)
 
     dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
     dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
+    if (pdata->is_private)
+        dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
     dw->dma.dev = &pdev->dev;
     dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
     dw->dma.device_free_chan_resources = dwc_free_chan_resources;
@@ -1447,7 +1476,7 @@ static int __init dw_init(void)
 {
     return platform_driver_probe(&dw_driver, dw_probe);
 }
-module_init(dw_init);
+subsys_initcall(dw_init);
 
 static void __exit dw_exit(void)
 {
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index d9a939f67f4..720f821527f 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -86,6 +86,7 @@ struct dw_dma_regs {
 #define DWC_CTLL_SRC_MSIZE(n)   ((n)<<14)
 #define DWC_CTLL_S_GATH_EN  (1 << 17)   /* src gather, !FIX */
 #define DWC_CTLL_D_SCAT_EN  (1 << 18)   /* dst scatter, !FIX */
+#define DWC_CTLL_FC(n)      ((n) << 20)
 #define DWC_CTLL_FC_M2M     (0 << 20)   /* mem-to-mem */
 #define DWC_CTLL_FC_M2P     (1 << 20)   /* mem-to-periph */
 #define DWC_CTLL_FC_P2M     (2 << 20)   /* periph-to-mem */
@@ -101,6 +102,8 @@ struct dw_dma_regs {
 #define DWC_CTLH_BLOCK_TS_MASK  0x00000fff
 
 /* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
+#define DWC_CFGL_CH_PRIOR_MASK  (0x7 << 5)  /* priority mask */
+#define DWC_CFGL_CH_PRIOR(x)    ((x) << 5)  /* priority */
 #define DWC_CFGL_CH_SUSP    (1 << 8)    /* pause xfer */
 #define DWC_CFGL_FIFO_EMPTY (1 << 9)    /* pause xfer */
 #define DWC_CFGL_HS_DST     (1 << 10)   /* handshake w/dst */
@@ -134,6 +137,7 @@ struct dw_dma_chan {
     struct dma_chan     chan;
     void __iomem        *ch_regs;
     u8                  mask;
+    u8                  priority;
 
     spinlock_t          lock;
 
@@ -155,9 +159,9 @@ __dwc_regs(struct dw_dma_chan *dwc)
 }
 
 #define channel_readl(dwc, name) \
-    __raw_readl(&(__dwc_regs(dwc)->name))
+    readl(&(__dwc_regs(dwc)->name))
 #define channel_writel(dwc, name, val) \
-    __raw_writel((val), &(__dwc_regs(dwc)->name))
+    writel((val), &(__dwc_regs(dwc)->name))
 
 static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
 {
@@ -181,9 +185,9 @@ static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
 }
 
 #define dma_readl(dw, name) \
-    __raw_readl(&(__dw_regs(dw)->name))
+    readl(&(__dw_regs(dw)->name))
 #define dma_writel(dw, name, val) \
-    __raw_writel((val), &(__dw_regs(dw)->name))
+    writel((val), &(__dw_regs(dw)->name))
 
 #define channel_set_bit(dw, reg, mask) \
     dma_writel(dw, reg, ((mask) << 8) | (mask))
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index e3854a8f0de..6b396759e7f 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -37,35 +37,16 @@
 
 #include "fsldma.h"
 
-static const char msg_ld_oom[] = "No free memory for link descriptor\n";
+#define chan_dbg(chan, fmt, arg...)                 \
+    dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
+#define chan_err(chan, fmt, arg...)                 \
+    dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
 
-static void dma_init(struct fsldma_chan *chan)
-{
-    /* Reset the channel */
-    DMA_OUT(chan, &chan->regs->mr, 0, 32);
+static const char msg_ld_oom[] = "No free memory for link descriptor";
 
-    switch (chan->feature & FSL_DMA_IP_MASK) {
-    case FSL_DMA_IP_85XX:
-        /* Set the channel to below modes:
-         * EIE - Error interrupt enable
-         * EOSIE - End of segments interrupt enable (basic mode)
-         * EOLNIE - End of links interrupt enable
-         * BWC - Bandwidth sharing among channels
-         */
-        DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
-                | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE
-                | FSL_DMA_MR_EOSIE, 32);
-        break;
-    case FSL_DMA_IP_83XX:
-        /* Set the channel to below modes:
-         * EOTIE - End-of-transfer interrupt enable
-         * PRC_RM - PCI read multiple
-         */
-        DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
-                | FSL_DMA_MR_PRC_RM, 32);
-        break;
-    }
-}
+/*
+ * Register Helpers
+ */
 
 static void set_sr(struct fsldma_chan *chan, u32 val)
 {
@@ -77,14 +58,38 @@ static u32 get_sr(struct fsldma_chan *chan)
     return DMA_IN(chan, &chan->regs->sr, 32);
 }
 
+static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
+{
+    DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
+}
+
+static dma_addr_t get_cdar(struct fsldma_chan *chan)
+{
+    return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
+}
+
+static u32 get_bcr(struct fsldma_chan *chan)
+{
+    return DMA_IN(chan, &chan->regs->bcr, 32);
+}
+
+/*
+ * Descriptor Helpers
+ */
+
 static void set_desc_cnt(struct fsldma_chan *chan,
                 struct fsl_dma_ld_hw *hw, u32 count)
 {
     hw->count = CPU_TO_DMA(chan, count, 32);
 }
 
+static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
+{
+    return DMA_TO_CPU(chan, desc->hw.count, 32);
+}
+
 static void set_desc_src(struct fsldma_chan *chan,
-            struct fsl_dma_ld_hw *hw, dma_addr_t src)
+             struct fsl_dma_ld_hw *hw, dma_addr_t src)
 {
     u64 snoop_bits;
 
@@ -93,8 +98,18 @@ static void set_desc_src(struct fsldma_chan *chan,
     hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
 }
 
+static dma_addr_t get_desc_src(struct fsldma_chan *chan,
+                   struct fsl_desc_sw *desc)
+{
+    u64 snoop_bits;
+
+    snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+        ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
+    return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
+}
+
 static void set_desc_dst(struct fsldma_chan *chan,
-            struct fsl_dma_ld_hw *hw, dma_addr_t dst)
+             struct fsl_dma_ld_hw *hw, dma_addr_t dst)
 {
     u64 snoop_bits;
 
@@ -103,8 +118,18 @@ static void set_desc_dst(struct fsldma_chan *chan,
     hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
 }
 
+static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
+                   struct fsl_desc_sw *desc)
+{
+    u64 snoop_bits;
+
+    snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+        ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
+    return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
+}
+
 static void set_desc_next(struct fsldma_chan *chan,
-            struct fsl_dma_ld_hw *hw, dma_addr_t next)
+              struct fsl_dma_ld_hw *hw, dma_addr_t next)
 {
     u64 snoop_bits;
 
@@ -113,24 +138,46 @@ static void set_desc_next(struct fsldma_chan *chan,
     hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
 }
 
-static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
+static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
 {
-    DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
-}
+    u64 snoop_bits;
 
-static dma_addr_t get_cdar(struct fsldma_chan *chan)
-{
-    return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
-}
+    snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
+        ? FSL_DMA_SNEN : 0;
 
-static dma_addr_t get_ndar(struct fsldma_chan *chan)
-{
-    return DMA_IN(chan, &chan->regs->ndar, 64);
+    desc->hw.next_ln_addr = CPU_TO_DMA(chan,
+        DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
+            | snoop_bits, 64);
 }
 
-static u32 get_bcr(struct fsldma_chan *chan)
+/*
+ * DMA Engine Hardware Control Helpers
+ */
+
+static void dma_init(struct fsldma_chan *chan)
 {
-    return DMA_IN(chan, &chan->regs->bcr, 32);
+    /* Reset the channel */
+    DMA_OUT(chan, &chan->regs->mr, 0, 32);
+
+    switch (chan->feature & FSL_DMA_IP_MASK) {
+    case FSL_DMA_IP_85XX:
+        /* Set the channel to below modes:
+         * EIE - Error interrupt enable
+         * EOLNIE - End of links interrupt enable
+         * BWC - Bandwidth sharing among channels
+         */
+        DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
+            | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32);
+        break;
+    case FSL_DMA_IP_83XX:
+        /* Set the channel to below modes:
+         * EOTIE - End-of-transfer interrupt enable
+         * PRC_RM - PCI read multiple
+         */
+        DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
+            | FSL_DMA_MR_PRC_RM, 32);
+        break;
+    }
 }
 
 static int dma_is_idle(struct fsldma_chan *chan)
@@ -139,25 +186,32 @@ static int dma_is_idle(struct fsldma_chan *chan)
     return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
 }
 
+/*
+ * Start the DMA controller
+ *
+ * Preconditions:
+ * - the CDAR register must point to the start descriptor
+ * - the MRn[CS] bit must be cleared
+ */
 static void dma_start(struct fsldma_chan *chan)
 {
     u32 mode;
 
     mode = DMA_IN(chan, &chan->regs->mr, 32);
 
-    if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
-        if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
-            DMA_OUT(chan, &chan->regs->bcr, 0, 32);
-            mode |= FSL_DMA_MR_EMP_EN;
-        } else {
-            mode &= ~FSL_DMA_MR_EMP_EN;
-        }
+    if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
+        DMA_OUT(chan, &chan->regs->bcr, 0, 32);
+        mode |= FSL_DMA_MR_EMP_EN;
+    } else {
+        mode &= ~FSL_DMA_MR_EMP_EN;
     }
 
-    if (chan->feature & FSL_DMA_CHAN_START_EXT)
+    if (chan->feature & FSL_DMA_CHAN_START_EXT) {
         mode |= FSL_DMA_MR_EMS_EN;
-    else
+    } else {
+        mode &= ~FSL_DMA_MR_EMS_EN;
         mode |= FSL_DMA_MR_CS;
+    }
 
     DMA_OUT(chan, &chan->regs->mr, mode, 32);
 }
 
@@ -167,13 +221,26 @@ static void dma_halt(struct fsldma_chan *chan)
     u32 mode;
     int i;
 
+    /* read the mode register */
     mode = DMA_IN(chan, &chan->regs->mr, 32);
-    mode |= FSL_DMA_MR_CA;
-    DMA_OUT(chan, &chan->regs->mr, mode, 32);
 
-    mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
+    /*
+     * The 85xx controller supports channel abort, which will stop
+     * the current transfer. On 83xx, this bit is the transfer error
+     * mask bit, which should not be changed.
+     */
+    if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
+        mode |= FSL_DMA_MR_CA;
+        DMA_OUT(chan, &chan->regs->mr, mode, 32);
+
+        mode &= ~FSL_DMA_MR_CA;
+    }
+
+    /* stop the DMA controller */
+    mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
     DMA_OUT(chan, &chan->regs->mr, mode, 32);
 
+    /* wait for the DMA controller to become idle */
     for (i = 0; i < 100; i++) {
         if (dma_is_idle(chan))
             return;
@@ -182,20 +249,7 @@ static void dma_halt(struct fsldma_chan *chan)
     }
 
     if (!dma_is_idle(chan))
-        dev_err(chan->dev, "DMA halt timeout!\n");
-}
-
-static void set_ld_eol(struct fsldma_chan *chan,
-            struct fsl_desc_sw *desc)
-{
-    u64 snoop_bits;
-
-    snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
-        ? FSL_DMA_SNEN : 0;
-
-    desc->hw.next_ln_addr = CPU_TO_DMA(chan,
-        DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
-        | snoop_bits, 64);
+        chan_err(chan, "DMA halt timeout!\n");
 }
 
 /**
@@ -321,8 +375,7 @@ static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
         chan->feature &= ~FSL_DMA_CHAN_START_EXT;
 }
 
-static void append_ld_queue(struct fsldma_chan *chan,
-            struct fsl_desc_sw *desc)
+static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
 {
     struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);
 
@@ -363,8 +416,8 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
     cookie = chan->common.cookie;
     list_for_each_entry(child, &desc->tx_list, node) {
         cookie++;
-        if (cookie < 0)
-            cookie = 1;
+        if (cookie < DMA_MIN_COOKIE)
+            cookie = DMA_MIN_COOKIE;
 
         child->async_tx.cookie = cookie;
     }
@@ -385,15 +438,14 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
  *
  * Return - The descriptor allocated. NULL for failed.
  */
-static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
-                    struct fsldma_chan *chan)
+static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
 {
     struct fsl_desc_sw *desc;
     dma_addr_t pdesc;
 
     desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
     if (!desc) {
-        dev_dbg(chan->dev, "out of memory for link desc\n");
+        chan_dbg(chan, "out of memory for link descriptor\n");
         return NULL;
     }
 
@@ -403,10 +455,13 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
     desc->async_tx.tx_submit = fsl_dma_tx_submit;
     desc->async_tx.phys = pdesc;
 
+#ifdef FSL_DMA_LD_DEBUG
+    chan_dbg(chan, "LD %p allocated\n", desc);
+#endif
+
     return desc;
 }
 
-
 /**
  * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
  * @chan : Freescale DMA channel
@@ -427,13 +482,11 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
      * We need the descriptor to be aligned to 32bytes
      * for meeting FSL DMA specification requirement.
      */
-    chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
-                      chan->dev,
+    chan->desc_pool = dma_pool_create(chan->name, chan->dev,
                       sizeof(struct fsl_desc_sw),
                       __alignof__(struct fsl_desc_sw), 0);
     if (!chan->desc_pool) {
-        dev_err(chan->dev, "unable to allocate channel %d "
-            "descriptor pool\n", chan->id);
+        chan_err(chan, "unable to allocate descriptor pool\n");
         return -ENOMEM;
     }
 
@@ -455,6 +508,9 @@ static void fsldma_free_desc_list(struct fsldma_chan *chan,
 
     list_for_each_entry_safe(desc, _desc, list, node) {
         list_del(&desc->node);
+#ifdef FSL_DMA_LD_DEBUG
+        chan_dbg(chan, "LD %p free\n", desc);
+#endif
         dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
     }
 }
@@ -466,6 +522,9 @@ static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
 
     list_for_each_entry_safe_reverse(desc, _desc, list, node) {
         list_del(&desc->node);
+#ifdef FSL_DMA_LD_DEBUG
+        chan_dbg(chan, "LD %p free\n", desc);
+#endif
        dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
     }
 }
@@ -479,7 +538,7 @@ static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
     struct fsldma_chan *chan = to_fsl_chan(dchan);
     unsigned long flags;
 
-    dev_dbg(chan->dev, "Free all channel resources.\n");
+    chan_dbg(chan, "free all channel resources\n");
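
The new arch/arm/mach-mxs/include/mach/dma.h header at the top of this diff exposes `mxs_dma_is_apbh()`/`mxs_dma_is_apbx()` and a `struct mxs_dma_data` carrying the channel IRQ, so that client drivers can pick the right engine through the generic dmaengine filter mechanism. A minimal sketch of that flow; the filter below is illustrative and not part of this merge, and whether mxs-dma.c consumes `chan->private` this way is not visible in this excerpt:

	#include <linux/dmaengine.h>
	#include <mach/dma.h>

	/* Hypothetical filter: claim an APBH channel, hand it its IRQ. */
	static bool example_apbh_filter(struct dma_chan *chan, void *param)
	{
		if (!mxs_dma_is_apbh(chan))
			return false;
		chan->private = param;	/* assumed consumed at channel allocation */
		return true;
	}

	static struct dma_chan *example_request_apbh_chan(struct mxs_dma_data *data)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		return dma_request_channel(mask, example_apbh_filter, data);
	}

Separately, the dmatest change above turns the hard-coded 3000 ms completion wait into a module parameter, so a run can be tuned at load time, e.g. `modprobe dmatest timeout=10000 iterations=100` (`iterations` is a pre-existing dmatest parameter referenced in the hunk above).
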