author | Dan Williams <dan.j.williams@intel.com> | 2009-09-08 17:55:21 -0700
committer | Dan Williams <dan.j.williams@intel.com> | 2009-09-08 17:55:21 -0700
commit | bbb20089a3275a19e475dbc21320c3742e3ca423 (patch)
tree | 216fdc1cbef450ca688135c5b8969169482d9a48 /drivers/dma
parent | 3e48e656903e9fd8bc805c6a2c4264d7808d315b (diff)
parent | 657a77fa7284d8ae28dfa48f1dc5d919bf5b2843 (diff)
Merge branch 'dmaengine' into async-tx-next
Conflicts:
crypto/async_tx/async_xor.c
drivers/dma/ioat/dma_v2.h
drivers/dma/ioat/pci.c
drivers/md/raid5.c
Diffstat (limited to 'drivers/dma')
-rw-r--r-- | drivers/dma/Kconfig | 18
-rw-r--r-- | drivers/dma/Makefile | 2
-rw-r--r-- | drivers/dma/at_hdmac.c | 1215
-rw-r--r-- | drivers/dma/at_hdmac_regs.h | 354
-rw-r--r-- | drivers/dma/dmaengine.c | 1
-rw-r--r-- | drivers/dma/dmatest.c | 21
-rw-r--r-- | drivers/dma/dw_dmac.c | 50
-rw-r--r-- | drivers/dma/dw_dmac_regs.h | 1
-rw-r--r-- | drivers/dma/fsldma.c | 305
-rw-r--r-- | drivers/dma/fsldma.h | 5
-rw-r--r-- | drivers/dma/ioat/dma.c | 7
-rw-r--r-- | drivers/dma/ioat/dma.h | 3
-rw-r--r-- | drivers/dma/ioat/dma_v2.c | 5
-rw-r--r-- | drivers/dma/ioat/dma_v2.h | 3
-rw-r--r-- | drivers/dma/ioat/pci.c | 16
-rw-r--r-- | drivers/dma/iop-adma.c | 9
-rw-r--r-- | drivers/dma/mv_xor.c | 9
-rw-r--r-- | drivers/dma/mv_xor.h | 4
-rw-r--r-- | drivers/dma/txx9dmac.c | 1356
-rw-r--r-- | drivers/dma/txx9dmac.h | 308
20 files changed, 3618 insertions, 74 deletions
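
A hedged reading aid, not part of the commit: the slave-DMA path added below consumes platform data through chan->private. The sketch assumes struct at_dma_slave lives in a platform header (written here as <mach/at_hdmac.h>, which is outside this diffstat); the field names are exactly those read by atc_prep_slave_sg() and atc_alloc_chan_resources() in the patch, while the USART register addresses and the filter function are illustrative placeholders.

/* Sketch only -- not from this patch. struct at_dma_slave is assumed to
 * come from a platform header not shown in this diff; the fields below
 * are the ones the driver actually reads. */
#include <linux/dmaengine.h>
#include <mach/at_hdmac.h>	/* assumed location of struct at_dma_slave */

static struct at_dma_slave board_usart_dma = {
	.dma_dev = NULL,	/* must point at the DMAC's struct device;
				 * atc_alloc_chan_resources() BUG_ONs on a
				 * mismatch */
	.reg_width = 0,		/* peripheral register width code, fed to
				 * ATC_SRC_WIDTH()/ATC_DST_WIDTH() */
	.tx_reg = 0xfff8c01c,	/* illustrative USART THR address */
	.rx_reg = 0xfff8c018,	/* illustrative USART RHR address */
};

/* dmaengine filter callback: attach the slave parameters to the channel */
static bool board_at_dma_filter(struct dma_chan *chan, void *slave)
{
	chan->private = slave;
	return true;
}

static struct dma_chan *board_request_usart_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, board_at_dma_filter,
				   &board_usart_dma);
}
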
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index ddcd9793b25..fe1f3717b1f 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -50,6 +50,14 @@ config DW_DMAC
 	  Support the Synopsys DesignWare AHB DMA controller. This
 	  can be integrated in chips such as the Atmel AT32ap7000.
 
+config AT_HDMAC
+	tristate "Atmel AHB DMA support"
+	depends on ARCH_AT91SAM9RL
+	select DMA_ENGINE
+	help
+	  Support the Atmel AHB DMA controller. This can be
+	  integrated in chips such as the Atmel AT91SAM9RL.
+
 config FSL_DMA
 	tristate "Freescale Elo and Elo Plus DMA support"
 	depends on FSL_SOC
@@ -85,6 +93,14 @@ config MX3_IPU_IRQS
 	  To avoid bloating the irq_desc[] array we allocate a sufficient
 	  number of IRQ slots and map them dynamically to specific sources.
 
+config TXX9_DMAC
+	tristate "Toshiba TXx9 SoC DMA support"
+	depends on MACH_TX49XX || MACH_TX39XX
+	select DMA_ENGINE
+	help
+	  Support the TXx9 SoC internal DMA controller. This can be
+	  integrated in chips such as the Toshiba TX4927/38/39.
+
 config DMA_ENGINE
 	bool
@@ -104,7 +120,7 @@ config NET_DMA
 
 config ASYNC_TX_DMA
 	bool "Async_tx: Offload support for the async_tx api"
-	depends on DMA_ENGINE
+	depends on DMA_ENGINE && !HIGHMEM64G
 	help
 	  This allows the async_tx api to take advantage of offload engines for
 	  memcpy, memset, xor, and raid6 p+q operations. If your platform has
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a1cb2857bba..8f115e93b4a 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -6,4 +6,6 @@ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
 obj-$(CONFIG_MV_XOR) += mv_xor.o
 obj-$(CONFIG_DW_DMAC) += dw_dmac.o
+obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
 obj-$(CONFIG_MX3_IPU) += ipu/
+obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
new file mode 100644
index 00000000000..0aeb578a24e
--- /dev/null
+++ b/drivers/dma/at_hdmac.c
@@ -0,0 +1,1215 @@
+/*
+ * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
+ *
+ * Copyright (C) 2008 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ *
+ * This supports the Atmel AHB DMA Controller.
+ *
+ * The driver has currently been tested with the Atmel AT91SAM9RL
+ * and AT91SAM9G45 series.
+ */
+
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "at_hdmac_regs.h"
+
+/*
+ * Glossary
+ * --------
+ *
+ * at_hdmac		: Name of the Atmel AHB DMA Controller
+ * at_dma_ / atdma	: Atmel DMA controller entity related
+ * atc_ / atchan	: Atmel DMA Channel entity related
+ */
+
+#define ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
+#define ATC_DEFAULT_CTRLA	(0)
+#define ATC_DEFAULT_CTRLB	(ATC_SIF(0)	\
+				|ATC_DIF(1))
+
+/*
+ * Initial number of descriptors to allocate for each channel. This could
+ * be increased during DMA usage.
+ */
+static unsigned int init_nr_desc_per_channel = 64;
+module_param(init_nr_desc_per_channel, uint, 0644);
+MODULE_PARM_DESC(init_nr_desc_per_channel,
+		 "initial descriptors per channel (default: 64)");
+
+
+/* prototypes */
+static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
+
+
+/*----------------------------------------------------------------------*/
+
+static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
+{
+	return list_first_entry(&atchan->active_list,
+				struct at_desc, desc_node);
+}
+
+static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
+{
+	return list_first_entry(&atchan->queue,
+				struct at_desc, desc_node);
+}
+
+/**
+ * atc_alloc_descriptor - allocate and return an initialized descriptor
+ * @chan: the channel to allocate descriptors for
+ * @gfp_flags: GFP allocation flags
+ *
+ * Note: The ack-bit is positioned in the descriptor flag at creation time
+ * to make initial allocation more convenient. This bit will be cleared
+ * and control will be given to client at usage time (during
+ * preparation functions).
+ */
+static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
+					    gfp_t gfp_flags)
+{
+	struct at_desc *desc = NULL;
+	struct at_dma *atdma = to_at_dma(chan->device);
+	dma_addr_t phys;
+
+	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
+	if (desc) {
+		memset(desc, 0, sizeof(struct at_desc));
+		INIT_LIST_HEAD(&desc->tx_list);
+		dma_async_tx_descriptor_init(&desc->txd, chan);
+		/* txd.flags will be overwritten in prep functions */
+		desc->txd.flags = DMA_CTRL_ACK;
+		desc->txd.tx_submit = atc_tx_submit;
+		desc->txd.phys = phys;
+	}
+
+	return desc;
+}
+
+/**
+ * atc_desc_get - get an unused descriptor from free_list
+ * @atchan: channel we want a new descriptor for
+ */
+static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
+{
+	struct at_desc *desc, *_desc;
+	struct at_desc *ret = NULL;
+	unsigned int i = 0;
+	LIST_HEAD(tmp_list);
+
+	spin_lock_bh(&atchan->lock);
+	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
+		i++;
+		if (async_tx_test_ack(&desc->txd)) {
+			list_del(&desc->desc_node);
+			ret = desc;
+			break;
+		}
+		dev_dbg(chan2dev(&atchan->chan_common),
+				"desc %p not ACKed\n", desc);
+	}
+	spin_unlock_bh(&atchan->lock);
+	dev_vdbg(chan2dev(&atchan->chan_common),
+		"scanned %u descriptors on freelist\n", i);
+
+	/* no more descriptors available in initial pool: create one more */
+	if (!ret) {
+		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
+		if (ret) {
+			spin_lock_bh(&atchan->lock);
+			atchan->descs_allocated++;
+			spin_unlock_bh(&atchan->lock);
+		} else {
+			dev_err(chan2dev(&atchan->chan_common),
+					"not enough descriptors available\n");
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * atc_desc_put - move a descriptor, including any children, to the free list
+ * @atchan: channel we work on
+ * @desc: descriptor, at the head of a chain, to move to free list
+ */
+static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
+{
+	if (desc) {
+		struct at_desc *child;
+
+		spin_lock_bh(&atchan->lock);
+		list_for_each_entry(child, &desc->tx_list, desc_node)
+			dev_vdbg(chan2dev(&atchan->chan_common),
+					"moving child desc %p to freelist\n",
+					child);
+		list_splice_init(&desc->tx_list, &atchan->free_list);
+		dev_vdbg(chan2dev(&atchan->chan_common),
+			 "moving desc %p to freelist\n", desc);
+		list_add(&desc->desc_node, &atchan->free_list);
+		spin_unlock_bh(&atchan->lock);
+	}
+}
+
+/**
+ * atc_assign_cookie - compute and assign new cookie
+ * @atchan: channel we work on
+ * @desc: descriptor to assign cookie for
+ *
+ * Called with atchan->lock held and bh disabled
+ */
+static dma_cookie_t
+atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc)
+{
+	dma_cookie_t cookie = atchan->chan_common.cookie;
+
+	if (++cookie < 0)
+		cookie = 1;
+
+	atchan->chan_common.cookie = cookie;
+	desc->txd.cookie = cookie;
+
+	return cookie;
+}
+
+/**
+ * atc_dostart - starts the DMA engine for real
+ * @atchan: the channel we want to start
+ * @first: first descriptor in the list we want to begin with
+ *
+ * Called with atchan->lock held and bh disabled
+ */
+static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
+{
+	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
+
+	/* ASSERT: channel is idle */
+	if (atc_chan_is_enabled(atchan)) {
+		dev_err(chan2dev(&atchan->chan_common),
+			"BUG: Attempted to start non-idle channel\n");
+		dev_err(chan2dev(&atchan->chan_common),
+			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
+			channel_readl(atchan, SADDR),
+			channel_readl(atchan, DADDR),
+			channel_readl(atchan, CTRLA),
+			channel_readl(atchan, CTRLB),
+			channel_readl(atchan, DSCR));
+
+		/* The tasklet will hopefully advance the queue... */
+		return;
+	}
+
+	vdbg_dump_regs(atchan);
+
+	/* clear any pending interrupt */
+	while (dma_readl(atdma, EBCISR))
+		cpu_relax();
+
+	channel_writel(atchan, SADDR, 0);
+	channel_writel(atchan, DADDR, 0);
+	channel_writel(atchan, CTRLA, 0);
+	channel_writel(atchan, CTRLB, 0);
+	channel_writel(atchan, DSCR, first->txd.phys);
+	dma_writel(atdma, CHER, atchan->mask);
+
+	vdbg_dump_regs(atchan);
+}
+
+/**
+ * atc_chain_complete - finish work for one transaction chain
+ * @atchan: channel we work on
+ * @desc: descriptor at the head of the chain we want to complete
+ *
+ * Called with atchan->lock held and bh disabled
+ */
+static void
+atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
+{
+	dma_async_tx_callback callback;
+	void *param;
+	struct dma_async_tx_descriptor *txd = &desc->txd;
+
+	dev_vdbg(chan2dev(&atchan->chan_common),
+		"descriptor %u complete\n", txd->cookie);
+
+	atchan->completed_cookie = txd->cookie;
+	callback = txd->callback;
+	param = txd->callback_param;
+
+	/* move children to free_list */
+	list_splice_init(&desc->tx_list, &atchan->free_list);
+	/* move myself to free_list */
+	list_move(&desc->desc_node, &atchan->free_list);
+
+	/* unmap dma addresses */
+	if (!atchan->chan_common.private) {
+		struct device *parent = chan2parent(&atchan->chan_common);
+		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+				dma_unmap_single(parent,
+						desc->lli.daddr,
+						desc->len, DMA_FROM_DEVICE);
+			else
+				dma_unmap_page(parent,
+						desc->lli.daddr,
+						desc->len, DMA_FROM_DEVICE);
+		}
+		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+				dma_unmap_single(parent,
+						desc->lli.saddr,
+						desc->len, DMA_TO_DEVICE);
+			else
+				dma_unmap_page(parent,
+						desc->lli.saddr,
+						desc->len, DMA_TO_DEVICE);
+		}
+	}
+
+	/*
+	 * The API requires that no submissions are done from a
+	 * callback, so we don't need to drop the lock here
+	 */
+	if (callback)
+		callback(param);
+
+	dma_run_dependencies(txd);
+}
+
+/**
+ * atc_complete_all - finish work for all transactions
+ * @atchan: channel to complete transactions for
+ *
+ * Eventually submit queued descriptors if any
+ *
+ * Assume channel is idle while calling this function
+ * Called with atchan->lock held and bh disabled
+ */
+static void atc_complete_all(struct at_dma_chan *atchan)
+{
+	struct at_desc *desc, *_desc;
+	LIST_HEAD(list);
+
+	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
+
+	BUG_ON(atc_chan_is_enabled(atchan));
+
+	/*
+	 * Submit queued descriptors ASAP, i.e. before we go through
+	 * the completed ones.
+	 */
+	if (!list_empty(&atchan->queue))
+		atc_dostart(atchan, atc_first_queued(atchan));
+	/* empty active_list now it is completed */
+	list_splice_init(&atchan->active_list, &list);
+	/* empty queue list by moving descriptors (if any) to active_list */
+	list_splice_init(&atchan->queue, &atchan->active_list);
+
+	list_for_each_entry_safe(desc, _desc, &list, desc_node)
+		atc_chain_complete(atchan, desc);
+}
+
+/**
+ * atc_cleanup_descriptors - clean up finished descriptors in active_list
+ * @atchan: channel to be cleaned up
+ *
+ * Called with atchan->lock held and bh disabled
+ */
+static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
+{
+	struct at_desc *desc, *_desc;
+	struct at_desc *child;
+
+	dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");
+
+	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
+		if (!(desc->lli.ctrla & ATC_DONE))
+			/* This one is currently in progress */
+			return;
+
+		list_for_each_entry(child, &desc->tx_list, desc_node)
+			if (!(child->lli.ctrla & ATC_DONE))
+				/* Currently in progress */
+				return;
+
+		/*
+		 * No descriptors so far seem to be in progress, i.e.
+		 * this chain must be done.
+		 */
+		atc_chain_complete(atchan, desc);
+	}
+}
+
+/**
+ * atc_advance_work - at the end of a transaction, move forward
+ * @atchan: channel where the transaction ended
+ *
+ * Called with atchan->lock held and bh disabled
+ */
+static void atc_advance_work(struct at_dma_chan *atchan)
+{
+	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
+
+	if (list_empty(&atchan->active_list) ||
+	    list_is_singular(&atchan->active_list)) {
+		atc_complete_all(atchan);
+	} else {
+		atc_chain_complete(atchan, atc_first_active(atchan));
+		/* advance work */
+		atc_dostart(atchan, atc_first_active(atchan));
+	}
+}
+
+
+/**
+ * atc_handle_error - handle errors reported by DMA controller
+ * @atchan: channel where error occurs
+ *
+ * Called with atchan->lock held and bh disabled
+ */
+static void atc_handle_error(struct at_dma_chan *atchan)
+{
+	struct at_desc *bad_desc;
+	struct at_desc *child;
+
+	/*
+	 * The descriptor currently at the head of the active list is
+	 * broken. Since we don't have any way to report errors, we'll
+	 * just have to scream loudly and try to carry on.
+	 */
+	bad_desc = atc_first_active(atchan);
+	list_del_init(&bad_desc->desc_node);
+
+	/* As we are stopped, take advantage to push queued descriptors
+	 * in active_list */
+	list_splice_init(&atchan->queue, atchan->active_list.prev);
+
+	/* Try to restart the controller */
+	if (!list_empty(&atchan->active_list))
+		atc_dostart(atchan, atc_first_active(atchan));
+
+	/*
+	 * KERN_CRITICAL may seem harsh, but since this only happens
+	 * when someone submits a bad physical address in a
+	 * descriptor, we should consider ourselves lucky that the
+	 * controller flagged an error instead of scribbling over
+	 * random memory locations.
+	 */
+	dev_crit(chan2dev(&atchan->chan_common),
+			"Bad descriptor submitted for DMA!\n");
+	dev_crit(chan2dev(&atchan->chan_common),
+			"  cookie: %d\n", bad_desc->txd.cookie);
+	atc_dump_lli(atchan, &bad_desc->lli);
+	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
+		atc_dump_lli(atchan, &child->lli);
+
+	/* Pretend the descriptor completed successfully */
+	atc_chain_complete(atchan, bad_desc);
+}
+
+
+/*--  IRQ & Tasklet  ---------------------------------------------------*/
+
+static void atc_tasklet(unsigned long data)
+{
+	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
+
+	/* Channel cannot be enabled here */
+	if (atc_chan_is_enabled(atchan)) {
+		dev_err(chan2dev(&atchan->chan_common),
+			"BUG: channel enabled in tasklet\n");
+		return;
+	}
+
+	spin_lock(&atchan->lock);
+	if (test_and_clear_bit(0, &atchan->error_status))
+		atc_handle_error(atchan);
+	else
+		atc_advance_work(atchan);
+
+	spin_unlock(&atchan->lock);
+}
+
+static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
+{
+	struct at_dma *atdma = (struct at_dma *)dev_id;
+	struct at_dma_chan *atchan;
+	int i;
+	u32 status, pending, imr;
+	int ret = IRQ_NONE;
+
+	do {
+		imr = dma_readl(atdma, EBCIMR);
+		status = dma_readl(atdma, EBCISR);
+		pending = status & imr;
+
+		if (!pending)
+			break;
+
+		dev_vdbg(atdma->dma_common.dev,
+			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
+			status, imr, pending);
+
+		for (i = 0; i < atdma->dma_common.chancnt; i++) {
+			atchan = &atdma->chan[i];
+			if (pending & (AT_DMA_CBTC(i) | AT_DMA_ERR(i))) {
+				if (pending & AT_DMA_ERR(i)) {
+					/* Disable channel on AHB error */
+					dma_writel(atdma, CHDR, atchan->mask);
+					/* Give information to tasklet */
+					set_bit(0, &atchan->error_status);
+				}
+				tasklet_schedule(&atchan->tasklet);
+				ret = IRQ_HANDLED;
+			}
+		}
+
+	} while (pending);
+
+	return ret;
+}
+
+
+/*--  DMA Engine API  --------------------------------------------------*/
+
+/**
+ * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
+ * @tx: descriptor at the head of the transaction chain
+ *
+ * Queue chain if DMA engine is working already
+ *
+ * Cookie increment and adding to active_list or queue must be atomic
+ */
+static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct at_desc *desc = txd_to_at_desc(tx);
+	struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
+	dma_cookie_t cookie;
+
+	spin_lock_bh(&atchan->lock);
+	cookie = atc_assign_cookie(atchan, desc);
+
+	if (list_empty(&atchan->active_list)) {
+		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
+				desc->txd.cookie);
+		atc_dostart(atchan, desc);
+		list_add_tail(&desc->desc_node, &atchan->active_list);
+	} else {
+		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
+				desc->txd.cookie);
+		list_add_tail(&desc->desc_node, &atchan->queue);
+	}
+
+	spin_unlock_bh(&atchan->lock);
+
+	return cookie;
+}
+
+/**
+ * atc_prep_dma_memcpy - prepare a memcpy operation
+ * @chan: the channel to prepare operation on
+ * @dest: operation virtual destination address
+ * @src: operation virtual source address
+ * @len: operation length
+ * @flags: tx descriptor status flags
+ */
+static struct dma_async_tx_descriptor *
+atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+		size_t len, unsigned long flags)
+{
+	struct at_dma_chan *atchan = to_at_dma_chan(chan);
+	struct at_desc *desc = NULL;
+	struct at_desc *first = NULL;
+	struct at_desc *prev = NULL;
+	size_t xfer_count;
+	size_t offset;
+	unsigned int src_width;
+	unsigned int dst_width;
+	u32 ctrla;
+	u32 ctrlb;
+
+	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
+			dest, src, len, flags);
+
+	if (unlikely(!len)) {
+		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
+		return NULL;
+	}
+
+	ctrla = ATC_DEFAULT_CTRLA;
+	ctrlb = ATC_DEFAULT_CTRLB
+		| ATC_SRC_ADDR_MODE_INCR
+		| ATC_DST_ADDR_MODE_INCR
+		| ATC_FC_MEM2MEM;
+
+	/*
+	 * We can be a lot more clever here, but this should take care
+	 * of the most common optimization.
+	 */
+	if (!((src | dest | len) & 3)) {
+		ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
+		src_width = dst_width = 2;
+	} else if (!((src | dest | len) & 1)) {
+		ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
+		src_width = dst_width = 1;
+	} else {
+		ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
+		src_width = dst_width = 0;
+	}
+
+	for (offset = 0; offset < len; offset += xfer_count << src_width) {
+		xfer_count = min_t(size_t, (len - offset) >> src_width,
+				ATC_BTSIZE_MAX);
+
+		desc = atc_desc_get(atchan);
+		if (!desc)
+			goto err_desc_get;
+
+		desc->lli.saddr = src + offset;
+		desc->lli.daddr = dest + offset;
+		desc->lli.ctrla = ctrla | xfer_count;
+		desc->lli.ctrlb = ctrlb;
+
+		desc->txd.cookie = 0;
+		async_tx_ack(&desc->txd);
+
+		if (!first) {
+			first = desc;
+		} else {
+			/* inform the HW lli about chaining */
+			prev->lli.dscr = desc->txd.phys;
+			/* insert the link descriptor to the LD ring */
+			list_add_tail(&desc->desc_node,
+					&first->tx_list);
+		}
+		prev = desc;
+	}
+
+	/* First descriptor of the chain embeds additional information */
+	first->txd.cookie = -EBUSY;
+	first->len = len;
+
+	/* set end-of-link to the last link descriptor of list */
+	set_desc_eol(desc);
+
+	desc->txd.flags = flags; /* client is in control of this ack */
+
+	return &first->txd;
+
+err_desc_get:
+	atc_desc_put(atchan, first);
+	return NULL;
+}
+
+
+/**
+ * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
+ * @chan: DMA channel
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @sgl
+ * @direction: DMA direction
+ * @flags: tx descriptor status flags
+ */
+static struct dma_async_tx_descriptor *
+atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_data_direction direction,
+		unsigned long flags)
+{
+	struct at_dma_chan *atchan = to_at_dma_chan(chan);
+	struct at_dma_slave *atslave = chan->private;
+	struct at_desc *first = NULL;
+	struct at_desc *prev = NULL;
+	u32 ctrla;
+	u32 ctrlb;
+	dma_addr_t reg;
+	unsigned int reg_width;
+	unsigned int mem_width;
+	unsigned int i;
+	struct scatterlist *sg;
+	size_t total_len = 0;
+
+	dev_vdbg(chan2dev(chan), "prep_slave_sg: %s f0x%lx\n",
+			direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
"TO DEVICE" : "FROM DEVICE", + flags); + + if (unlikely(!atslave || !sg_len)) { + dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); + return NULL; + } + + reg_width = atslave->reg_width; + + ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla; + ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN; + + switch (direction) { + case DMA_TO_DEVICE: + ctrla |= ATC_DST_WIDTH(reg_width); + ctrlb |= ATC_DST_ADDR_MODE_FIXED + | ATC_SRC_ADDR_MODE_INCR + | ATC_FC_MEM2PER; + reg = atslave->tx_reg; + for_each_sg(sgl, sg, sg_len, i) { + struct at_desc *desc; + u32 len; + u32 mem; + + desc = atc_desc_get(atchan); + if (!desc) + goto err_desc_get; + + mem = sg_phys(sg); + len = sg_dma_len(sg); + mem_width = 2; + if (unlikely(mem & 3 || len & 3)) + mem_width = 0; + + desc->lli.saddr = mem; + desc->lli.daddr = reg; + desc->lli.ctrla = ctrla + | ATC_SRC_WIDTH(mem_width) + | len >> mem_width; + desc->lli.ctrlb = ctrlb; + + if (!first) { + first = desc; + } else { + /* inform the HW lli about chaining */ + prev->lli.dscr = desc->txd.phys; + /* insert the link descriptor to the LD ring */ + list_add_tail(&desc->desc_node, + &first->tx_list); + } + prev = desc; + total_len += len; + } + break; + case DMA_FROM_DEVICE: + ctrla |= ATC_SRC_WIDTH(reg_width); + ctrlb |= ATC_DST_ADDR_MODE_INCR + | ATC_SRC_ADDR_MODE_FIXED + | ATC_FC_PER2MEM; + + reg = atslave->rx_reg; + for_each_sg(sgl, sg, sg_len, i) { + struct at_desc *desc; + u32 len; + u32 mem; + + desc = atc_desc_get(atchan); + if (!desc) + goto err_desc_get; + + mem = sg_phys(sg); + len = sg_dma_len(sg); + mem_width = 2; + if (unlikely(mem & 3 || len & 3)) + mem_width = 0; + + desc->lli.saddr = reg; + desc->lli.daddr = mem; + desc->lli.ctrla = ctrla + | ATC_DST_WIDTH(mem_width) + | len >> mem_width; + desc->lli.ctrlb = ctrlb; + + if (!first) { + first = desc; + } else { + /* inform the HW lli about chaining */ + prev->lli.dscr = desc->txd.phys; + /* insert the link descriptor to the LD ring */ + list_add_tail(&desc->desc_node, + &first->tx_list); + } + prev = desc; + total_len += len; + } + break; + default: + return NULL; + } + + /* set end-of-link to the last link descriptor of list*/ + set_desc_eol(prev); + + /* First descriptor of the chain embedds additional information */ + first->txd.cookie = -EBUSY; + first->len = total_len; + + /* last link descriptor of list is responsible of flags */ + prev->txd.flags = flags; /* client is in control of this ack */ + + return &first->txd; + +err_desc_get: + dev_err(chan2dev(chan), "not enough descriptors available\n"); + atc_desc_put(atchan, first); + return NULL; +} + +static void atc_terminate_all(struct dma_chan *chan) +{ + struct at_dma_chan *atchan = to_at_dma_chan(chan); + struct at_dma *atdma = to_at_dma(chan->device); + struct at_desc *desc, *_desc; + LIST_HEAD(list); + + /* + * This is only called when something went wrong elsewhere, so + * we don't really care about the data. Just disable the + * channel. We still have to poll the channel enable bit due + * to AHB/HSB limitations. 
+	 */
+	spin_lock_bh(&atchan->lock);
+
+	dma_writel(atdma, CHDR, atchan->mask);
+
+	/* confirm that this channel is disabled */
+	while (dma_readl(atdma, CHSR) & atchan->mask)
+		cpu_relax();
+
+	/* active_list entries will end up before queued entries */
+	list_splice_init(&atchan->queue, &list);
+	list_splice_init(&atchan->active_list, &list);
+
+	spin_unlock_bh(&atchan->lock);
+
+	/* Flush all pending and queued descriptors */
+	list_for_each_entry_safe(desc, _desc, &list, desc_node)
+		atc_chain_complete(atchan, desc);
+}
+
+/**
+ * atc_is_tx_complete - poll for transaction completion
+ * @chan: DMA channel
+ * @cookie: transaction identifier to check status of
+ * @done: if not %NULL, updated with last completed transaction
+ * @used: if not %NULL, updated with last used transaction
+ *
+ * If @done and @used are passed in, upon return they reflect the driver
+ * internal state and can be used with dma_async_is_complete() to check
+ * the status of multiple cookies without re-checking hardware state.
+ */
+static enum dma_status
+atc_is_tx_complete(struct dma_chan *chan,
+		dma_cookie_t cookie,
+		dma_cookie_t *done, dma_cookie_t *used)
+{
+	struct at_dma_chan *atchan = to_at_dma_chan(chan);
+	dma_cookie_t last_used;
+	dma_cookie_t last_complete;
+	enum dma_status ret;
+
+	dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
+			cookie, done ? *done : 0, used ? *used : 0);
+
+	spin_lock_bh(&atchan->lock);
+
+	last_complete = atchan->completed_cookie;
+	last_used = chan->cookie;
+
+	ret = dma_async_is_complete(cookie, last_complete, last_used);
+	if (ret != DMA_SUCCESS) {
+		atc_cleanup_descriptors(atchan);
+
+		last_complete = atchan->completed_cookie;
+		last_used = chan->cookie;
+
+		ret = dma_async_is_complete(cookie, last_complete, last_used);
+	}
+
+	spin_unlock_bh(&atchan->lock);
+
+	if (done)
+		*done = last_complete;
+	if (used)
+		*used = last_used;
+
+	return ret;
+}
+
+/**
+ * atc_issue_pending - try to finish work
+ * @chan: target DMA channel
+ */
+static void atc_issue_pending(struct dma_chan *chan)
+{
+	struct at_dma_chan *atchan = to_at_dma_chan(chan);
+
+	dev_vdbg(chan2dev(chan), "issue_pending\n");
+
+	if (!atc_chan_is_enabled(atchan)) {
+		spin_lock_bh(&atchan->lock);
+		atc_advance_work(atchan);
+		spin_unlock_bh(&atchan->lock);
+	}
+}
+
+/**
+ * atc_alloc_chan_resources - allocate resources for DMA channel
+ * @chan: allocate descriptor resources for this channel
+ *
+ * return - the number of allocated descriptors
+ */
+static int atc_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct at_dma_chan *atchan = to_at_dma_chan(chan);
+	struct at_dma *atdma = to_at_dma(chan->device);
+	struct at_desc *desc;
+	struct at_dma_slave *atslave;
+	int i;
+	u32 cfg;
+	LIST_HEAD(tmp_list);
+
+	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
+
+	/* ASSERT: channel is idle */
+	if (atc_chan_is_enabled(atchan)) {
+		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
+		return -EIO;
+	}
+
+	cfg = ATC_DEFAULT_CFG;
+
+	atslave = chan->private;
+	if (atslave) {
+		/*
+		 * We need controller-specific data to set up slave
+		 * transfers.
+		 */
+		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);
+
+		/* if cfg configuration specified take it instead of default */
+		if (atslave->cfg)
+			cfg = atslave->cfg;
+	}
+
+	/* have we already been set up?
+	 * reconfigure channel but no need to reallocate descriptors */
+	if (!list_empty(&atchan->free_list))
+		return atchan->descs_allocated;
+
+	/* Allocate initial pool of descriptors */
+	for (i = 0; i < init_nr_desc_per_channel; i++) {
+		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
+		if (!desc) {
+			dev_err(atdma->dma_common.dev,
+				"Only %d initial descriptors\n", i);
+			break;
+		}
+		list_add_tail(&desc->desc_node, &tmp_list);
+	}
+
+	spin_lock_bh(&atchan->lock);
+	atchan->descs_allocated = i;
+	list_splice(&tmp_list, &atchan->free_list);
+	atchan->completed_cookie = chan->cookie = 1;
+	spin_unlock_bh(&atchan->lock);
+
+	/* channel parameters */
+	channel_writel(atchan, CFG, cfg);
+
+	dev_dbg(chan2dev(chan),
+		"alloc_chan_resources: allocated %d descriptors\n",
+		atchan->descs_allocated);
+
+	return atchan->descs_allocated;
+}
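
A hedged client-side sketch, not part of the commit: it exercises the memcpy path that atc_prep_dma_memcpy() implements, using only generic dmaengine calls of this kernel generation (device_prep_dma_memcpy, tx_submit, dma_async_issue_pending, dma_async_is_tx_complete). The function name and the assumption that dst/src are already DMA-mapped are illustrative.

/* Sketch only -- not from this patch. dst and src must already be
 * dma_addr_t values obtained from dma_map_single() or similar. */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <asm/processor.h>	/* cpu_relax() */

static int example_hdmac_memcpy(struct dma_chan *chan,
				dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* builds the LLI chain; returns NULL if no descriptor is free */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	/* atc_tx_submit(): assign a cookie, start or queue the chain */
	cookie = tx->tx_submit(tx);

	/* kicks atc_issue_pending() if the channel is idle */
	dma_async_issue_pending(chan);

	/* busy-wait on atc_is_tx_complete() until the cookie completes */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
			!= DMA_SUCCESS)
		cpu_relax();

	return 0;
}
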