Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig                                   37
-rw-r--r--  drivers/dma/Makefile                                   6
-rw-r--r--  drivers/dma/amba-pl08x.c                             941
-rw-r--r--  drivers/dma/at_hdmac.c                                11
-rw-r--r--  drivers/dma/coh901318.c                               72
-rw-r--r--  drivers/dma/dmaengine.c                               20
-rw-r--r--  drivers/dma/dw_dmac.c                                182
-rw-r--r--  drivers/dma/dw_dmac_regs.h                             8
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c                            8
-rw-r--r--  drivers/dma/ipu/ipu_irq.c                             14
-rw-r--r--  drivers/dma/mmp_tdma.c                               610
-rw-r--r--  drivers/dma/mxs-dma.c                                  3
-rw-r--r--  drivers/dma/omap-dma.c                               669
-rw-r--r--  drivers/dma/sa11x0-dma.c                             388
-rw-r--r--  drivers/dma/sh/Makefile                                2
-rw-r--r--  drivers/dma/sh/shdma-base.c                          943
-rw-r--r--  drivers/dma/sh/shdma.c                               955
-rw-r--r--  drivers/dma/sh/shdma.h (renamed from drivers/dma/shdma.h)  46
-rw-r--r--  drivers/dma/shdma.c                                 1524
-rw-r--r--  drivers/dma/tegra20-apb-dma.c                       1415
-rw-r--r--  drivers/dma/virt-dma.c                               123
-rw-r--r--  drivers/dma/virt-dma.h                               152
22 files changed, 5738 insertions(+), 2391 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index aadeb5be9db..d06ea2950dd 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -53,6 +53,7 @@ config AMBA_PL08X
bool "ARM PrimeCell PL080 or PL081 support"
depends on ARM_AMBA && EXPERIMENTAL
select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
help
Platform has a PL08x DMAC device
which can provide DMA engine support
@@ -148,6 +149,20 @@ config TXX9_DMAC
Support the TXx9 SoC internal DMA controller. This can be
integrated in chips such as the Toshiba TX4927/38/39.
+config TEGRA20_APB_DMA
+ bool "NVIDIA Tegra20 APB DMA support"
+ depends on ARCH_TEGRA
+ select DMA_ENGINE
+ help
+ Support for the NVIDIA Tegra20 APB DMA controller driver. The
+ DMA controller has multiple DMA channels which can be
+ configured for different peripherals such as audio, UART, SPI and
+ I2C, all of which sit on the APB bus.
+ This DMA controller transfers data from memory to a peripheral FIFO
+ or vice versa. It does not support memory-to-memory transfers.
+
config SH_DMAE
tristate "Renesas SuperH DMAC support"
depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE)
@@ -237,7 +252,7 @@ config IMX_DMA
config MXS_DMA
bool "MXS DMA support"
- depends on SOC_IMX23 || SOC_IMX28
+ depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q
select STMP_DEVICE
select DMA_ENGINE
help
@@ -255,14 +270,34 @@ config DMA_SA11X0
tristate "SA-11x0 DMA support"
depends on ARCH_SA1100
select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
help
Support the DMA engine found on Intel StrongARM SA-1100 and
SA-1110 SoCs. This DMA engine can only be used with on-chip
devices.
+config MMP_TDMA
+ bool "MMP Two-Channel DMA support"
+ depends on ARCH_MMP
+ select DMA_ENGINE
+ help
+ Support the MMP Two-Channel DMA engine.
+ This engine is used for MMP Audio DMA and the pxa910 SQU.
+
+ Say Y here if you have enabled MMP ADMA; otherwise say N.
+
+config DMA_OMAP
+ tristate "OMAP DMA support"
+ depends on ARCH_OMAP
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+
config DMA_ENGINE
bool
+config DMA_VIRTUAL_CHANNELS
+ tristate
+
comment "DMA Clients"
depends on DMA_ENGINE
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 86b795baba9..4cf6b128ab9 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -2,6 +2,7 @@ ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG
ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
+obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
obj-$(CONFIG_NET_DMA) += iovlock.o
obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
obj-$(CONFIG_DMATEST) += dmatest.o
@@ -14,7 +15,7 @@ obj-$(CONFIG_DW_DMAC) += dw_dmac.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_MX3_IPU) += ipu/
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
-obj-$(CONFIG_SH_DMAE) += shdma.o
+obj-$(CONFIG_SH_DMAE) += sh/
obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
@@ -23,8 +24,11 @@ obj-$(CONFIG_MXS_DMA) += mxs-dma.o
obj-$(CONFIG_TIMB_DMA) += timb_dma.o
obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
+obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
+obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
+obj-$(CONFIG_DMA_OMAP) += omap-dma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 49ecbbb8932..6fbeebb9486 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -86,10 +86,12 @@
#include <asm/hardware/pl080.h>
#include "dmaengine.h"
+#include "virt-dma.h"
#define DRIVER_NAME "pl08xdmac"
static struct amba_driver pl08x_amba_driver;
+struct pl08x_driver_data;
/**
* struct vendor_data - vendor-specific config parameters for PL08x derivatives
@@ -119,6 +121,123 @@ struct pl08x_lli {
};
/**
+ * struct pl08x_bus_data - information about the source or destination
+ * buses for a transfer
+ * @addr: current address
+ * @maxwidth: the maximum width of a transfer on this bus
+ * @buswidth: the width of this bus in bytes: 1, 2 or 4
+ */
+struct pl08x_bus_data {
+ dma_addr_t addr;
+ u8 maxwidth;
+ u8 buswidth;
+};
+
+/**
+ * struct pl08x_phy_chan - holder for the physical channels
+ * @id: physical index to this channel
+ * @base: memory base address for this physical channel
+ * @lock: a lock to use when altering an instance of this struct
+ * @serving: the virtual channel currently being served by this physical
+ * channel
+ * @locked: channel unavailable for the system, e.g. dedicated to secure
+ * world
+ */
+struct pl08x_phy_chan {
+ unsigned int id;
+ void __iomem *base;
+ spinlock_t lock;
+ struct pl08x_dma_chan *serving;
+ bool locked;
+};
+
+/**
+ * struct pl08x_sg - structure containing data per sg
+ * @src_addr: src address of sg
+ * @dst_addr: dst address of sg
+ * @len: transfer length in bytes
+ * @node: node for txd's dsg_list
+ */
+struct pl08x_sg {
+ dma_addr_t src_addr;
+ dma_addr_t dst_addr;
+ size_t len;
+ struct list_head node;
+};
+
+/**
+ * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
+ * @vd: virtual DMA descriptor
+ * @dsg_list: list of children sg's
+ * @llis_bus: DMA memory address (physical) start for the LLIs
+ * @llis_va: virtual memory address start for the LLIs
+ * @cctl: control reg values for current txd
+ * @ccfg: config reg values for current txd
+ * @done: this marks completed descriptors, which should not have their
+ * mux released.
+ */
+struct pl08x_txd {
+ struct virt_dma_desc vd;
+ struct list_head dsg_list;
+ dma_addr_t llis_bus;
+ struct pl08x_lli *llis_va;
+ /* Default cctl value for LLIs */
+ u32 cctl;
+ /*
+ * Settings to be put into the physical channel when we
+ * trigger this txd. Other registers are in llis_va[0].
+ */
+ u32 ccfg;
+ bool done;
+};
+
+/**
+ * enum pl08x_dma_chan_state - holds the PL08x specific virtual channel
+ * states
+ * @PL08X_CHAN_IDLE: the channel is idle
+ * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
+ * channel and is running a transfer on it
+ * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
+ * channel, but the transfer is currently paused
+ * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
+ * channel to become available (only pertains to memcpy channels)
+ */
+enum pl08x_dma_chan_state {
+ PL08X_CHAN_IDLE,
+ PL08X_CHAN_RUNNING,
+ PL08X_CHAN_PAUSED,
+ PL08X_CHAN_WAITING,
+};
+
+/**
+ * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
+ * @vc: wrapped virtual channel
+ * @phychan: the physical channel utilized by this channel, if there is one
+ * @name: name of channel
+ * @cd: channel platform data
+ * @cfg: slave DMA configuration, set via the runtime config
+ * @at: active transaction on this channel
+ * @host: a pointer to the host (internal use)
+ * @state: whether the channel is idle, paused, running etc
+ * @slave: whether this channel is a device (slave) or for memcpy
+ * @signal: the physical DMA request signal which this channel is using
+ * @mux_use: count of descriptors using this DMA request signal setting
+ */
+struct pl08x_dma_chan {
+ struct virt_dma_chan vc;
+ struct pl08x_phy_chan *phychan;
+ const char *name;
+ const struct pl08x_channel_data *cd;
+ struct dma_slave_config cfg;
+ struct pl08x_txd *at;
+ struct pl08x_driver_data *host;
+ enum pl08x_dma_chan_state state;
+ bool slave;
+ int signal;
+ unsigned mux_use;
+};
+
+/**
* struct pl08x_driver_data - the local state holder for the PL08x
* @slave: slave engine for this instance
* @memcpy: memcpy engine for this instance
@@ -128,7 +247,6 @@ struct pl08x_lli {
* @pd: platform data passed in from the platform/machine
* @phy_chans: array of data for the physical channels
* @pool: a pool for the LLI descriptors
- * @pool_ctr: counter of LLIs in the pool
* @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
* fetches
* @mem_buses: set to indicate memory transfers on AHB2.
@@ -143,10 +261,8 @@ struct pl08x_driver_data {
struct pl08x_platform_data *pd;
struct pl08x_phy_chan *phy_chans;
struct dma_pool *pool;
- int pool_ctr;
u8 lli_buses;
u8 mem_buses;
- spinlock_t lock;
};
/*
@@ -162,12 +278,51 @@ struct pl08x_driver_data {
static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
- return container_of(chan, struct pl08x_dma_chan, chan);
+ return container_of(chan, struct pl08x_dma_chan, vc.chan);
}
static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
- return container_of(tx, struct pl08x_txd, tx);
+ return container_of(tx, struct pl08x_txd, vd.tx);
+}
+
+/*
+ * Mux handling.
+ *
+ * This gives us the DMA request input to the PL08x primecell which the
+ * peripheral described by the channel data will be routed to, possibly
+ * via a board/SoC specific external MUX. One important point to note
+ * here is that this does not depend on the physical channel.
+ */
+static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
+{
+ const struct pl08x_platform_data *pd = plchan->host->pd;
+ int ret;
+
+ if (plchan->mux_use++ == 0 && pd->get_signal) {
+ ret = pd->get_signal(plchan->cd);
+ if (ret < 0) {
+ plchan->mux_use = 0;
+ return ret;
+ }
+
+ plchan->signal = ret;
+ }
+ return 0;
+}
+
+static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
+{
+ const struct pl08x_platform_data *pd = plchan->host->pd;
+
+ if (plchan->signal >= 0) {
+ WARN_ON(plchan->mux_use == 0);
+
+ if (--plchan->mux_use == 0 && pd->put_signal) {
+ pd->put_signal(plchan->cd, plchan->signal);
+ plchan->signal = -1;
+ }
+ }
}
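
The refcounted mux handling above relies on two platform hooks, get_signal() and put_signal(), supplied through pl08x_platform_data. As a rough board-side sketch (illustrative only, not part of this patch; board_dma_signals, board_get_signal and board_put_signal are hypothetical names), the hooks might hand out DMA request lines from a bitmap:

	static DECLARE_BITMAP(board_dma_signals, 16);

	/* Called on first use of a channel's mux; returns a request line */
	static int board_get_signal(const struct pl08x_channel_data *cd)
	{
		int signal = find_first_zero_bit(board_dma_signals, 16);

		if (signal >= 16)
			return -EBUSY;	/* no free DMA request line */

		set_bit(signal, board_dma_signals);
		return signal;		/* stored in plchan->signal */
	}

	/* Called when the last descriptor using the signal is released */
	static void board_put_signal(const struct pl08x_channel_data *cd,
				     int signal)
	{
		clear_bit(signal, board_dma_signals);
	}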
/*
@@ -189,20 +344,25 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
* been set when the LLIs were constructed. Poke them into the hardware
* and start the transfer.
*/
-static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
- struct pl08x_txd *txd)
+static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
{
struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_phy_chan *phychan = plchan->phychan;
- struct pl08x_lli *lli = &txd->llis_va[0];
+ struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
+ struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
+ struct pl08x_lli *lli;
u32 val;
+ list_del(&txd->vd.node);
+
plchan->at = txd;
/* Wait for channel inactive */
while (pl08x_phy_channel_busy(phychan))
cpu_relax();
+ lli = &txd->llis_va[0];
+
dev_vdbg(&pl08x->adev->dev,
"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
@@ -311,10 +471,8 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
struct pl08x_phy_chan *ch;
struct pl08x_txd *txd;
- unsigned long flags;
size_t bytes = 0;
- spin_lock_irqsave(&plchan->lock, flags);
ch = plchan->phychan;
txd = plchan->at;
@@ -354,18 +512,6 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
}
}
- /* Sum up all queued transactions */
- if (!list_empty(&plchan->pend_list)) {
- struct pl08x_txd *txdi;
- list_for_each_entry(txdi, &plchan->pend_list, node) {
- struct pl08x_sg *dsg;
- list_for_each_entry(dsg, &txd->dsg_list, node)
- bytes += dsg->len;
- }
- }
-
- spin_unlock_irqrestore(&plchan->lock, flags);
-
return bytes;
}
@@ -391,7 +537,6 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
if (!ch->locked && !ch->serving) {
ch->serving = virt_chan;
- ch->signal = -1;
spin_unlock_irqrestore(&ch->lock, flags);
break;
}
@@ -404,25 +549,114 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
return NULL;
}
- pm_runtime_get_sync(&pl08x->adev->dev);
return ch;
}
+/* Mark the physical channel as free. Note, this write is atomic. */
static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
struct pl08x_phy_chan *ch)
{
- unsigned long flags;
+ ch->serving = NULL;
+}
+
+/*
+ * Try to allocate a physical channel. When successful, assign it to
+ * this virtual channel, and initiate the next descriptor. The
+ * virtual channel lock must be held at this point.
+ */
+static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
+{
+ struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_phy_chan *ch;
- spin_lock_irqsave(&ch->lock, flags);
+ ch = pl08x_get_phy_channel(pl08x, plchan);
+ if (!ch) {
+ dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
+ plchan->state = PL08X_CHAN_WAITING;
+ return;
+ }
- /* Stop the channel and clear its interrupts */
- pl08x_terminate_phy_chan(pl08x, ch);
+ dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
+ ch->id, plchan->name);
- pm_runtime_put(&pl08x->adev->dev);
+ plchan->phychan = ch;
+ plchan->state = PL08X_CHAN_RUNNING;
+ pl08x_start_next_txd(plchan);
+}
- /* Mark it as free */
- ch->serving = NULL;
- spin_unlock_irqrestore(&ch->lock, flags);
+static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch,
+ struct pl08x_dma_chan *plchan)
+{
+ struct pl08x_driver_data *pl08x = plchan->host;
+
+ dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n",
+ ch->id, plchan->name);
+
+ /*
+ * We do this without taking the lock; we're really only concerned
+ * about whether this pointer is NULL or not, and we're guaranteed
+ * that this will only be called when it _already_ is non-NULL.
+ */
+ ch->serving = plchan;
+ plchan->phychan = ch;
+ plchan->state = PL08X_CHAN_RUNNING;
+ pl08x_start_next_txd(plchan);
+}
+
+/*
+ * Free a physical DMA channel, potentially reallocating it to another
+ * virtual channel if we have any pending.
+ */
+static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
+{
+ struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_dma_chan *p, *next;
+
+ retry:
+ next = NULL;
+
+ /* Find a waiting virtual channel for the next transfer. */
+ list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
+ if (p->state == PL08X_CHAN_WAITING) {
+ next = p;
+ break;
+ }
+
+ if (!next) {
+ list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
+ if (p->state == PL08X_CHAN_WAITING) {
+ next = p;
+ break;
+ }
+ }
+
+ /* Ensure that the physical channel is stopped */
+ pl08x_terminate_phy_chan(pl08x, plchan->phychan);
+
+ if (next) {
+ bool success;
+
+ /*
+ * Eww. We know this isn't going to deadlock
+ * but lockdep probably doesn't.
+ */
+ spin_lock(&next->vc.lock);
+ /* Re-check the state now that we have the lock */
+ success = next->state == PL08X_CHAN_WAITING;
+ if (success)
+ pl08x_phy_reassign_start(plchan->phychan, next);
+ spin_unlock(&next->vc.lock);
+
+ /* If the state changed, try to find another channel */
+ if (!success)
+ goto retry;
+ } else {
+ /* No more jobs, so free up the physical channel */
+ pl08x_put_phy_channel(pl08x, plchan->phychan);
+ }
+
+ plchan->phychan = NULL;
+ plchan->state = PL08X_CHAN_IDLE;
}
/*
@@ -585,8 +819,6 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
return 0;
}
- pl08x->pool_ctr++;
-
bd.txd = txd;
bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
cctl = txd->cctl;
@@ -802,18 +1034,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
return num_llis;
}
-/* You should call this with the struct pl08x lock held */
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
struct pl08x_txd *txd)
{
struct pl08x_sg *dsg, *_dsg;
- /* Free the LLI */
if (txd->llis_va)
dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
- pl08x->pool_ctr--;
-
list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
list_del(&dsg->node);
kfree(dsg);
@@ -822,133 +1050,75 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
kfree(txd);
}
-static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
- struct pl08x_dma_chan *plchan)
+static void pl08x_unmap_buffers(struct pl08x_txd *txd)
{
- struct pl08x_txd *txdi = NULL;
- struct pl08x_txd *next;
-
- if (!list_empty(&plchan->pend_list)) {
- list_for_each_entry_safe(txdi,
- next, &plchan->pend_list, node) {
- list_del(&txdi->node);
- pl08x_free_txd(pl08x, txdi);
+ struct device *dev = txd->vd.tx.chan->device->dev;
+ struct pl08x_sg *dsg;
+
+ if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_single(dev, dsg->src_addr, dsg->len,
+ DMA_TO_DEVICE);
+ else {
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_page(dev, dsg->src_addr, dsg->len,
+ DMA_TO_DEVICE);
}
}
+ if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_single(dev, dsg->dst_addr, dsg->len,
+ DMA_FROM_DEVICE);
+ else
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_page(dev, dsg->dst_addr, dsg->len,
+ DMA_FROM_DEVICE);
+ }
}
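
The DMA_COMPL_* flags tested in pl08x_unmap_buffers() are set by the client when the descriptor is prepared. A minimal client-side sketch of the generic dmaengine API (assumed usage, not taken from this diff):

	/* Memcpy client that does its own unmapping asks the driver to skip it */
	struct dma_async_tx_descriptor *tx;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
			DMA_CTRL_ACK | DMA_COMPL_SKIP_SRC_UNMAP |
			DMA_COMPL_SKIP_DEST_UNMAP);
	if (tx)
		dmaengine_submit(tx);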
-/*
- * The DMA ENGINE API
- */
-static int pl08x_alloc_chan_resources(struct dma_chan *chan)
+static void pl08x_desc_free(struct virt_dma_desc *vd)
{
- return 0;
-}
+ struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
-static void pl08x_free_chan_resources(struct dma_chan *chan)
-{
+ if (!plchan->slave)
+ pl08x_unmap_buffers(txd);
+
+ if (!txd->done)
+ pl08x_release_mux(plchan);
+
+ pl08x_free_txd(plchan->host, txd);
}
-/*
- * This should be called with the channel plchan->lock held
- */
-static int prep_phy_channel(struct pl08x_dma_chan *plchan,
- struct pl08x_txd *txd)
+static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
+ struct pl08x_dma_chan *plchan)
{
- struct pl08x_driver_data *pl08x = plchan->host;
- struct pl08x_phy_chan *ch;
- int ret;
-
- /* Check if we already have a channel */
- if (plchan->phychan) {
- ch = plchan->phychan;
- goto got_channel;
- }
+ LIST_HEAD(head);
+ struct pl08x_txd *txd;
- ch = pl08x_get_phy_channel(pl08x, plchan);
- if (!ch) {
- /* No physical channel available, cope with it */
- dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
- return -EBUSY;
- }
+ vchan_get_all_descriptors(&plchan->vc, &head);
- /*
- * OK we have a physical channel: for memcpy() this is all we
- * need, but for slaves the physical signals may be muxed!
- * Can the platform allow us to use this channel?
- */
- if (plchan->slave && pl08x->pd->get_signal) {
- ret = pl08x->pd->get_signal(plchan);
- if (ret < 0) {
- dev_dbg(&pl08x->adev->dev,
- "unable to use physical channel %d for transfer on %s due to platform restrictions\n",
- ch->id, plchan->name);
- /* Release physical channel & return */
- pl08x_put_phy_channel(pl08x, ch);
- return -EBUSY;
- }
- ch->signal = ret;
+ while (!list_empty(&head)) {
+ txd = list_first_entry(&head, struct pl08x_txd, vd.node);
+ list_del(&txd->vd.node);
+ pl08x_desc_free(&txd->vd);
}
-
- plchan->phychan = ch;
- dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
- ch->id,
- ch->signal,
- plchan->name);
-
-got_channel:
- /* Assign the flow control signal to this channel */
- if (txd->direction == DMA_MEM_TO_DEV)
- txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
- else if (txd->direction == DMA_DEV_TO_MEM)
- txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
-
- plchan->phychan_hold++;
-
- return 0;
}
-static void release_phy_channel(struct pl08x_dma_chan *plchan)
+/*
+ * The DMA ENGINE API
+ */
+static int pl08x_alloc_chan_resources(struct dma_chan *chan)
{
- struct pl08x_driver_data *pl08x = plchan->host;
-
- if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
- pl08x->pd->put_signal(plchan);
- plchan->phychan->signal = -1;
- }
- pl08x_put_phy_channel(pl08x, plchan->phychan);
- plchan->phychan = NULL;
+ return 0;
}
-static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
+static void pl08x_free_chan_resources(struct dma_chan *chan)
{
- struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
- struct pl08x_txd *txd = to_pl08x_txd(tx);
- unsigned long flags;
- dma_cookie_t cookie;
-
- spin_lock_irqsave(&plchan->lock, flags);
- cookie = dma_cookie_assign(tx);
-
- /* Put this onto the pending list */
- list_add_tail(&txd->node, &plchan->pend_list);
-
- /*
- * If there was no physical channel available for this memcpy,
- * stack the request up and indicate that the channel is waiting
- * for a free physical channel.
- */
- if (!plchan->slave && !plchan->phychan) {
- /* Do this memcpy whenever there is a channel ready */
- plchan->state = PL08X_CHAN_WAITING;
- plchan->waiting = txd;
- } else {
- plchan->phychan_hold--;
- }
-
- spin_unlock_irqrestore(&plchan->lock, flags);
-
- return cookie;
+ /* Ensure all queued descriptors are freed */
+ vchan_free_chan_resources(to_virt_chan(chan));
}
static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
@@ -968,23 +1138,53 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct virt_dma_desc *vd;
+ unsigned long flags;
enum dma_status ret;
+ size_t bytes = 0;
ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_SUCCESS)
return ret;
/*
+ * There's no point calculating the residue if there's
+ * no txstate to store the value.
+ */
+ if (!txstate) {
+ if (plchan->state == PL08X_CHAN_PAUSED)
+ ret = DMA_PAUSED;
+ return ret;
+ }
+
+ spin_lock_irqsave(&plchan->vc.lock, flags);
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret != DMA_SUCCESS) {
+ vd = vchan_find_desc(&plchan->vc, cookie);
+ if (vd) {
+ /* On the issued list, so hasn't been processed yet */
+ struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
+ struct pl08x_sg *dsg;
+
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ bytes += dsg->len;
+ } else {
+ bytes = pl08x_getbytes_chan(plchan);
+ }
+ }
+ spin_unlock_irqrestore(&plchan->vc.lock, flags);
+
+ /*
* This cookie is not complete yet.
* Get the number of bytes left in the active transactions and queue.
*/
- dma_set_residue(txstate, pl08x_getbytes_chan(plchan));
+ dma_set_residue(txstate, bytes);
- if (plchan->state == PL08X_CHAN_PAUSED)
- return DMA_PAUSED;
+ if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS)
+ ret = DMA_PAUSED;
/* Whether waiting or running, we're in progress */
- return DMA_IN_PROGRESS;
+ return ret;
}
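
For reference, a consumer observes the residue computed above through the generic status call. A minimal sketch (dmaengine_tx_status() and struct dma_tx_state are the standard dmaengine API; the surrounding usage is assumed):

	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
		pr_info("transfer ongoing, %u bytes remaining\n", state.residue);
	else if (status == DMA_SUCCESS)
		pr_info("transfer complete\n");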
/* PrimeCell DMA extension */
@@ -1080,38 +1280,14 @@ static u32 pl08x_burst(u32 maxburst)
return burst_sizes[i].reg;
}
-static int dma_set_runtime_config(struct dma_chan *chan,
- struct dma_slave_config *config)
+static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
+ enum dma_slave_buswidth addr_width, u32 maxburst)
{
- struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
- struct pl08x_driver_data *pl08x = plchan->host;
- enum dma_slave_buswidth addr_width;
- u32 width, burst, maxburst;
- u32 cctl = 0;
-
- if (!plchan->slave)
- return -EINVAL;
-
- /* Transfer direction */
- plchan->runtime_direction = config->direction;
- if (config->direction == DMA_MEM_TO_DEV) {
- addr_width = config->dst_addr_width;
- maxburst = config->dst_maxburst;
- } else if (config->direction == DMA_DEV_TO_MEM) {
- addr_width = config->src_addr_width;
- maxburst = config->src_maxburst;
- } else {
- dev_err(&pl08x->adev->dev,
- "bad runtime_config: alien transfer direction\n");
- return -EINVAL;
- }
+ u32 width, burst, cctl = 0;
width = pl08x_width(addr_width);
- if (width == ~0) {
- dev_err(&pl08x->adev->dev,
- "bad runtime_config: alien address width\n");
- return -EINVAL;
- }
+ if (width == ~0)
+ return ~0;
cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;
@@ -1128,28 +1304,23 @@ static int dma_set_runtime_config(struct dma_chan *chan,
cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
- plchan->device_fc = config->device_fc;
+ return pl08x_cctl(cctl);
+}
- if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
- plchan->src_addr = config->src_addr;
- plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
- pl08x_select_bus(plchan->cd->periph_buses,
- pl08x->mem_buses);
- } else {
- plchan->dst_addr = config->dst_addr;
- plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR |
- pl08x_select_bus(pl08x->mem_buses,
- plchan->cd->periph_buses);
- }
+static int dma_set_runtime_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
- dev_dbg(&pl08x->adev->dev,
- "configured channel %s (%s) for %s, data width %d, "
- "maxburst %d words, LE, CCTL=0x%08x\n",
- dma_chan_name(chan), plchan->name,
- (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
- addr_width,
- maxburst,
- cctl);
+ if (!plchan->slave)
+ return -EINVAL;
+
+ /* Reject definitely invalid configurations */
+ if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+ config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+ return -EINVAL;
+
+ plchan->cfg = *config;
return 0;
}
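
With the whole dma_slave_config now stored in plchan->cfg, a peripheral driver hands its settings over through the standard slave-config call before preparing transfers. A hedged client-side sketch (the FIFO address is a made-up example value):

	struct dma_slave_config config = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= 0x80100040,	/* hypothetical peripheral FIFO */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 4,
	};

	if (dmaengine_slave_config(chan, &config))
		dev_err(dev, "failed to configure slave DMA channel\n");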
@@ -1163,95 +1334,19 @@ static void pl08x_issue_pending(struct dma_chan *chan)
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
unsigned long flags;
- spin_lock_irqsave(&plchan->lock, flags);
- /* Something is already active, or we're waiting for a channel... */
- if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
- spin_unlock_irqrestore(&plchan->lock, flags);
- return;
- }
-
- /* Take the first element in the queue and execute it */
- if (!list_empty(&plchan->pend_list)) {
- struct pl08x_txd *next;
-
- next = list_first_entry(&plchan->pend_list,
- struct pl08x_txd,
- node);
- list_del(&next->node);
- plchan->state = PL08X_CHAN_RUNNING;
-
- pl08x_start_txd(plchan, next);
+ spin_lock_irqsave(&plchan->vc.lock, flags);
+ if (vchan_issue_pending(&plchan->vc)) {
+ if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
+ pl08x_phy_alloc_and_start(plchan);
}
-
- spin_unlock_irqrestore(&plchan->lock, flags);
-}
-
-static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
- struct pl08x_txd *txd)
-{
- struct pl08x_driver_data *pl08x = plchan->host;
- unsigned long flags;
- int num_llis, ret;
-
- num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
- if (!num_llis) {
- spin_lock_irqsave(&plchan->lock, flags);
- pl08x_free_txd(pl08x, txd);
- spin_unlock_irqrestore(&plchan->lock, flags);
- return -EINVAL;
- }
-
- spin_lock_irqsave(&plchan->lock, flags);
-
- /*
- * See if we already have a physical channel allocated,
- * else this is the time to try to get one.
- */
- ret = prep_phy_channel(plchan, txd);
- if (ret) {
- /*
- * No physical channel was available.
- *
- * memcpy transfers can be sorted out at submission time.
- *
- * Slave transfers may have been denied due to platform
- * channel muxing restrictions. Since there is no guarantee
- * that this will ever be resolved, and the signal must be
- * acquired AFTER acquiring the physical channel, we will let
- * them be NACK:ed with -EBUSY here. The drivers can retry
- * the prep() call if they are eager on doing this using DMA.
- */
- if (plchan->slave) {
- pl08x_free_txd_list(pl08x, plchan);
- pl08x_free_txd(pl08x, txd);
- spin_unlock_irqrestore(&plchan->lock, flags);
- return -EBUSY;
- }
- } else
- /*
- * Else we're all set, paused and ready to roll, status
- * will switch to PL08X_CHAN_RUNNING when we call
- * issue_pending(). If there is something running on the
- * channel already we don't change its state.
- */
- if (plchan->state == PL08X_CHAN_IDLE)
- plchan->state = PL08X_CHAN_PAUSED;
-
- spin_unlock_irqrestore(&plchan->lock, flags);
-
- return 0;
+ spin_unlock_irqrestore(&plchan->vc.lock, flags);
}
-static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
- unsigned long flags)
+static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
{
struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
if (txd) {
- dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
- txd->tx.flags = flags;
- txd