Diffstat (limited to 'drivers/dma/mmp_pdma.c')
-rw-r--r--	drivers/dma/mmp_pdma.c | 559 ++++++++++++++++++++++++++++++---------
1 file changed, 405 insertions(+), 154 deletions(-)
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index c6d98c00f05..a7b186d536b 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -5,6 +5,8 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -17,7 +19,9 @@
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
+#include <linux/of_dma.h>
#include <linux/of.h>
+#include <linux/dma/mmp-pdma.h>
#include "dmaengine.h"
@@ -25,40 +29,41 @@
#define DALGN 0x00a0
#define DINT 0x00f0
#define DDADR 0x0200
-#define DSADR 0x0204
-#define DTADR 0x0208
+#define DSADR(n) (0x0204 + ((n) << 4))
+#define DTADR(n) (0x0208 + ((n) << 4))
#define DCMD 0x020c
-#define DCSR_RUN (1 << 31) /* Run Bit (read / write) */
-#define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch (read / write) */
-#define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable (read / write) */
-#define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */
-#define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */
-#define DCSR_ENDINTR (1 << 2) /* End Interrupt (read / write) */
-#define DCSR_STARTINTR (1 << 1) /* Start Interrupt (read / write) */
-#define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt (read / write) */
-
-#define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable (R/W) */
-#define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */
-#define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */
-#define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */
-#define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */
-#define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */
-#define DCSR_EORINTR (1 << 9) /* The end of Receive */
-
-#define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */
-#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */
+#define DCSR_RUN BIT(31) /* Run Bit (read / write) */
+#define DCSR_NODESC BIT(30) /* No-Descriptor Fetch (read / write) */
+#define DCSR_STOPIRQEN BIT(29) /* Stop Interrupt Enable (read / write) */
+#define DCSR_REQPEND BIT(8) /* Request Pending (read-only) */
+#define DCSR_STOPSTATE BIT(3) /* Stop State (read-only) */
+#define DCSR_ENDINTR BIT(2) /* End Interrupt (read / write) */
+#define DCSR_STARTINTR BIT(1) /* Start Interrupt (read / write) */
+#define DCSR_BUSERR BIT(0) /* Bus Error Interrupt (read / write) */
+
+#define DCSR_EORIRQEN BIT(28) /* End of Receive Interrupt Enable (R/W) */
+#define DCSR_EORJMPEN BIT(27) /* Jump to next descriptor on EOR */
+#define DCSR_EORSTOPEN BIT(26) /* STOP on an EOR */
+#define DCSR_SETCMPST BIT(25) /* Set Descriptor Compare Status */
+#define DCSR_CLRCMPST BIT(24) /* Clear Descriptor Compare Status */
+#define DCSR_CMPST BIT(10) /* The Descriptor Compare Status */
+#define DCSR_EORINTR BIT(9) /* The end of Receive */
+
+#define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
+#define DRCMR_MAPVLD BIT(7) /* Map Valid (read / write) */
+#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */
#define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */
-#define DDADR_STOP (1 << 0) /* Stop (read / write) */
-
-#define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */
-#define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */
-#define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */
-#define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */
-#define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */
-#define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */
-#define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. */
+#define DDADR_STOP BIT(0) /* Stop (read / write) */
+
+#define DCMD_INCSRCADDR BIT(31) /* Source Address Increment Setting. */
+#define DCMD_INCTRGADDR BIT(30) /* Target Address Increment Setting. */
+#define DCMD_FLOWSRC BIT(29) /* Flow Control by the source. */
+#define DCMD_FLOWTRG BIT(28) /* Flow Control by the target. */
+#define DCMD_STARTIRQEN BIT(22) /* Start Interrupt Enable */
+#define DCMD_ENDIRQEN BIT(21) /* End Interrupt Enable */
+#define DCMD_ENDIAN BIT(18) /* Device Endian-ness. */
#define DCMD_BURST8 (1 << 16) /* 8 byte burst */
#define DCMD_BURST16 (2 << 16) /* 16 byte burst */
#define DCMD_BURST32 (3 << 16) /* 32 byte burst */
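
Note: the new DRCMR(n) macro above folds the controller's two mapping-register
banks (request lines 0-63 at offset 0x0100, lines 64-127 at 0x1100, four bytes
apart) into a single expression. A minimal standalone sketch of that
arithmetic, purely for illustration:

#include <assert.h>
#include <stdio.h>

/* Same expression as the DRCMR(n) macro in the patch. */
#define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))

int main(void)
{
	assert(DRCMR(0)  == 0x0100);	/* first line, low bank   */
	assert(DRCMR(5)  == 0x0114);	/* 0x0100 + 5 * 4         */
	assert(DRCMR(63) == 0x01fc);	/* last slot of low bank  */
	assert(DRCMR(64) == 0x1100);	/* first slot, high bank  */
	assert(DRCMR(70) == 0x1118);	/* 0x1100 + 6 * 4         */
	printf("DRCMR offsets check out\n");
	return 0;
}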
@@ -68,7 +73,7 @@
#define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */
#define PDMA_ALIGNMENT 3
-#define PDMA_MAX_DESC_BYTES 0x1000
+#define PDMA_MAX_DESC_BYTES DCMD_LENGTH
struct mmp_pdma_desc_hw {
u32 ddadr; /* Points to the next descriptor + flags */
@@ -93,6 +98,9 @@ struct mmp_pdma_chan {
struct mmp_pdma_phy *phy;
enum dma_transfer_direction dir;
+ struct mmp_pdma_desc_sw *cyclic_first; /* first desc_sw if channel
+ * is in cyclic mode */
+
/* channel's basic info */
struct tasklet_struct tasklet;
u32 dcmd;
@@ -104,6 +112,7 @@ struct mmp_pdma_chan {
struct list_head chain_pending; /* Link descriptors queue for pending */
struct list_head chain_running; /* Link descriptors queue for running */
bool idle; /* channel state machine */
+ bool byte_align;
struct dma_pool *desc_pool; /* Descriptors pool */
};
@@ -120,12 +129,17 @@ struct mmp_pdma_device {
struct device *dev;
struct dma_device device;
struct mmp_pdma_phy *phy;
+ spinlock_t phy_lock; /* protect alloc/free phy channels */
};
-#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
-#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node)
-#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan)
-#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device)
+#define tx_to_mmp_pdma_desc(tx) \
+ container_of(tx, struct mmp_pdma_desc_sw, async_tx)
+#define to_mmp_pdma_desc(lh) \
+ container_of(lh, struct mmp_pdma_desc_sw, node)
+#define to_mmp_pdma_chan(dchan) \
+ container_of(dchan, struct mmp_pdma_chan, chan)
+#define to_mmp_pdma_dev(dmadev) \
+ container_of(dmadev, struct mmp_pdma_device, device)
static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
@@ -136,29 +150,34 @@ static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
static void enable_chan(struct mmp_pdma_phy *phy)
{
- u32 reg;
+ u32 reg, dalgn;
if (!phy->vchan)
return;
- reg = phy->vchan->drcmr;
- reg = (((reg) < 64) ? 0x0100 : 0x1100) + (((reg) & 0x3f) << 2);
+ reg = DRCMR(phy->vchan->drcmr);
writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+ dalgn = readl(phy->base + DALGN);
+ if (phy->vchan->byte_align)
+ dalgn |= 1 << phy->idx;
+ else
+ dalgn &= ~(1 << phy->idx);
+ writel(dalgn, phy->base + DALGN);
+
reg = (phy->idx << 2) + DCSR;
- writel(readl(phy->base + reg) | DCSR_RUN,
- phy->base + reg);
+ writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
}
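
Note: enable_chan() now also maintains DALGN, which carries one bit per
physical channel: set when the mapped virtual channel has buffers that are not
8-byte aligned (byte_align), cleared otherwise. A standalone sketch of that
read-modify-write against a stand-in register (names are hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_dalgn;	/* stands in for the DALGN register */

/* Mirror of the DALGN update added to enable_chan(). */
static void set_alignment(unsigned int idx, bool byte_align)
{
	uint32_t dalgn = fake_dalgn;	/* readl(phy->base + DALGN)       */

	if (byte_align)
		dalgn |= 1u << idx;
	else
		dalgn &= ~(1u << idx);

	fake_dalgn = dalgn;		/* writel(dalgn, phy->base + DALGN) */
}

int main(void)
{
	set_alignment(2, true);		/* channel 2 uses unaligned buffers */
	set_alignment(5, true);
	set_alignment(2, false);	/* realigned: clear the bit again   */
	printf("DALGN = 0x%08x\n", fake_dalgn);	/* prints 0x00000020 */
	return 0;
}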
static void disable_chan(struct mmp_pdma_phy *phy)
{
u32 reg;
- if (phy) {
- reg = (phy->idx << 2) + DCSR;
- writel(readl(phy->base + reg) & ~DCSR_RUN,
- phy->base + reg);
- }
+ if (!phy)
+ return;
+
+ reg = (phy->idx << 2) + DCSR;
+ writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
}
static int clear_chan_irq(struct mmp_pdma_phy *phy)
@@ -167,26 +186,27 @@ static int clear_chan_irq(struct mmp_pdma_phy *phy)
u32 dint = readl(phy->base + DINT);
u32 reg = (phy->idx << 2) + DCSR;
- if (dint & BIT(phy->idx)) {
- /* clear irq */
- dcsr = readl(phy->base + reg);
- writel(dcsr, phy->base + reg);
- if ((dcsr & DCSR_BUSERR) && (phy->vchan))
- dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
- return 0;
- }
- return -EAGAIN;
+ if (!(dint & BIT(phy->idx)))
+ return -EAGAIN;
+
+ /* clear irq */
+ dcsr = readl(phy->base + reg);
+ writel(dcsr, phy->base + reg);
+ if ((dcsr & DCSR_BUSERR) && (phy->vchan))
+ dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
+
+ return 0;
}
static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
{
struct mmp_pdma_phy *phy = dev_id;
- if (clear_chan_irq(phy) == 0) {
- tasklet_schedule(&phy->vchan->tasklet);
- return IRQ_HANDLED;
- } else
+ if (clear_chan_irq(phy) != 0)
return IRQ_NONE;
+
+ tasklet_schedule(&phy->vchan->tasklet);
+ return IRQ_HANDLED;
}
static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
@@ -208,8 +228,8 @@ static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
if (irq_num)
return IRQ_HANDLED;
- else
- return IRQ_NONE;
+
+ return IRQ_NONE;
}
/* lookup free phy channel as descending priority */
@@ -217,7 +237,8 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
int prio, i;
struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
- struct mmp_pdma_phy *phy;
+ struct mmp_pdma_phy *phy, *found = NULL;
+ unsigned long flags;
/*
* dma channel priorities
@@ -226,38 +247,43 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
* ch 8 - 11, 24 - 27 <--> (2)
* ch 12 - 15, 28 - 31 <--> (3)
*/
- for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
+
+ spin_lock_irqsave(&pdev->phy_lock, flags);
+ for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) {
for (i = 0; i < pdev->dma_channels; i++) {
- if (prio != ((i & 0xf) >> 2))
+ if (prio != (i & 0xf) >> 2)
continue;
phy = &pdev->phy[i];
if (!phy->vchan) {
phy->vchan = pchan;
- return phy;
+ found = phy;
+ goto out_unlock;
}
}
}
- return NULL;
+out_unlock:
+ spin_unlock_irqrestore(&pdev->phy_lock, flags);
+ return found;
}
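
Note: lookup_phy() scans the physical channels in ascending priority groups,
and the group of channel i is (i & 0xf) >> 2, matching the comment above
(channels 0-3 and 16-19 share priority 0, 4-7 and 20-23 priority 1, and so
on). A quick check of that mapping:

#include <stdio.h>

/* Priority group of a physical channel, as used by lookup_phy(). */
static int chan_prio(int i)
{
	return (i & 0xf) >> 2;
}

int main(void)
{
	int i;

	for (i = 0; i < 32; i++)
		printf("ch %2d -> prio %d\n", i, chan_prio(i));
	/* ch 0..3 -> 0, ch 4..7 -> 1, ..., ch 16..19 -> 0, ... */
	return 0;
}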
-/* desc->tx_list ==> pending list */
-static void append_pending_queue(struct mmp_pdma_chan *chan,
- struct mmp_pdma_desc_sw *desc)
+static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
{
- struct mmp_pdma_desc_sw *tail =
- to_mmp_pdma_desc(chan->chain_pending.prev);
+ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
+ unsigned long flags;
+ u32 reg;
- if (list_empty(&chan->chain_pending))
- goto out_splice;
+ if (!pchan->phy)
+ return;
- /* one irq per queue, even appended */
- tail->desc.ddadr = desc->async_tx.phys;
- tail->desc.dcmd &= ~DCMD_ENDIRQEN;
+ /* clear the channel mapping in DRCMR */
+ reg = DRCMR(pchan->drcmr);
+ writel(0, pchan->phy->base + reg);
- /* softly link to pending list */
-out_splice:
- list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
+ spin_lock_irqsave(&pdev->phy_lock, flags);
+ pchan->phy->vchan = NULL;
+ pchan->phy = NULL;
+ spin_unlock_irqrestore(&pdev->phy_lock, flags);
}
/**
@@ -276,10 +302,7 @@ static void start_pending_queue(struct mmp_pdma_chan *chan)
if (list_empty(&chan->chain_pending)) {
/* chance to re-fetch phy channel with higher prio */
- if (chan->phy) {
- chan->phy->vchan = NULL;
- chan->phy = NULL;
- }
+ mmp_pdma_free_phy(chan);
dev_dbg(chan->dev, "no pending list\n");
return;
}
@@ -325,14 +348,16 @@ static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
cookie = dma_cookie_assign(&child->async_tx);
}
- append_pending_queue(chan, desc);
+ /* softly link to pending list - desc->tx_list ==> pending list */
+ list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
spin_unlock_irqrestore(&chan->desc_lock, flags);
return cookie;
}
-struct mmp_pdma_desc_sw *mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
+static struct mmp_pdma_desc_sw *
+mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
struct mmp_pdma_desc_sw *desc;
dma_addr_t pdesc;
@@ -368,25 +393,24 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
if (chan->desc_pool)
return 1;
- chan->desc_pool =
- dma_pool_create(dev_name(&dchan->dev->device), chan->dev,
- sizeof(struct mmp_pdma_desc_sw),
- __alignof__(struct mmp_pdma_desc_sw), 0);
+ chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
+ chan->dev,
+ sizeof(struct mmp_pdma_desc_sw),
+ __alignof__(struct mmp_pdma_desc_sw),
+ 0);
if (!chan->desc_pool) {
dev_err(chan->dev, "unable to allocate descriptor pool\n");
return -ENOMEM;
}
- if (chan->phy) {
- chan->phy->vchan = NULL;
- chan->phy = NULL;
- }
+
+ mmp_pdma_free_phy(chan);
chan->idle = true;
chan->dev_addr = 0;
return 1;
}
static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
- struct list_head *list)
+ struct list_head *list)
{
struct mmp_pdma_desc_sw *desc, *_desc;
@@ -410,17 +434,14 @@ static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
chan->desc_pool = NULL;
chan->idle = true;
chan->dev_addr = 0;
- if (chan->phy) {
- chan->phy->vchan = NULL;
- chan->phy = NULL;
- }
+ mmp_pdma_free_phy(chan);
return;
}
static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan *dchan,
- dma_addr_t dma_dst, dma_addr_t dma_src,
- size_t len, unsigned long flags)
+ dma_addr_t dma_dst, dma_addr_t dma_src,
+ size_t len, unsigned long flags)
{
struct mmp_pdma_chan *chan;
struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
@@ -433,6 +454,7 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
return NULL;
chan = to_mmp_pdma_chan(dchan);
+ chan->byte_align = false;
if (!chan->dir) {
chan->dir = DMA_MEM_TO_MEM;
@@ -449,6 +471,8 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
}
copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
+ if (dma_src & 0x7 || dma_dst & 0x7)
+ chan->byte_align = true;
new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
new->desc.dsadr = dma_src;
@@ -485,6 +509,8 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
new->desc.ddadr = DDADR_STOP;
new->desc.dcmd |= DCMD_ENDIRQEN;
+ chan->cyclic_first = NULL;
+
return &first->async_tx;
fail:
@@ -495,8 +521,8 @@ fail:
static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction dir,
- unsigned long flags, void *context)
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
{
struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
@@ -508,12 +534,16 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
if ((sgl == NULL) || (sg_len == 0))
return NULL;
+ chan->byte_align = false;
+
for_each_sg(sgl, sg, sg_len, i) {
addr = sg_dma_address(sg);
avail = sg_dma_len(sgl);
do {
len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
+ if (addr & 0x7)
+ chan->byte_align = true;
/* allocate and populate the descriptor */
new = mmp_pdma_alloc_descriptor(chan);
@@ -556,6 +586,95 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
new->desc.ddadr = DDADR_STOP;
new->desc.dcmd |= DCMD_ENDIRQEN;
+ chan->dir = dir;
+ chan->cyclic_first = NULL;
+
+ return &first->async_tx;
+
+fail:
+ if (first)
+ mmp_pdma_free_desc_list(chan, &first->tx_list);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
+ dma_addr_t buf_addr, size_t len, size_t period_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct mmp_pdma_chan *chan;
+ struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
+ dma_addr_t dma_src, dma_dst;
+
+ if (!dchan || !len || !period_len)
+ return NULL;
+
+ /* the buffer length must be a multiple of period_len */
+ if (len % period_len != 0)
+ return NULL;
+
+ if (period_len > PDMA_MAX_DESC_BYTES)
+ return NULL;
+
+ chan = to_mmp_pdma_chan(dchan);
+
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ dma_src = buf_addr;
+ dma_dst = chan->dev_addr;
+ break;
+ case DMA_DEV_TO_MEM:
+ dma_dst = buf_addr;
+ dma_src = chan->dev_addr;
+ break;
+ default:
+ dev_err(chan->dev, "Unsupported direction for cyclic DMA\n");
+ return NULL;
+ }
+
+ chan->dir = direction;
+
+ do {
+ /* Allocate the link descriptor from DMA pool */
+ new = mmp_pdma_alloc_descriptor(chan);
+ if (!new) {
+ dev_err(chan->dev, "no memory for desc\n");
+ goto fail;
+ }
+
+ new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
+ (DCMD_LENGTH & period_len));
+ new->desc.dsadr = dma_src;
+ new->desc.dtadr = dma_dst;
+
+ if (!first)
+ first = new;
+ else
+ prev->desc.ddadr = new->async_tx.phys;
+
+ new->async_tx.cookie = 0;
+ async_tx_ack(&new->async_tx);
+
+ prev = new;
+ len -= period_len;
+
+ if (chan->dir == DMA_MEM_TO_DEV)
+ dma_src += period_len;
+ else
+ dma_dst += period_len;
+
+ /* Insert the link descriptor to the LD ring */
+ list_add_tail(&new->node, &first->tx_list);
+ } while (len);
+
+ first->async_tx.flags = flags; /* client is in control of this ack */
+ first->async_tx.cookie = -EBUSY;
+
+ /* make the cyclic link */
+ new->desc.ddadr = first->async_tx.phys;
+ chan->cyclic_first = first;
+
return &first->async_tx;
fail:
@@ -565,12 +684,11 @@ fail:
}
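
Note: mmp_pdma_prep_dma_cyclic() above slices the buffer into period_len-sized
descriptors, each with DCMD_ENDIRQEN set, and links the last one back to the
first. A hypothetical client sketch (not part of this patch) driving it
through the generic dmaengine API, assuming chan was obtained for this
controller and buf is already DMA-mapped; error handling is trimmed:

#include <linux/dmaengine.h>

static int start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
			   size_t buf_len, size_t period_len,
			   dma_async_tx_callback cb, void *cb_data)
{
	struct dma_async_tx_descriptor *desc;

	/* buf_len must be a multiple of period_len, and period_len must
	 * fit in DCMD_LENGTH, or the prep call returns NULL. */
	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EINVAL;

	desc->callback = cb;		/* invoked once per period */
	desc->callback_param = cb_data;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}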
static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
- unsigned long arg)
+ unsigned long arg)
{
struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
struct dma_slave_config *cfg = (void *)arg;
unsigned long flags;
- int ret = 0;
u32 maxburst = 0, addr = 0;
enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
@@ -580,10 +698,7 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
switch (cmd) {
case DMA_TERMINATE_ALL:
disable_chan(chan->phy);
- if (chan->phy) {
- chan->phy->vchan = NULL;
- chan->phy = NULL;
- }
+ mmp_pdma_free_phy(chan);
spin_lock_irqsave(&chan->desc_lock, flags);
mmp_pdma_free_desc_list(chan, &chan->chain_pending);
mmp_pdma_free_desc_list(chan, &chan->chain_running);
@@ -617,29 +732,106 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
else if (maxburst == 32)
chan->dcmd |= DCMD_BURST32;
- if (cfg) {
- chan->dir = cfg->direction;
- chan->drcmr = cfg->slave_id;
- }
+ chan->dir = cfg->direction;
chan->dev_addr = addr;
+ /* FIXME: drivers should be ported over to use the filter
+ * function. Once that's done, the following two lines can
+ * be removed.
+ */
+ if (cfg->slave_id)
+ chan->drcmr = cfg->slave_id;
break;
default:
return -ENOSYS;
}
- return ret;
+ return 0;
+}
+
+static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
+ dma_cookie_t cookie)
+{
+ struct mmp_pdma_desc_sw *sw;
+ u32 curr, residue = 0;
+ bool passed = false;
+ bool cyclic = chan->cyclic_first != NULL;
+
+ /*
+ * If the channel does not have a phy pointer anymore, it has already
+ * been completed. Therefore, its residue is 0.
+ */
+ if (!chan->phy)
+ return 0;
+
+ if (chan->dir == DMA_DEV_TO_MEM)
+ curr = readl(chan->phy->base + DTADR(chan->phy->idx));
+ else
+ curr = readl(chan->phy->base + DSADR(chan->phy->idx));
+
+ list_for_each_entry(sw, &chan->chain_running, node) {
+ u32 start, end, len;
+
+ if (chan->dir == DMA_DEV_TO_MEM)
+ start = sw->desc.dtadr;
+ else
+ start = sw->desc.dsadr;
+
+ len = sw->desc.dcmd & DCMD_LENGTH;
+ end = start + len;
+
+ /*
+ * 'passed' will be latched once we found the descriptor which
+ * lies inside the boundaries of the curr pointer. All
+ * descriptors that occur in the list _after_ we found that
+ * partially handled descriptor are still to be processed and
+ * are hence added to the residual bytes counter.
+ */
+
+ if (passed) {
+ residue += len;
+ } else if (curr >= start && curr <= end) {
+ residue += end - curr;
+ passed = true;
+ }
+
+ /*
+ * Descriptors that have the ENDIRQEN bit set mark the end of a
+ * transaction chain, and the cookie assigned with it has been
+ * returned previously from mmp_pdma_tx_submit().
+ *
+ * In case we have multiple transactions in the running chain,
+ * and the cookie does not match the one the user asked us
+ * about, reset the state variables and start over.
+ *
+ * This logic does not apply to cyclic transactions, where all
+ * descriptors have the ENDIRQEN bit set, and for which we
+ * can't have multiple transactions on one channel anyway.
+ */
+ if (cyclic || !(sw->desc.dcmd & DCMD_ENDIRQEN))
+ continue;
+
+ if (sw->async_tx.cookie == cookie) {
+ return residue;
+ } else {
+ residue = 0;
+ passed = false;
+ }
+ }
+
+ /* We should only get here in case of cyclic transactions */
+ return residue;
}
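
Note: the residue walk above latches once the hardware's current address falls
inside a descriptor, sums the lengths of everything after it, and starts over
whenever it crosses an ENDIRQEN descriptor whose cookie is not the one being
queried. The same (non-cyclic) logic on plain arrays, for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_desc {
	uint32_t start;		/* dtadr/dsadr of the descriptor  */
	uint32_t len;		/* dcmd & DCMD_LENGTH             */
	bool	 end_irq;	/* DCMD_ENDIRQEN: end of a chain  */
	int	 cookie;	/* cookie assigned at submit time */
};

static uint32_t residue(const struct fake_desc *d, int n,
			uint32_t curr, int cookie)
{
	uint32_t res = 0;
	bool passed = false;
	int i;

	for (i = 0; i < n; i++) {
		uint32_t end = d[i].start + d[i].len;

		if (passed) {
			res += d[i].len;
		} else if (curr >= d[i].start && curr <= end) {
			res += end - curr;
			passed = true;
		}

		if (!d[i].end_irq)
			continue;
		if (d[i].cookie == cookie)
			return res;
		/* wrong transaction: reset and scan the next chain */
		res = 0;
		passed = false;
	}
	return res;
}

int main(void)
{
	/* one transaction, two 0x1000-byte descriptors; the engine is
	 * halfway through the first one */
	struct fake_desc chain[] = {
		{ 0x10000000, 0x1000, false, 0 },
		{ 0x10001000, 0x1000, true,  7 },
	};

	printf("residue = 0x%x\n",
	       residue(chain, 2, 0x10000800, 7));	/* prints 0x1800 */
	return 0;
}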
static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
- dma_cookie_t cookie, struct dma_tx_state *txstate)
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
enum dma_status ret;
- unsigned long flags;
- spin_lock_irqsave(&chan->desc_lock, flags);
ret = dma_cookie_status(dchan, cookie, txstate);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ if (likely(ret != DMA_ERROR))
+ dma_set_residue(txstate, mmp_pdma_residue(chan, cookie));
return ret;
}
@@ -670,29 +862,50 @@ static void dma_do_tasklet(unsigned long data)
LIST_HEAD(chain_cleanup);
unsigned long flags;
- /* submit pending list; callback for each desc; free desc */
+ if (chan->cyclic_first) {
+ dma_async_tx_callback cb = NULL;
+ void *cb_data = NULL;
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ desc = chan->cyclic_first;
+ cb = desc->async_tx.callback;
+ cb_data = desc->async_tx.callback_param;
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ if (cb)
+ cb(cb_data);
- /* update the cookie if we have some descriptors to cleanup */
- if (!list_empty(&chan->chain_running)) {
- dma_cookie_t cookie;
+ return;
+ }
- desc = to_mmp_pdma_desc(chan->chain_running.prev);
- cookie = desc->async_tx.cookie;
- dma_cookie_complete(&desc->async_tx);
+ /* submit pending list; callback for each desc; free desc */
+ spin_lock_irqsave(&chan->desc_lock, flags);
- dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
+ list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
+ /*
+ * move the descriptors to a temporary list so we can drop
+ * the lock during the entire cleanup operation
+ */
+ list_move(&desc->node, &chain_cleanup);
+
+ /*
+ * Look for the first list entry which has the ENDIRQEN flag
+ * set. That is the descriptor we got an interrupt for, so
+ * complete that transaction and its cookie.
+ */
+ if (desc->desc.dcmd & DCMD_ENDIRQEN) {
+ dma_cookie_t cookie = desc->async_tx.cookie;
+ dma_cookie_complete(&desc->async_tx);
+ dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
+ break;
+ }
}
/*
- * move the descriptors to a temporary list so we can drop the lock
- * during the entire cleanup operation
+ * The hardware is idle and ready for more when the
+ * chain_running list is empty.
*/
- list_splice_tail_init(&chan->chain_running, &chain_cleanup);
-
- /* the hardware is now idle and ready for more */
- chan->idle = true;
+ chan->idle = list_empty(&chan->chain_running);
/* Start any pending transactions automatically */
start_pending_queue(chan);
@@ -720,15 +933,13 @@ static int mmp_pdma_remove(struct platform_device *op)
return 0;
}
-static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
- int idx, int irq)
+static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
{
struct mmp_pdma_phy *phy = &pdev->phy[idx];
struct mmp_pdma_chan *chan;
int ret;
- chan = devm_kzalloc(pdev->dev,
- sizeof(struct mmp_pdma_chan), GFP_KERNEL);
+ chan = devm_kzalloc(pdev->dev, sizeof(*chan), GFP_KERNEL);
if (chan == NULL)
return -ENOMEM;
@@ -736,8 +947,8 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
phy->base = pdev->base;
if (irq) {
- ret = devm_request_irq(pdev->dev, irq,
- mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy);
+ ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler,
+ IRQF_SHARED, "pdma", phy);
if (ret) {
dev_err(pdev->dev, "channel request irq fail!\n");
return ret;
@@ -752,8 +963,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
INIT_LIST_HEAD(&chan->chain_running);
/* register virt channel to dma engine */
- list_add_tail(&chan->chan.device_node,
- &pdev->device.channels);
+ list_add_tail(&chan->chan.device_node, &pdev->device.channels);
return 0;
}
@@ -764,6 +974,21 @@ static struct of_device_id mmp_pdma_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
+static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct mmp_pdma_device *d = ofdma->of_dma_data;
+ struct dma_chan *chan;
+
+ chan = dma_get_any_slave_channel(&d->device);
+ if (!chan)
+ return NULL;
+
+ to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0];
+
+ return chan;
+}
+
static int mmp_pdma_probe(struct platform_device *op)
{
struct mmp_pdma_device *pdev;
@@ -776,20 +1001,20 @@ static int mmp_pdma_probe(struct platform_device *op)
pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
if (!pdev)
return -ENOMEM;
+
pdev->dev = &op->dev;
- iores = platform_get_resource(op, IORESOURCE_MEM, 0);
- if (!iores)
- return -EINVAL;
+ spin_lock_init(&pdev->phy_lock);
- pdev->base = devm_request_and_ioremap(pdev->dev, iores);
- if (!pdev->base)
- return -EADDRNOTAVAIL;
+ iores = platform_get_resource(op, IORESOURCE_MEM, 0);
+ pdev->base = devm_ioremap_resource(pdev->dev, iores);
+ if (IS_ERR(pdev->base))
+ return PTR_ERR(pdev->base);
of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
if (of_id)
- of_property_read_u32(pdev->dev->of_node,
- "#dma-channels", &dma_channels);
+ of_property_read_u32(pdev->dev->of_node, "#dma-channels",
+ &dma_channels);
else if (pdata && pdata->dma_channels)
dma_channels = pdata->dma_channels;
else
@@ -801,8 +1026,8 @@ static int mmp_pdma_probe(struct platform_device *op)
irq_num++;
}
- pdev->phy = devm_kzalloc(pdev->dev,
- dma_channels * sizeof(struct mmp_pdma_chan), GFP_KERNEL);
+ pdev->phy = devm_kcalloc(pdev->dev, dma_channels, sizeof(*pdev->phy),
+ GFP_KERNEL);
if (pdev->phy == NULL)
return -ENOMEM;
@@ -811,8 +1036,8 @@ static int mmp_pdma_probe(struct platform_device *op)
if (irq_num != dma_channels) {
/* all chan share one irq, demux inside */
irq = platform_get_irq(op, 0);
- ret = devm_request_irq(pdev->dev, irq,
- mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev);
+ ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler,
+ IRQF_SHARED, "pdma", pdev);
if (ret)
return ret;
}
@@ -826,13 +1051,15 @@ static int mmp_pdma_probe(struct platform_device *op)
dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
- dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
+ dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
+ dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask);
pdev->device.dev = &op->dev;
pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
pdev->device.device_tx_status = mmp_pdma_tx_status;
pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
+ pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
pdev->device.device_issue_pending = mmp_pdma_issue_pending;
pdev->device.device_control = mmp_pdma_control;
pdev->device.copy_align = PDMA_ALIGNMENT;
@@ -848,7 +1075,18 @@ static int mmp_pdma_probe(struct platform_device *op)
return ret;
}
- dev_info(pdev->device.dev, "initialized\n");
+ if (op->dev.of_node) {
+ /* Device-tree DMA controller registration */
+ ret = of_dma_controller_register(op->dev.of_node,
+ mmp_pdma_dma_xlate, pdev);
+ if (ret < 0) {
+ dev_err(&op->dev, "of_dma_controller_register failed\n");
+ return ret;
+ }
+ }
+
+ platform_set_drvdata(op, pdev);
+ dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
return 0;
}
@@ -868,8 +1106,21 @@ static struct platform_driver mmp_pdma_driver = {
.remove = mmp_pdma_remove,
};
+bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
+
+ if (chan->device->dev->driver != &mmp_pdma_driver.driver)
+ return false;
+
+ c->drcmr = *(unsigned int *)param;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
+
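
Note: mmp_pdma_filter_fn() lets non-DT platforms pick a channel by DRCMR
request line through the generic channel-request API, which is what the FIXME
in mmp_pdma_control() refers to. A hypothetical client sketch (not part of
this patch); the request-line number 21 is made up:

#include <linux/dmaengine.h>
#include <linux/dma/mmp-pdma.h>

static struct dma_chan *get_pdma_chan(void)
{
	dma_cap_mask_t mask;
	unsigned int drcmr = 21;	/* peripheral request line */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* the filter stores drcmr in the channel before returning it */
	return dma_request_channel(mask, mmp_pdma_filter_fn, &drcmr);
}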
module_platform_driver(mmp_pdma_driver);
-MODULE_DESCRIPTION("MARVELL MMP Periphera DMA Driver");
+MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");